Merge branch 'remotes/lorenzo/pci/vmd'
author    Bjorn Helgaas <bhelgaas@google.com>
          Thu, 28 Nov 2019 14:54:52 +0000 (08:54 -0600)
committer Bjorn Helgaas <bhelgaas@google.com>
          Thu, 28 Nov 2019 14:54:52 +0000 (08:54 -0600)
  - Add VMD bus 224-255 restriction decode (Jon Derrick)

  - Add VMD 8086:9A0B device ID (Jon Derrick)

  - Remove Keith from VMD maintainer list (Keith Busch)

* remotes/lorenzo/pci/vmd:
  MAINTAINERS: Remove Keith from VMD maintainer
  PCI: vmd: Add device id for VMD device 8086:9A0B
  PCI: vmd: Add bus 224-255 restriction decode

1103 files changed:
CREDITS
Documentation/ABI/testing/sysfs-bus-pci
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/memory.rst
Documentation/arm64/silicon-errata.rst
Documentation/core-api/index.rst
Documentation/core-api/memory-allocation.rst
Documentation/core-api/symbol-namespaces.rst [moved from Documentation/kbuild/namespaces.rst with 100% similarity]
Documentation/dev-tools/kasan.rst
Documentation/dev-tools/kselftest.rst
Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
Documentation/devicetree/bindings/media/rc.yaml
Documentation/devicetree/bindings/pci/amlogic,meson-pcie.txt
Documentation/devicetree/bindings/pci/layerscape-pci.txt
Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
Documentation/devicetree/bindings/usb/generic-ehci.yaml
Documentation/devicetree/bindings/usb/generic-ohci.yaml
Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
Documentation/devicetree/bindings/usb/usb-hcd.yaml
Documentation/devicetree/bindings/usb/usb-uhci.txt
Documentation/devicetree/bindings/usb/usb-xhci.txt
Documentation/hwmon/index.rst
Documentation/hwmon/inspur-ipsps1.rst
Documentation/hwmon/k10temp.rst
Documentation/kbuild/makefiles.rst
Documentation/kbuild/modules.rst
Documentation/kbuild/reproducible-builds.rst
Documentation/networking/device_drivers/index.rst
Documentation/networking/device_drivers/pensando/ionic.rst
Documentation/networking/j1939.rst
Documentation/networking/net_dim.txt
Documentation/power/pci.rst
Documentation/process/coding-style.rst
Documentation/process/deprecated.rst
Documentation/usb/rio.rst [deleted file]
MAINTAINERS
Makefile
arch/alpha/kernel/pci-sysfs.c
arch/arc/include/asm/Kbuild
arch/arm/boot/dts/am335x-icev2.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/mt7629-rfb.dts
arch/arm/boot/dts/mt7629.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23-a33.dtsi
arch/arm/boot/dts/sun8i-a83t.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm/boot/dts/sun9i-a80.dtsi
arch/arm/boot/dts/sunxi-h3-h5.dtsi
arch/arm/configs/badge4_defconfig
arch/arm/configs/corgi_defconfig
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/configs/spitz_defconfig
arch/arm/crypto/Kconfig
arch/arm/crypto/aes-ce-core.S
arch/arm/include/asm/Kbuild
arch/arm/include/asm/xen/xen-ops.h [deleted file]
arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_33xx_data.c
arch/arm/mach-omap2/pm.c
arch/arm/xen/Makefile
arch/arm/xen/efi.c [deleted file]
arch/arm/xen/enlighten.c
arch/arm/xen/mm.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dts
arch/arm64/boot/dts/amlogic/meson-g12b-s922x-khadas-vim3.dts
arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dts
arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/asm-uaccess.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/vdso/compat_barrier.h
arch/arm64/include/asm/vdso_datapage.h [deleted file]
arch/arm64/include/asm/xen/xen-ops.h [deleted file]
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/process.c
arch/arm64/kernel/vdso32/Makefile
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/tlb.c
arch/arm64/mm/fault.c
arch/arm64/xen/Makefile
arch/mips/boot/dts/qca/ar9331.dtsi
arch/mips/configs/mtx1_defconfig
arch/mips/configs/rm200_defconfig
arch/mips/fw/arc/memory.c
arch/mips/fw/sni/sniprom.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/octeon/cvmx-ipd.h
arch/mips/include/asm/unistd.h
arch/mips/include/uapi/asm/hwcap.h
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/setup.c
arch/mips/kernel/syscall.c
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/loongson64/Platform
arch/mips/loongson64/common/mem.c
arch/mips/loongson64/common/serial.c
arch/mips/loongson64/loongson-3/numa.c
arch/mips/pmcs-msp71xx/msp_prom.c
arch/mips/vdso/Makefile
arch/mips/vdso/gettimeofday.c [deleted file]
arch/parisc/include/asm/cache.h
arch/parisc/include/asm/ldcw.h
arch/parisc/mm/ioremap.c
arch/powerpc/boot/Makefile
arch/powerpc/include/asm/Kbuild
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/pseries/lpar.c
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
arch/riscv/include/asm/Kbuild
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/kernel/entry.S
arch/riscv/kernel/traps.c
arch/riscv/mm/init.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/atomic_ops.h
arch/s390/include/asm/bitops.h
arch/s390/include/asm/cpacf.h
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/jump_label.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pci_clp.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/uaccess.h
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kvm/kvm-s390.c
arch/s390/pci/pci.c
arch/s390/pci/pci_clp.c
arch/sparc/Kconfig
arch/sparc/include/asm/Kbuild
arch/x86/boot/compressed/acpi.c
arch/x86/boot/compressed/misc.c
arch/x86/events/amd/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/msr.c
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/cpu_entry_area.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/pti.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/head64.c
arch/x86/kernel/process.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/pci/Makefile
arch/x86/pci/common.c
arch/x86/pci/fixup.c
arch/x86/pci/intel_mid_pci.c
arch/x86/pci/numachip.c
arch/x86/platform/efi/efi.c
arch/x86/xen/efi.c
arch/x86/xen/enlighten.c
arch/xtensa/boot/dts/virt.dts
arch/xtensa/include/asm/bitops.h
arch/xtensa/include/asm/uaccess.h
arch/xtensa/kernel/xtensa_ksyms.c
block/blk-cgroup.c
block/blk-mq.c
block/blk-rq-qos.c
block/blk-rq-qos.h
block/blk-wbt.c
block/elevator.c
block/sed-opal.c
drivers/acpi/cppc_acpi.c
drivers/acpi/hmat/hmat.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/acpi/sleep.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_internal.h
drivers/ata/ahci.c
drivers/ata/libata-scsi.c
drivers/ata/pata_atp867x.c
drivers/ata/sata_nv.c
drivers/base/core.c
drivers/base/memory.c
drivers/base/platform.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk_zoned.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/char/random.c
drivers/clk/ti/clk-7xx.c
drivers/clocksource/timer-of.c
drivers/cpufreq/cpufreq.c
drivers/dma-buf/dma-resv.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/rci2-table.c
drivers/firmware/efi/tpm.c
drivers/firmware/google/vpd_decode.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-intel-mid.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-max77620.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_writeback.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_sprite.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_reset.h
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/intel_pch.c
drivers/gpu/drm/i915/intel_pch.h
drivers/gpu/drm/i915/selftests/i915_gem.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/panel/panel-lg-lb035q02.c
drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
drivers/gpu/drm/panel/panel-sony-acx565akm.c
drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
drivers/gpu/drm/panfrost/panfrost_gpu.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/rcar-du/rcar_du_writeback.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/tiny/Kconfig
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/vc4/vc4_txp.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/hid/hid-hyperv.c
drivers/hv/vmbus_drv.c
drivers/hwmon/nct7904.c
drivers/iio/accel/adxl372.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/adc/ad799x.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/hx711.c
drivers/iio/adc/meson_saradc.c
drivers/iio/adc/stm32-adc-core.c
drivers/iio/adc/stm32-adc-core.h
drivers/iio/adc/stm32-adc.c
drivers/iio/imu/adis_buffer.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
drivers/iio/light/Kconfig
drivers/iio/light/opt3001.c
drivers/iio/light/vcnl4000.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/security.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/input/misc/da9063_onkey.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/elantech.c
drivers/input/rmi4/rmi_driver.c
drivers/input/touchscreen/goodix.c
drivers/iommu/Kconfig
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/of_iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-al-fic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its-pci-msi.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-sifive-plic.c
drivers/md/dm-cache-target.c
drivers/md/dm-clone-target.c
drivers/md/dm-snap.c
drivers/md/raid0.c
drivers/media/usb/stkwebcam/stk-webcam.c
drivers/memstick/host/jmb38x_ms.c
drivers/misc/fastrpc.c
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/misc/pci_endpoint_test.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sh_mmcif.c
drivers/mtd/nand/raw/au1550nd.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8795_spi.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/dsa/microchip/ksz9477_reg.h
drivers/net/dsa/microchip/ksz9477_spi.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/qca8k.c
drivers/net/dsa/rtl8366.c
drivers/net/dsa/rtl8366rb.c
drivers/net/dsa/sja1105/sja1105.h
drivers/net/dsa/sja1105/sja1105_dynamic_config.h
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_ptp.h
drivers/net/dsa/sja1105/sja1105_spi.c
drivers/net/dsa/sja1105/sja1105_static_config.h
drivers/net/dsa/sja1105/sja1105_tas.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/atheros/ag71xx.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/common/cavium_ptp.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpni.h
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/i825xx/lasi_82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/i825xx/sni_82596.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/ixgb/ixgb.h
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
drivers/net/ethernet/mscc/ocelot_board.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/pensando/Kconfig
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/pensando/ionic/ionic_stats.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ieee802154/atusb.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mcr20a.c
drivers/net/netdevsim/fib.c
drivers/net/phy/at803x.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/mdio_device.c
drivers/net/phy/micrel.c
drivers/net/phy/phy-c45.c
drivers/net/phy/phy-core.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/ppp/pptp.c
drivers/net/tun.c
drivers/net/usb/hso.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/sr9800.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/iwl-io.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/io-cmd-bdev.c
drivers/nvme/target/loop.c
drivers/nvme/target/tcp.c
drivers/parisc/sba_iommu.c
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/access.c
drivers/pci/ats.c
drivers/pci/controller/Kconfig
drivers/pci/controller/Makefile
drivers/pci/controller/cadence/Kconfig [new file with mode: 0644]
drivers/pci/controller/cadence/Makefile [new file with mode: 0644]
drivers/pci/controller/cadence/pcie-cadence-ep.c [moved from drivers/pci/controller/pcie-cadence-ep.c with 83% similarity]
drivers/pci/controller/cadence/pcie-cadence-host.c [moved from drivers/pci/controller/pcie-cadence-host.c with 76% similarity]
drivers/pci/controller/cadence/pcie-cadence-plat.c [new file with mode: 0644]
drivers/pci/controller/cadence/pcie-cadence.c [moved from drivers/pci/controller/pcie-cadence.c with 100% similarity]
drivers/pci/controller/cadence/pcie-cadence.h [moved from drivers/pci/controller/pcie-cadence.h with 82% similarity]
drivers/pci/controller/dwc/pci-dra7xx.c
drivers/pci/controller/dwc/pci-layerscape-ep.c
drivers/pci/controller/dwc/pci-layerscape.c
drivers/pci/controller/dwc/pci-meson.c
drivers/pci/controller/dwc/pcie-artpec6.c
drivers/pci/controller/dwc/pcie-designware-host.c
drivers/pci/controller/dwc/pcie-designware-plat.c
drivers/pci/controller/dwc/pcie-designware.h
drivers/pci/controller/dwc/pcie-tegra194.c
drivers/pci/controller/dwc/pcie-uniphier.c
drivers/pci/controller/pci-aardvark.c
drivers/pci/controller/pci-hyperv.c
drivers/pci/controller/pci-thunder-pem.c
drivers/pci/controller/pcie-iproc.c
drivers/pci/controller/pcie-mobiveil.c
drivers/pci/controller/pcie-rcar.c
drivers/pci/controller/pcie-rockchip-host.c
drivers/pci/controller/pcie-rockchip.h
drivers/pci/endpoint/functions/pci-epf-test.c
drivers/pci/endpoint/pci-epc-mem.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/iov.c
drivers/pci/msi.c
drivers/pci/pci-bridge-emul.c
drivers/pci/pci-bridge-emul.h
drivers/pci/pci-driver.c
drivers/pci/pci-sysfs.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aer.c
drivers/pci/pcie/aspm.c
drivers/pci/pcie/dpc.c
drivers/pci/pcie/portdrv.h
drivers/pci/pcie/portdrv_core.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/pcie/ptm.c
drivers/pci/probe.c
drivers/pci/proc.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pci/switch/switchtec.c
drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.c
drivers/platform/x86/classmate-laptop.c
drivers/platform/x86/i2c-multi-instantiate.c
drivers/platform/x86/intel_punit_ipc.c
drivers/ptp/Kconfig
drivers/ptp/ptp_qoriq.c
drivers/rapidio/devices/tsi721.c
drivers/s390/block/dasd_eckd.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/qdio_setup.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/megaraid.c
drivers/scsi/pm8001/pm8001_hwi.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufshcd.c
drivers/staging/exfat/Kconfig
drivers/staging/exfat/Makefile
drivers/staging/exfat/exfat.h
drivers/staging/exfat/exfat_blkdev.c
drivers/staging/exfat/exfat_cache.c
drivers/staging/exfat/exfat_core.c
drivers/staging/exfat/exfat_nls.c
drivers/staging/exfat/exfat_super.c
drivers/staging/exfat/exfat_upcase.c
drivers/staging/fbtft/Kconfig
drivers/staging/fbtft/Makefile
drivers/staging/fbtft/fbtft-core.c
drivers/staging/fbtft/fbtft_device.c [deleted file]
drivers/staging/fbtft/flexfb.c [deleted file]
drivers/staging/gasket/gasket_constants.h
drivers/staging/gasket/gasket_core.c
drivers/staging/gasket/gasket_core.h
drivers/staging/octeon/ethernet-tx.c
drivers/staging/octeon/octeon-stubs.h
drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/speakup/sysfs-driver-speakup [new file with mode: 0644]
drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
drivers/staging/vt6655/device_main.c
drivers/tty/n_hdlc.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/Kconfig
drivers/tty/serial/fsl_linflexuart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/owl-uart.c
drivers/tty/serial/rda-uart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_mctrl_gpio.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/uartlite.c
drivers/tty/serial/xilinx_uartps.c
drivers/usb/cdns3/cdns3-pci-wrap.c
drivers/usb/cdns3/core.c
drivers/usb/cdns3/ep0.c
drivers/usb/cdns3/gadget.c
drivers/usb/class/usblp.c
drivers/usb/core/hcd-pci.c
drivers/usb/dwc3/drd.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/host.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-ext-caps.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/image/microtek.c
drivers/usb/misc/Kconfig
drivers/usb/misc/Makefile
drivers/usb/misc/adutux.c
drivers/usb/misc/chaoskey.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/ldusb.c
drivers/usb/misc/legousbtower.c
drivers/usb/misc/rio500.c [deleted file]
drivers/usb/misc/rio500_usb.h [deleted file]
drivers/usb/misc/usblcd.c
drivers/usb/misc/yurex.c
drivers/usb/renesas_usbhs/common.h
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/fifo.h
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/pipe.c
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan.c
drivers/usb/serial/option.c
drivers/usb/serial/usb-serial.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/displayport.c
drivers/usb/typec/ucsi/ucsi_ccg.c
drivers/usb/usb-skeleton.c
drivers/usb/usbip/vhci_hcd.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vfio/pci/vfio_pci_private.h
drivers/vhost/test.c
drivers/video/fbdev/aty/radeon_pm.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/efifb.c
drivers/video/logo/Makefile
drivers/virt/vboxguest/vboxguest_utils.c
drivers/w1/slaves/Kconfig
drivers/xen/balloon.c
drivers/xen/efi.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/platform-pci.c
drivers/xen/pvcalls-back.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/binfmt_elf.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ref-verify.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/mds_client.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/netmisc.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/direct-io.c
fs/erofs/data.c
fs/erofs/super.c
fs/erofs/zdata.c
fs/fs-writeback.c
fs/io_uring.c
fs/libfs.c
fs/nfs/direct.c
fs/nfs/nfs4proc.c
fs/nfs/write.c
fs/ocfs2/aops.c
fs/ocfs2/file.c
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/localalloc.c
fs/ocfs2/xattr.c
fs/proc/meminfo.c
fs/proc/page.c
fs/readdir.c
fs/statfs.c
fs/super.c
fs/tracefs/inode.c
fs/xfs/libxfs/xfs_ag.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_dir2_block.c
fs/xfs/libxfs/xfs_fs.h
fs/xfs/scrub/refcount.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
include/asm-generic/Kbuild
include/linux/aer.h
include/linux/bitmap.h
include/linux/bitops.h
include/linux/compiler_attributes.h
include/linux/dsa/sja1105.h
include/linux/export.h
include/linux/gpio/driver.h
include/linux/hwmon.h
include/linux/kvm_host.h
include/linux/leds.h
include/linux/memcontrol.h
include/linux/micrel_phy.h
include/linux/mii.h
include/linux/of_pci.h
include/linux/page_ext.h
include/linux/pci-ats.h
include/linux/pci-epc.h
include/linux/pci.h
include/linux/phy.h
include/linux/platform_device.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/string.h
include/linux/sunrpc/xprtsock.h
include/linux/tcp.h
include/linux/tpm_eventlog.h
include/linux/uaccess.h
include/linux/xarray.h
include/net/cfg80211.h
include/net/llc_conn.h
include/net/net_namespace.h
include/net/request_sock.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/tcp.h
include/scsi/scsi_eh.h
include/sound/hda_register.h
include/trace/events/rxrpc.h
include/trace/events/sock.h
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/nvme_ioctl.h
include/uapi/linux/pci_regs.h
include/uapi/linux/pg.h
include/uapi/linux/sched.h
include/uapi/linux/serial_core.h
include/xen/xen-ops.h
kernel/dma/remap.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/fork.c
kernel/freezer.c
kernel/gen_kheaders.sh
kernel/kthread.c
kernel/panic.c
kernel/power/main.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/fair.c
kernel/sched/membarrier.c
kernel/stop_machine.c
kernel/sysctl.c
kernel/time/hrtimer.c
kernel/time/tick-broadcast-hrtimer.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_dynevent.c
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_printk.c
kernel/trace/trace_stack.c
kernel/trace/trace_stat.c
kernel/trace/trace_uprobe.c
lib/devres.c
lib/generic-radix-tree.c
lib/string.c
lib/strnlen_user.c
lib/test_meminit.c
lib/test_user_copy.c
lib/textsearch.c
lib/usercopy.c
lib/vdso/Kconfig
mm/backing-dev.c
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/init-mm.c
mm/kmemleak.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/memremap.c
mm/page_alloc.c
mm/page_ext.c
mm/page_owner.c
mm/rmap.c
mm/shmem.c
mm/shuffle.c
mm/slab.c
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/sparse.c
mm/truncate.c
mm/vmpressure.c
mm/vmscan.c
mm/z3fold.c
net/batman-adv/soft-interface.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/core/datagram.c
net/core/devlink.c
net/core/filter.c
net/core/net_namespace.c
net/core/request_sock.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ipv4.c
net/dsa/dsa2.c
net/dsa/tag_sja1105.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_dup_ipv4.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/netfilter.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/llc/af_llc.c
net/llc/llc_c_ac.c
net/llc/llc_conn.c
net/llc/llc_if.c
net/llc/llc_s_ac.c
net/llc/llc_sap.c
net/mac80211/debugfs_netdev.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/util.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nft_connlimit.c
net/nfc/llcp_sock.c
net/openvswitch/actions.c
net/openvswitch/vport-internal_dev.c
net/packet/af_packet.c
net/rds/ib.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/conn_service.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/recvmsg.c
net/rxrpc/sendmsg.c
net/sched/act_api.c
net/sched/act_mirred.c
net/sched/act_mpls.c
net/sched/cls_api.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_cbs.c
net/sched/sch_dsmark.c
net/sched/sch_etf.c
net/sched/sch_netem.c
net/sched/sch_taprio.c
net/sctp/diag.c
net/sctp/input.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/smc/smc_core.c
net/smc/smc_rx.c
net/sunrpc/xprtsock.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/socket.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/wext-compat.c
net/wireless/wext-sme.c
net/x25/x25_dev.c
net/xdp/xsk.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
samples/bpf/asm_goto_workaround.h
samples/bpf/task_fd_query_user.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.lib
scripts/coccinelle/api/devm_platform_ioremap_resource.cocci [deleted file]
scripts/coccinelle/misc/add_namespace.cocci
scripts/gdb/linux/dmesg.py
scripts/gdb/linux/symbols.py
scripts/gdb/linux/utils.py
scripts/mod/modpost.c
scripts/namespace.pl
scripts/nsdeps
scripts/recordmcount.h
scripts/setlocalversion
security/integrity/Makefile
security/selinux/ss/services.c
sound/hda/ext/hdac_ext_controller.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/pcm.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/bpf/Makefile
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fs.h
tools/include/uapi/linux/fscrypt.h [new file with mode: 0644]
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/usbdevice_fs.h
tools/lib/bpf/Makefile
tools/lib/bpf/libbpf_internal.h
tools/lib/bpf/xsk.c
tools/lib/subcmd/Makefile
tools/perf/Documentation/asciidoc.conf
tools/perf/Documentation/jitdump-specification.txt
tools/perf/arch/arm/annotate/instructions.c
tools/perf/arch/arm64/annotate/instructions.c
tools/perf/arch/powerpc/util/header.c
tools/perf/arch/s390/annotate/instructions.c
tools/perf/arch/s390/util/header.c
tools/perf/arch/x86/annotate/instructions.c
tools/perf/arch/x86/util/header.c
tools/perf/builtin-kvm.c
tools/perf/builtin-script.c
tools/perf/check-headers.sh
tools/perf/pmu-events/arch/s390/cf_z15/basic.json [moved from tools/perf/pmu-events/arch/s390/cf_m8561/basic.json with 100% similarity]
tools/perf/pmu-events/arch/s390/cf_z15/crypto.json [moved from tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json with 100% similarity]
tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json [moved from tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json with 100% similarity]
tools/perf/pmu-events/arch/s390/cf_z15/extended.json [moved from tools/perf/pmu-events/arch/s390/cf_m8561/extended.json with 100% similarity]
tools/perf/pmu-events/arch/s390/cf_z15/transaction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/mapfile.csv
tools/perf/pmu-events/jevents.c
tools/perf/tests/perf-hooks.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/evsel.c
tools/perf/util/jitdump.c
tools/perf/util/llvm-utils.c
tools/perf/util/map.c
tools/perf/util/python.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
tools/testing/selftests/bpf/test_flow_dissector.sh
tools/testing/selftests/bpf/test_lwt_ip_encap.sh
tools/testing/selftests/kselftest/runner.sh
tools/testing/selftests/kselftest_install.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/kvm_util_internal.h
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c [new file with mode: 0644]
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/udpgso.c
tools/testing/selftests/pidfd/Makefile
tools/testing/selftests/powerpc/mm/tlbie_test.c
tools/testing/selftests/rtc/settings [new file with mode: 0644]
tools/testing/selftests/vm/gup_benchmark.c
tools/testing/selftests/watchdog/watchdog-test.c
tools/virtio/crypto/hash.h [moved from arch/arm64/kernel/vdso/gettimeofday.S with 100% similarity]
tools/virtio/linux/dma-mapping.h
tools/virtio/xen/xen.h [new file with mode: 0644]
usr/include/Makefile
virt/kvm/arm/vgic/trace.h
virt/kvm/kvm_main.c

diff --git a/CREDITS b/CREDITS
index 8b67a85..031605d 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1637,6 +1637,10 @@ S: Panoramastrasse 18
 S: D-69126 Heidelberg
 S: Germany
 
+N: Simon Horman
+M: horms@verge.net.au
+D: Renesas ARM/ARM64 SoC maintainer
+
 N: Christopher Horn
 E: chorn@warwick.net
 D: Miscellaneous sysctl hacks
index 8bfee55..450296c 100644 (file)
@@ -347,3 +347,16 @@ Description:
                If the device has any Peer-to-Peer memory registered, this
                file contains a '1' if the memory has been published for
                use outside the driver that owns the device.
+
+What:          /sys/bus/pci/devices/.../link/clkpm
+               /sys/bus/pci/devices/.../link/l0s_aspm
+               /sys/bus/pci/devices/.../link/l1_aspm
+               /sys/bus/pci/devices/.../link/l1_1_aspm
+               /sys/bus/pci/devices/.../link/l1_2_aspm
+               /sys/bus/pci/devices/.../link/l1_1_pcipm
+               /sys/bus/pci/devices/.../link/l1_2_pcipm
+Date:          October 2019
+Contact:       Heiner Kallweit <hkallweit1@gmail.com>
+Description:   If ASPM is supported for an endpoint, these files can be
+               used to disable or enable the individual power management
+               states. Write y/1/on to enable, n/0/off to disable.
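
A minimal usage sketch of the new ASPM sysfs interface described above, assuming a PCIe endpoint at the hypothetical address 0000:01:00.0 (substitute a real device whose link supports ASPM):

    # Read the current state of L1 ASPM (expected to report 0 or 1)
    cat /sys/bus/pci/devices/0000:01:00.0/link/l1_aspm
    # Disable L1 ASPM for this link, then re-enable it
    echo 0 > /sys/bus/pci/devices/0000:01:00.0/link/l1_aspm
    echo 1 > /sys/bus/pci/devices/0000:01:00.0/link/l1_aspm
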
index 0fa8c0e..5361ebe 100644 (file)
@@ -615,8 +615,8 @@ on an IO device and is an example of this type.
 Protections
 -----------
 
-A cgroup is protected to be allocated upto the configured amount of
-the resource if the usages of all its ancestors are under their
+A cgroup is protected upto the configured amount of the resource
+as long as the usages of all its ancestors are under their
 protected levels.  Protections can be hard guarantees or best effort
 soft boundaries.  Protections can also be over-committed in which case
 only upto the amount available to the parent is protected among
@@ -1096,7 +1096,10 @@ PAGE_SIZE multiple when read back.
        is within its effective min boundary, the cgroup's memory
        won't be reclaimed under any conditions. If there is no
        unprotected reclaimable memory available, OOM killer
-       is invoked.
+       is invoked. Above the effective min boundary (or
+       effective low boundary if it is higher), pages are reclaimed
+       proportionally to the overage, reducing reclaim pressure for
+       smaller overages.
 
        Effective min boundary is limited by memory.min values of
        all ancestor cgroups. If there is memory.min overcommitment
@@ -1118,7 +1121,10 @@ PAGE_SIZE multiple when read back.
        Best-effort memory protection.  If the memory usage of a
        cgroup is within its effective low boundary, the cgroup's
        memory won't be reclaimed unless memory can be reclaimed
-       from unprotected cgroups.
+       from unprotected cgroups.  Above the effective low boundary (or
+       effective min boundary if it is higher), pages are reclaimed
+       proportionally to the overage, reducing reclaim pressure for
+       smaller overages.
 
        Effective low boundary is limited by memory.low values of
        all ancestor cgroups. If there is memory.low overcommitment
@@ -2482,8 +2488,10 @@ system performance due to overreclaim, to the point where the feature
 becomes self-defeating.
 
 The memory.low boundary on the other hand is a top-down allocated
-reserve.  A cgroup enjoys reclaim protection when it's within its low,
-which makes delegation of subtrees possible.
+reserve.  A cgroup enjoys reclaim protection when it's within its
+effective low, which makes delegation of subtrees possible. It also
+enjoys having reclaim pressure proportional to its overage when
+above its effective low.
 
 The original high boundary, the hard limit, is defined as a strict
 limit that can not budge, even if the OOM killer has to be called.
index c7ac2f3..c74027c 100644 (file)
                hpiosize=nn[KMG]        The fixed amount of bus space which is
                                reserved for hotplug bridge's IO window.
                                Default size is 256 bytes.
+               hpmmiosize=nn[KMG]      The fixed amount of bus space which is
+                               reserved for hotplug bridge's MMIO window.
+                               Default size is 2 megabytes.
+               hpmmioprefsize=nn[KMG]  The fixed amount of bus space which is
+                               reserved for hotplug bridge's MMIO_PREF window.
+                               Default size is 2 megabytes.
                hpmemsize=nn[KMG]       The fixed amount of bus space which is
-                               reserved for hotplug bridge's memory window.
+                               reserved for hotplug bridge's MMIO and
+                               MMIO_PREF window.
                                Default size is 2 megabytes.
                hpbussize=nn    The minimum amount of additional bus numbers
                                reserved for buses below a hotplug bridge.
                        even if the platform doesn't give the OS permission to
                        use them.  This may cause conflicts if the platform
                        also tries to use these services.
+               dpc-native      Use native PCIe service for DPC only.  May
+                               cause conflicts if firmware uses AER or DPC.
                compat  Disable native PCIe services (PME, AER, DPC, PCIe
                        hotplug).
 
                                the unplug protocol
                        never -- do not unplug even if version check succeeds
 
+       xen_legacy_crash        [X86,XEN]
+                       Crash from Xen panic notifier, without executing late
+                       panic() code such as dumping handler.
+
        xen_nopvspin    [X86,XEN]
                        Disables the ticketlock slowpath using Xen PV
                        optimizations.
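
As a hedged illustration of the new hotplug window parameters above: like the existing hpiosize/hpmemsize entries they sit under the pci= block of kernel-parameters.txt, so they are assumed to be passed as pci= sub-options on the kernel command line (the sizes below are arbitrary examples):

    # Reserve 8 MB of MMIO and 16 MB of prefetchable MMIO per hotplug bridge
    pci=hpmmiosize=8M,hpmmioprefsize=16M
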
index b040909..02e0217 100644 (file)
@@ -154,11 +154,18 @@ return virtual addresses to userspace from a 48-bit range.
 
 Software can "opt-in" to receiving VAs from a 52-bit space by
 specifying an mmap hint parameter that is larger than 48-bit.
+
 For example:
-    maybe_high_address = mmap(~0UL, size, prot, flags,...);
+
+.. code-block:: c
+
+   maybe_high_address = mmap(~0UL, size, prot, flags,...);
 
 It is also possible to build a debug kernel that returns addresses
 from a 52-bit space by enabling the following kernel config options:
+
+.. code-block:: sh
+
    CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y
 
 Note that this option is only intended for debugging applications
index 17ea3fe..ab7ed2f 100644 (file)
@@ -107,6 +107,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX2 SMMUv3| #126            | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX2 Core  | #219            | CAVIUM_TX2_ERRATUM_219      |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
index fa16a05..ab0eae1 100644 (file)
@@ -38,6 +38,7 @@ Core utilities
    protection-keys
    ../RCU/index
    gcc-plugins
+   symbol-namespaces
 
 
 Interfaces for kernel debugging
index 7744aa3..939e3df 100644 (file)
@@ -98,6 +98,10 @@ limited. The actual limit depends on the hardware and the kernel
 configuration, but it is a good practice to use `kmalloc` for objects
 smaller than page size.
 
+The address of a chunk allocated with `kmalloc` is aligned to at least
+ARCH_KMALLOC_MINALIGN bytes.  For sizes which are a power of two, the
+alignment is also guaranteed to be at least the respective size.
+
 For large allocations you can use :c:func:`vmalloc` and
 :c:func:`vzalloc`, or directly request pages from the page
 allocator. The memory allocated by `vmalloc` and related functions is
index b72d07d..5252961 100644 (file)
@@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 - 2 times faster.
 Both KASAN modes work with both SLUB and SLAB memory allocators.
 For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
 
+To augment reports with last allocation and freeing stack of the physical page,
+it is recommended to enable also CONFIG_PAGE_OWNER and boot with page_owner=on.
+
 To disable instrumentation for specific files or directories, add a line
 similar to the following to the respective kernel Makefile:
 
index 2560490..ecdfdc9 100644 (file)
@@ -89,6 +89,22 @@ To build, save output files in a separate directory with KBUILD_OUTPUT ::
 
   $ export KBUILD_OUTPUT=/tmp/kselftest; make TARGETS="size timers" kselftest
 
+Additionally you can use the "SKIP_TARGETS" variable on the make command
+line to specify one or more targets to exclude from the TARGETS list.
+
+To run all tests but a single subsystem::
+
+  $ make -C tools/testing/selftests SKIP_TARGETS=ptrace run_tests
+
+You can specify multiple tests to skip::
+
+  $  make SKIP_TARGETS="size timers" kselftest
+
+You can also specify a restricted list of tests to run together with a
+dedicated skiplist::
+
+  $  make TARGETS="bpf breakpoints size timers" SKIP_TARGETS=bpf kselftest
+
 See the top-level tools/testing/selftests/Makefile for the list of all
 possible targets.
 
index 3248595..f04870d 100644 (file)
@@ -85,4 +85,5 @@ examples:
                         <&pd IMX_SC_R_DSP_RAM>;
         mbox-names = "txdb0", "txdb1", "rxdb0", "rxdb1";
         mboxes = <&lsio_mu13 2 0>, <&lsio_mu13 2 1>, <&lsio_mu13 3 0>, <&lsio_mu13 3 1>;
+        memory-region = <&dsp_reserved>;
     };
index 676ec42..567a33a 100644 (file)
@@ -43,13 +43,9 @@ properties:
 
   dvdd-supply:
     description: DVdd voltage supply
-    items:
-      - const: dvdd
 
   avdd-supply:
     description: AVdd voltage supply
-    items:
-      - const: avdd
 
   adi,rejection-60-Hz-enable:
     description: |
@@ -99,6 +95,9 @@ required:
 examples:
   - |
     spi0 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
       adc@0 {
         compatible = "adi,ad7192";
         reg = <0>;
index f4c5d34..7079d44 100644 (file)
@@ -1,8 +1,11 @@
 * Advanced Interrupt Controller (AIC)
 
 Required properties:
-- compatible: Should be "atmel,<chip>-aic"
-  <chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
+- compatible: Should be:
+    - "atmel,<chip>-aic" where  <chip> can be "at91rm9200", "sama5d2",
+      "sama5d3" or "sama5d4"
+    - "microchip,<chip>-aic" where <chip> can be "sam9x60"
+
 - interrupt-controller: Identifies the node as an interrupt controller.
 - #interrupt-cells: The number of cells to define the interrupts. It should be 3.
   The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
index 3d5c154..9054555 100644 (file)
@@ -73,7 +73,6 @@ properties:
           - rc-genius-tvgo-a11mce
           - rc-gotview7135
           - rc-hauppauge
-          - rc-hauppauge
           - rc-hisi-poplar
           - rc-hisi-tv-demo
           - rc-imon-mce
index efa2c8b..84fdc42 100644 (file)
@@ -9,13 +9,16 @@ Additional properties are described here:
 
 Required properties:
 - compatible:
-       should contain "amlogic,axg-pcie" to identify the core.
+       should contain :
+       - "amlogic,axg-pcie" for AXG SoC Family
+       - "amlogic,g12a-pcie" for G12A SoC Family
+       to identify the core.
 - reg:
        should contain the configuration address space.
 - reg-names: Must be
        - "elbi"        External local bus interface registers
        - "cfg"         Meson specific registers
-       - "phy"         Meson PCIE PHY registers
+       - "phy"         Meson PCIE PHY registers for AXG SoC Family
        - "config"      PCIe configuration space
 - reset-gpios: The GPIO to generate PCIe PERST# assert and deassert signal.
 - clocks: Must contain an entry for each entry in clock-names.
@@ -23,12 +26,13 @@ Required properties:
        - "pclk"       PCIe GEN 100M PLL clock
        - "port"       PCIe_x(A or B) RC clock gate
        - "general"    PCIe Phy clock
-       - "mipi"       PCIe_x(A or B) 100M ref clock gate
+       - "mipi"       PCIe_x(A or B) 100M ref clock gate for AXG SoC Family
 - resets: phandle to the reset lines.
 - reset-names: must contain "phy" "port" and "apb"
-       - "phy"         Share PHY reset
+       - "phy"         Share PHY reset for AXG SoC Family
        - "port"        Port A or B reset
        - "apb"         Share APB reset
+- phys: should contain a phandle to the shared phy for G12A SoC Family
 - device_type:
        should be "pci". As specified in designware-pcie.txt
 
index e20ceaa..99a386e 100644 (file)
@@ -21,6 +21,7 @@ Required properties:
         "fsl,ls1046a-pcie"
         "fsl,ls1043a-pcie"
         "fsl,ls1012a-pcie"
+        "fsl,ls1028a-pcie"
   EP mode:
        "fsl,ls1046a-pcie-ep", "fsl,ls-pcie-ep"
 - reg: base addresses and lengths of the PCIe controller register blocks.
index 8a56a85..a974821 100644 (file)
@@ -37,7 +37,7 @@ properties:
       - description: exclusive PHY reset line
       - description: shared reset line between the PCIe PHY and PCIe controller
 
-  resets-names:
+  reset-names:
     items:
       - const: phy
       - const: pcie
index dd63151..b143d9a 100644 (file)
@@ -26,6 +26,8 @@ Required properties:
     - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
     - "renesas,scif-r8a774a1" for R8A774A1 (RZ/G2M) SCIF compatible UART.
     - "renesas,hscif-r8a774a1" for R8A774A1 (RZ/G2M) HSCIF compatible UART.
+    - "renesas,scif-r8a774b1" for R8A774B1 (RZ/G2N) SCIF compatible UART.
+    - "renesas,hscif-r8a774b1" for R8A774B1 (RZ/G2N) HSCIF compatible UART.
     - "renesas,scif-r8a774c0" for R8A774C0 (RZ/G2E) SCIF compatible UART.
     - "renesas,hscif-r8a774c0" for R8A774C0 (RZ/G2E) HSCIF compatible UART.
     - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
index b9f04e6..6ffb09b 100644 (file)
@@ -85,8 +85,8 @@ A child node must exist to represent the core DWC2 IP block. The name of
 the node is not important. The content of the node is defined in dwc2.txt.
 
 PHY documentation is provided in the following places:
-- Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt
-- Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt
+- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
 
 Example device nodes:
        usb: usb@ffe09000 {
index 059f6ef..1ca64c8 100644 (file)
@@ -63,7 +63,11 @@ properties:
     description:
       Set this flag to force EHCI reset after resume.
 
-  phys: true
+  phys:
+    description: PHY specifier for the USB PHY
+
+  phy-names:
+    const: usb
 
 required:
   - compatible
@@ -89,6 +93,7 @@ examples:
         interrupts = <39>;
         clocks = <&ahb_gates 1>;
         phys = <&usbphy 1>;
+        phy-names = "usb";
     };
 
 ...
index da5a14b..bcffec1 100644 (file)
@@ -67,7 +67,11 @@ properties:
     description:
       Overrides the detected port count
 
-  phys: true
+  phys:
+    description: PHY specifier for the USB PHY
+
+  phy-names:
+    const: usb
 
 required:
   - compatible
@@ -84,6 +88,7 @@ examples:
           interrupts = <64>;
           clocks = <&usb_clk 6>, <&ahb_gates 2>;
           phys = <&usbphy 1>;
+          phy-names = "usb";
       };
 
 ...
index f3e4ace..42d8814 100644 (file)
@@ -33,7 +33,7 @@ Required properties:
        "dma_ck": dma_bus clock for data transfer by DMA,
        "xhci_ck": controller clock
 
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
 
 Optional properties:
  - wakeup-source : enable USB remote wakeup;
@@ -53,7 +53,7 @@ Optional properties:
        See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
  - imod-interval-ns: default interrupt moderation interval is 5000ns
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 Example:
index b9af7f5..e0ae609 100644 (file)
@@ -17,7 +17,7 @@ Required properties:
  - clock-names : must contain "sys_ck" for clock of controller,
        the following clocks are optional:
        "ref_ck", "mcu_ck" and "dma_ck";
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
  - dr_mode : should be one of "host", "peripheral" or "otg",
        refer to usb/generic.txt
 
@@ -60,7 +60,7 @@ Optional properties:
  - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
        bit1 for u3port1, ... etc;
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 Sub-nodes:
index 9c8c56d..7263b7f 100644 (file)
@@ -18,8 +18,13 @@ properties:
     description:
       List of all the USB PHYs on this HCD
 
+  phy-names:
+    description:
+      Name specifier for the USB PHY
+
 examples:
   - |
     usb {
         phys = <&usb2_phy1>, <&usb3_phy1>;
+        phy-names = "usb";
     };
index cc2e6f7..d1702eb 100644 (file)
@@ -6,7 +6,7 @@ Required properties:
 - reg : Should contain 1 register ranges(address and length)
 - interrupts : UHCI controller interrupt
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 Example:
index 97400e8..b49b819 100644 (file)
@@ -41,9 +41,9 @@ Optional properties:
   - usb3-lpm-capable: determines if platform is USB3 LPM capable
   - quirk-broken-port-ped: set if the controller has broken port disable mechanism
   - imod-interval-ns: default interrupt moderation interval is 5000ns
-  - phys : see usb-hcd.txt in the current directory
+  - phys : see usb-hcd.yaml in the current directory
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 
index 8147c3f..230ad59 100644 (file)
@@ -7,6 +7,7 @@ Linux Hardware Monitoring
 
    hwmon-kernel-api
    pmbus-core
+   inspur-ipsps1
    submitting-patches
    sysfs-interface
    userspace-tools
index 2b871ae..292c0c2 100644 (file)
@@ -1,5 +1,5 @@
 Kernel driver inspur-ipsps1
-=======================
+===========================
 
 Supported chips:
 
index 12a86ba..4451d59 100644 (file)
@@ -21,10 +21,17 @@ Supported chips:
 
 * AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
 
-* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
+* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri",
+  "Carrizo", "Stoney Ridge", "Bristol Ridge"
 
 * AMD Family 16h processors: "Kabini", "Mullins"
 
+* AMD Family 17h processors: "Zen", "Zen 2"
+
+* AMD Family 18h processors: "Hygon Dhyana"
+
+* AMD Family 19h processors: "Zen 3"
+
   Prefix: 'k10temp'
 
   Addresses scanned: PCI space
@@ -110,3 +117,12 @@ The maximum value for Tctl is available in the file temp1_max.
 If the BIOS has enabled hardware temperature control, the threshold at
 which the processor will throttle itself to avoid damage is available in
 temp1_crit and temp1_crit_hyst.
+
+On some AMD CPUs, there is a difference between the die temperature (Tdie) and
+the reported temperature (Tctl). Tdie is the real measured temperature, and
+Tctl is used for fan control. While Tctl is always available as temp1_input,
+the driver exports Tdie temperature as temp2_input for those CPUs which support
+it.
+
+Models from 17h family report relative temperature, the driver aims to
+compensate and report the real temperature.
index 6ba9d53..b89c881 100644 (file)
@@ -954,11 +954,6 @@ When kbuild executes, the following steps are followed (roughly):
 
        From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
 
-    KBUILD_ARFLAGS   Options for $(AR) when creating archives
-
-       $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
-       mode) if this option is supported by $(AR).
-
     KBUILD_LDS
 
        The linker script with full path. Assigned by the top-level Makefile.
index d2ae799..774a998 100644 (file)
@@ -498,10 +498,11 @@ build.
        will be written containing all exported symbols that were not
        defined in the kernel.
 
---- 6.3 Symbols From Another External Module
+6.3 Symbols From Another External Module
+----------------------------------------
 
        Sometimes, an external module uses exported symbols from
-       another external module. kbuild needs to have full knowledge of
+       another external module. Kbuild needs to have full knowledge of
        all symbols to avoid spitting out warnings about undefined
        symbols. Three solutions exist for this situation.
 
@@ -521,7 +522,7 @@ build.
                The top-level kbuild file would then look like::
 
                        #./Kbuild (or ./Makefile):
-                               obj-y := foo/ bar/
+                               obj-m := foo/ bar/
 
                And executing::
 
index ab92e98..5033938 100644 (file)
@@ -16,16 +16,21 @@ the kernel may be unreproducible, and how to avoid them.
 Timestamps
 ----------
 
-The kernel embeds a timestamp in two places:
+The kernel embeds timestamps in three places:
 
 * The version string exposed by ``uname()`` and included in
   ``/proc/version``
 
 * File timestamps in the embedded initramfs
 
-By default the timestamp is the current time.  This must be overridden
-using the `KBUILD_BUILD_TIMESTAMP`_ variable.  If you are building
-from a git commit, you could use its commit date.
+* If enabled via ``CONFIG_IKHEADERS``, file timestamps of kernel
+  headers embedded in the kernel or respective module,
+  exposed via ``/sys/kernel/kheaders.tar.xz``
+
+By default the timestamp is the current time and in the case of
+``kheaders`` the various files' modification times. This must
+be overridden using the `KBUILD_BUILD_TIMESTAMP`_ variable.
+If you are building from a git commit, you could use its commit date.
 
 The kernel does *not* use the ``__DATE__`` and ``__TIME__`` macros,
 and enables warnings if they are used.  If you incorporate external
index f51f925..c1f7f75 100644 (file)
@@ -23,6 +23,7 @@ Contents:
    intel/ice
    google/gve
    mellanox/mlx5
+   netronome/nfp
    pensando/ionic
 
 .. only::  subproject and html
index 67b6839..1393589 100644 (file)
@@ -36,8 +36,10 @@ Support
 =======
 For general Linux networking support, please use the netdev mailing
 list, which is monitored by Pensando personnel::
+
   netdev@vger.kernel.org
 
 For more specific support needs, please use the Pensando driver support
 email::
-       drivers@pensando.io
+
+  drivers@pensando.io
index ce7e7a0..dc60b13 100644 (file)
@@ -272,7 +272,7 @@ supported flags are:
 * MSG_DONTWAIT, i.e. non-blocking operation.
 
 recvmsg(2)
-^^^^^^^^^
+^^^^^^^^^^
 
 In most cases recvmsg(2) is needed if you want to extract more information than
 recvfrom(2) can provide. For example package priority and timestamp. The
index 9cb31c5..9bdb7d5 100644 (file)
@@ -92,16 +92,16 @@ under some conditions.
 Part III: Registering a Network Device to DIM
 ==============================================
 
-Net DIM API exposes the main function net_dim(struct net_dim *dim,
-struct net_dim_sample end_sample). This function is the entry point to the Net
+Net DIM API exposes the main function net_dim(struct dim *dim,
+struct dim_sample end_sample). This function is the entry point to the Net
 DIM algorithm and has to be called every time the driver would like to check if
 it should change interrupt moderation parameters. The driver should provide two
-data structures: struct net_dim and struct net_dim_sample. Struct net_dim
+data structures: struct dim and struct dim_sample. Struct dim
 describes the state of DIM for a specific object (RX queue, TX queue,
 other queues, etc.). This includes the current selected profile, previous data
 samples, the callback function provided by the driver and more.
-Struct net_dim_sample describes a data sample, which will be compared to the
-data sample stored in struct net_dim in order to decide on the algorithm's next
+Struct dim_sample describes a data sample, which will be compared to the
+data sample stored in struct dim in order to decide on the algorithm's next
 step. The sample should include bytes, packets and interrupts, measured by
 the driver.
 
@@ -110,9 +110,9 @@ main net_dim() function. The recommended method is to call net_dim() on each
 interrupt. Since Net DIM has a built-in moderation and it might decide to skip
 iterations under certain conditions, there is no need to moderate the net_dim()
 calls as well. As mentioned above, the driver needs to provide an object of type
-struct net_dim to the net_dim() function call. It is advised for each entity
-using Net DIM to hold a struct net_dim as part of its data structure and use it
-as the main Net DIM API object. The struct net_dim_sample should hold the latest
+struct dim to the net_dim() function call. It is advised for each entity
+using Net DIM to hold a struct dim as part of its data structure and use it
+as the main Net DIM API object. The struct dim_sample should hold the latest
 bytes, packets and interrupts count. No need to perform any calculations, just
 include the raw data.
 
@@ -132,19 +132,19 @@ usage is not complete but it should make the outline of the usage clear.
 
 my_driver.c:
 
-#include <linux/net_dim.h>
+#include <linux/dim.h>
 
 /* Callback for net DIM to schedule on a decision to change moderation */
 void my_driver_do_dim_work(struct work_struct *work)
 {
-       /* Get struct net_dim from struct work_struct */
-       struct net_dim *dim = container_of(work, struct net_dim,
-                                          work);
+       /* Get struct dim from struct work_struct */
+       struct dim *dim = container_of(work, struct dim,
+                                      work);
        /* Do interrupt moderation related stuff */
        ...
 
        /* Signal net DIM work is done and it should move to next iteration */
-       dim->state = NET_DIM_START_MEASURE;
+       dim->state = DIM_START_MEASURE;
 }
 
 /* My driver's interrupt handler */
@@ -152,13 +152,13 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
 {
        ...
        /* A struct to hold current measured data */
-       struct net_dim_sample dim_sample;
+       struct dim_sample dim_sample;
        ...
        /* Initiate data sample struct with current data */
-       net_dim_sample(my_entity->events,
-                      my_entity->packets,
-                      my_entity->bytes,
-                      &dim_sample);
+       dim_update_sample(my_entity->events,
+                         my_entity->packets,
+                         my_entity->bytes,
+                         &dim_sample);
        /* Call net DIM */
        net_dim(&my_entity->dim, dim_sample);
        ...
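
As a companion to the documentation example above, the following is a small,
hedged sketch of the initialization side that the example assumes has already
happened: embedding struct dim in the hypothetical my_driver_entity and
registering the work callback. It is not part of the patch; only struct dim,
its work/mode/state members and the DIM_* constants come from <linux/dim.h>.

	#include <linux/dim.h>
	#include <linux/workqueue.h>

	/* my_driver_entity and my_driver_do_dim_work() are the hypothetical
	 * names used in the documentation example above.
	 */
	static void my_driver_dim_init(struct my_driver_entity *my_entity)
	{
		/* Register the callback net_dim() schedules when it decides
		 * to change interrupt moderation parameters.
		 */
		INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);
		my_entity->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
		my_entity->dim.state = DIM_START_MEASURE;
	}
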
index 0e2ef74..0924d29 100644 (file)
@@ -130,8 +130,8 @@ a full power-on reset sequence and the power-on defaults are restored to the
 device by hardware just as at initial power up.
 
 PCI devices supporting the PCI PM Spec can be programmed to generate PMEs
-while in a low-power state (D1-D3), but they are not required to be capable
-of generating PMEs from all supported low-power states.  In particular, the
+while in any power state (D0-D3), but they are not required to be capable
+of generating PMEs from all supported power states.  In particular, the
 capability of generating PMEs from D3cold is optional and depends on the
 presence of additional voltage (3.3Vaux) allowing the device to remain
 sufficiently active to generate a wakeup signal.
@@ -426,12 +426,12 @@ pm->runtime_idle() callback.
 2.4. System-Wide Power Transitions
 ----------------------------------
 There are a few different types of system-wide power transitions, described in
-Documentation/driver-api/pm/devices.rst.  Each of them requires devices to be handled
-in a specific way and the PM core executes subsystem-level power management
-callbacks for this purpose.  They are executed in phases such that each phase
-involves executing the same subsystem-level callback for every device belonging
-to the given subsystem before the next phase begins.  These phases always run
-after tasks have been frozen.
+Documentation/driver-api/pm/devices.rst.  Each of them requires devices to be
+handled in a specific way and the PM core executes subsystem-level power
+management callbacks for this purpose.  They are executed in phases such that
+each phase involves executing the same subsystem-level callback for every device
+belonging to the given subsystem before the next phase begins.  These phases
+always run after tasks have been frozen.
 
 2.4.1. System Suspend
 ^^^^^^^^^^^^^^^^^^^^^
@@ -600,17 +600,17 @@ using the following PCI bus type's callbacks::
 
 respectively.
 
-The first of them, pci_pm_thaw_noirq(), is analogous to pci_pm_resume_noirq(),
-but it doesn't put the device into the full power state and doesn't attempt to
-restore its standard configuration registers.  It also executes the device
-driver's pm->thaw_noirq() callback, if defined, instead of pm->resume_noirq().
+The first of them, pci_pm_thaw_noirq(), is analogous to pci_pm_resume_noirq().
+It puts the device into the full power state and restores its standard
+configuration registers.  It also executes the device driver's pm->thaw_noirq()
+callback, if defined, instead of pm->resume_noirq().
 
 The pci_pm_thaw() routine is similar to pci_pm_resume(), but it runs the device
 driver's pm->thaw() callback instead of pm->resume().  It is executed
 asynchronously for different PCI devices that don't depend on each other in a
 known way.
 
-The complete phase it the same as for system resume.
+The complete phase is the same as for system resume.
 
 After saving the image, devices need to be powered down before the system can
 enter the target sleep state (ACPI S4 for ACPI-based systems).  This is done in
@@ -636,12 +636,12 @@ System restore requires a hibernation image to be loaded into memory and the
 pre-hibernation memory contents to be restored before the pre-hibernation system
 activity can be resumed.
 
-As described in Documentation/driver-api/pm/devices.rst, the hibernation image is loaded
-into memory by a fresh instance of the kernel, called the boot kernel, which in
-turn is loaded and run by a boot loader in the usual way.  After the boot kernel
-has loaded the image, it needs to replace its own code and data with the code
-and data of the "hibernated" kernel stored within the image, called the image
-kernel.  For this purpose all devices are frozen just like before creating
+As described in Documentation/driver-api/pm/devices.rst, the hibernation image
+is loaded into memory by a fresh instance of the kernel, called the boot kernel,
+which in turn is loaded and run by a boot loader in the usual way.  After the
+boot kernel has loaded the image, it needs to replace its own code and data with
+the code and data of the "hibernated" kernel stored within the image, called the
+image kernel.  For this purpose all devices are frozen just like before creating
 the image during hibernation, in the
 
        prepare, freeze, freeze_noirq
@@ -691,12 +691,12 @@ controlling the runtime power management of their devices.
 
 At the time of this writing there are two ways to define power management
 callbacks for a PCI device driver, the recommended one, based on using a
-dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and the
-"legacy" one, in which the .suspend(), .suspend_late(), .resume_early(), and
-.resume() callbacks from struct pci_driver are used.  The legacy approach,
-however, doesn't allow one to define runtime power management callbacks and is
-not really suitable for any new drivers.  Therefore it is not covered by this
-document (refer to the source code to learn more about it).
+dev_pm_ops structure described in Documentation/driver-api/pm/devices.rst, and
+the "legacy" one, in which the .suspend() and .resume() callbacks from struct
+pci_driver are used.  The legacy approach, however, doesn't allow one to define
+runtime power management callbacks and is not really suitable for any new
+drivers.  Therefore it is not covered by this document (refer to the source code
+to learn more about it).
 
 It is recommended that all PCI device drivers define a struct dev_pm_ops object
 containing pointers to power management (PM) callbacks that will be executed by
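
To make the recommended dev_pm_ops approach described in this hunk concrete,
here is a hedged sketch of a skeleton PCI driver. The "foo" names, the device
ID and the empty callbacks are placeholders, not anything taken from the patch.

	#include <linux/module.h>
	#include <linux/pci.h>
	#include <linux/pm.h>

	static int foo_suspend(struct device *dev)
	{
		/* Quiesce the device; the PCI core handles the power state change. */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		/* Re-initialize after the PCI core has restored config space. */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static const struct pci_device_id foo_ids[] = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder vendor/device ID */
		{ }
	};

	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		return pcim_enable_device(pdev);
	}

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_ids,
		.probe		= foo_probe,
		.driver		= {
			.pm	= &foo_pm_ops,
		},
	};
	module_pci_driver(foo_driver);
	MODULE_LICENSE("GPL");
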
index f4a2198..ada573b 100644 (file)
@@ -56,7 +56,7 @@ instead of ``double-indenting`` the ``case`` labels.  E.g.:
        case 'K':
        case 'k':
                mem <<= 10;
-               /* fall through */
+               fallthrough;
        default:
                break;
        }
index 053b24a..179f2a5 100644 (file)
@@ -122,14 +122,27 @@ memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`)
 
 Implicit switch case fall-through
 ---------------------------------
-The C language allows switch cases to "fall through" when
-a "break" statement is missing at the end of a case. This,
-however, introduces ambiguity in the code, as it's not always
-clear if the missing break is intentional or a bug. As there
-have been a long list of flaws `due to missing "break" statements
+The C language allows switch cases to "fall-through" when a "break" statement
+is missing at the end of a case. This, however, introduces ambiguity in the
+code, as it's not always clear if the missing break is intentional or a bug.
+
+As there have been a long list of flaws `due to missing "break" statements
 <https://cwe.mitre.org/data/definitions/484.html>`_, we no longer allow
-"implicit fall-through". In order to identify an intentional fall-through
-case, we have adopted the marking used by static analyzers: a comment
-saying `/* Fall through */`. Once the C++17 `__attribute__((fallthrough))`
-is more widely handled by C compilers, static analyzers, and IDEs, we can
-switch to using that instead.
+"implicit fall-through".
+
+In order to identify intentional fall-through cases, we have adopted a
+pseudo-keyword macro 'fallthrough' which expands to gcc's extension
+__attribute__((__fallthrough__)).  `Statement Attributes
+<https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html>`_
+
+When the C17/C18 [[fallthrough]] syntax is more commonly supported by
+C compilers, static analyzers, and IDEs, we can switch to using that syntax
+for the macro pseudo-keyword.
+
+All switch/case blocks must end in one of:
+
+       break;
+       fallthrough;
+       continue;
+       goto <label>;
+       return [expression];
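
To illustrate the pseudo-keyword described above, here is a small sketch (not
from the patch) that rewrites the classic unit-suffix switch from
coding-style.rst using fallthrough; it assumes the definition from
include/linux/compiler_attributes.h mentioned in this document.

	#include <linux/compiler_attributes.h>	/* provides the 'fallthrough' pseudo-keyword */

	/* Scale a byte count by a K/M/G suffix, marking intentional fall-through. */
	static unsigned long scale_by_suffix(char suffix, unsigned long mem)
	{
		switch (suffix) {
		case 'G':
		case 'g':
			mem <<= 10;
			fallthrough;
		case 'M':
		case 'm':
			mem <<= 10;
			fallthrough;
		case 'K':
		case 'k':
			mem <<= 10;
			break;
		default:
			break;
		}
		return mem;
	}
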
diff --git a/Documentation/usb/rio.rst b/Documentation/usb/rio.rst
deleted file mode 100644 (file)
index ea73475..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-============
-Diamonds Rio
-============
-
-Copyright (C) 1999, 2000 Bruce Tenison
-
-Portions Copyright (C) 1999, 2000 David Nelson
-
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-Changes
-=======
-
-- Initial Revision
-
-
-Overview
-========
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.
-Before I explain how to use this to access the Rio500 please be warned:
-
-.. warning::
-
-   Please note that this software is still under development.  The authors
-   are in no way responsible for any damage that may occur, no matter how
-   inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore
-down to nothing and appear to have corrupted the flash memory. My RIO
-needed to be replaced as a result.  Diamond tech support is aware of the
-problem.  Do NOT allow your batteries to wear down to nothing before
-changing them.  It appears RIO 500 firmware does not handle low battery
-power well at all.
-
-On systems with OHCI controllers, the kernel OHCI code appears to have
-power on problems with some chipsets.  If you are having problems
-connecting to your RIO 500, try turning it on first and then plugging it
-into the USB cable.
-
-Contact Information
--------------------
-
-   The main page for the project is hosted at sourceforge.net in the following
-   URL: <http://rio500.sourceforge.net>. You can also go to the project's
-   sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
-   There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use
-with some important information regarding the communication with the Rio.
-
-Additional Information and userspace tools
-
-       http://rio500.sourceforge.net/
-
-
-Requirements
-============
-
-A host with a USB port running a Linux kernel with RIO 500 support enabled.
-
-The driver is a module called rio500, which should be automatically loaded
-as you plug in your device. If that fails you can manually load it with
-
-  modprobe rio500
-
-Udev should automatically create a device node as soon as plug in your device.
-If that fails, you can manually add a device for the USB rio500::
-
-  mknod /dev/usb/rio500 c 180 64
-
-In that case, set appropriate permissions for /dev/usb/rio500 (don't forget
-about group and world permissions).  Both read and write permissions are
-required for proper operation.
-
-That's it.  The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-Limits
-======
-
-You can use only a single rio500 device at a time with your computer.
-
-Bugs
-====
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
index 25211e9..165d9a2 100644 (file)
@@ -2165,12 +2165,10 @@ F:      arch/arm64/boot/dts/realtek/
 F:     Documentation/devicetree/bindings/arm/realtek.yaml
 
 ARM/RENESAS ARM64 ARCHITECTURE
-M:     Simon Horman <horms@verge.net.au>
 M:     Geert Uytterhoeven <geert+renesas@glider.be>
 M:     Magnus Damm <magnus.damm@gmail.com>
 L:     linux-renesas-soc@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-renesas-soc/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
 S:     Supported
 F:     arch/arm64/boot/dts/renesas/
@@ -2282,12 +2280,10 @@ S:      Maintained
 F:     drivers/media/platform/s5p-mfc/
 
 ARM/SHMOBILE ARM ARCHITECTURE
-M:     Simon Horman <horms@verge.net.au>
 M:     Geert Uytterhoeven <geert+renesas@glider.be>
 M:     Magnus Damm <magnus.damm@gmail.com>
 L:     linux-renesas-soc@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-renesas-soc/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
 S:     Supported
 F:     arch/arm/boot/dts/emev2*
@@ -6112,7 +6108,10 @@ M:       Gao Xiang <gaoxiang25@huawei.com>
 M:     Chao Yu <yuchao0@huawei.com>
 L:     linux-erofs@lists.ozlabs.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
+F:     Documentation/filesystems/erofs.txt
 F:     fs/erofs/
+F:     include/trace/events/erofs.h
 
 ERRSEQ ERROR TRACKING INFRASTRUCTURE
 M:     Jeff Layton <jlayton@kernel.org>
@@ -9075,6 +9074,7 @@ F:        security/keys/
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
 M:     Daniel Thompson <daniel.thompson@linaro.org>
+R:     Douglas Anderson <dianders@chromium.org>
 W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
@@ -9122,7 +9122,7 @@ F:        drivers/auxdisplay/ks0108.c
 F:     include/linux/ks0108.h
 
 L3MDEV
-M:     David Ahern <dsa@cumulusnetworks.com>
+M:     David Ahern <dsahern@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     net/l3mdev
@@ -9183,6 +9183,7 @@ M:        Pavel Machek <pavel@ucw.cz>
 R:     Dan Murphy <dmurphy@ti.com>
 L:     linux-leds@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/leds/
 F:     drivers/leds/
@@ -10254,7 +10255,7 @@ MEDIATEK ETHERNET DRIVER
 M:     Felix Fietkau <nbd@openwrt.org>
 M:     John Crispin <john@phrozen.org>
 M:     Sean Wang <sean.wang@mediatek.com>
-M:     Nelson Chang <nelson.chang@mediatek.com>
+M:     Mark Lee <Mark-MC.Lee@mediatek.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/mediatek/
@@ -11543,6 +11544,7 @@ NSDEPS
 M:     Matthias Maennich <maennich@google.com>
 S:     Maintained
 F:     scripts/nsdeps
+F:     Documentation/core-api/symbol-namespaces.rst
 
 NTB AMD DRIVER
 M:     Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
@@ -12310,12 +12312,15 @@ F:    arch/parisc/
 F:     Documentation/parisc/
 F:     drivers/parisc/
 F:     drivers/char/agp/parisc-agp.c
+F:     drivers/input/misc/hp_sdc_rtc.c
 F:     drivers/input/serio/gscps2.c
+F:     drivers/input/serio/hp_sdc*
 F:     drivers/parport/parport_gsc.*
 F:     drivers/tty/serial/8250/8250_gsc.c
 F:     drivers/video/fbdev/sti*
 F:     drivers/video/console/sti*
 F:     drivers/video/logo/logo_parisc*
+F:     include/linux/hp_sdc.h
 
 PARMAN
 M:     Jiri Pirko <jiri@mellanox.com>
@@ -13358,7 +13363,7 @@ S:      Maintained
 F:     drivers/scsi/qla1280.[ch]
 
 QLOGIC QLA2XXX FC-SCSI DRIVER
-M:     qla2xxx-upstream@qlogic.com
+M:     hmadhani@marvell.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     Documentation/scsi/LICENSE.qla2xxx
@@ -16761,13 +16766,6 @@ W:     http://www.linux-usb.org/usbnet
 S:     Maintained
 F:     drivers/net/usb/dm9601.c
 
-USB DIAMOND RIO500 DRIVER
-M:     Cesar Miquel <miquel@df.uba.ar>
-L:     rio500-users@lists.sourceforge.net
-W:     http://rio500.sourceforge.net
-S:     Maintained
-F:     drivers/usb/misc/rio500*
-
 USB EHCI DRIVER
 M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
@@ -17434,7 +17432,7 @@ F:      include/linux/regulator/
 K:     regulator_get_optional
 
 VRF
-M:     David Ahern <dsa@cumulusnetworks.com>
+M:     David Ahern <dsahern@kernel.org>
 M:     Shrijeet Mukherjee <shrijeet@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
index 6f54f2f..5475cdb 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Bobtail Squid
+EXTRAVERSION = -rc4
+NAME = Nesting Opossum
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -206,24 +206,8 @@ ifndef KBUILD_CHECKSRC
   KBUILD_CHECKSRC = 0
 endif
 
-# Use make M=dir to specify directory of external module to build
-# Old syntax make ... SUBDIRS=$PWD is still supported
-# Setting the environment variable KBUILD_EXTMOD take precedence
-ifdef SUBDIRS
-  $(warning ================= WARNING ================)
-  $(warning 'SUBDIRS' will be removed after Linux 5.3)
-  $(warning )
-  $(warning If you are building an individual subdirectory)
-  $(warning in the kernel tree, you can do like this:)
-  $(warning $$ make path/to/dir/you/want/to/build/)
-  $(warning (Do not forget the trailing slash))
-  $(warning )
-  $(warning If you are building an external module,)
-  $(warning Please use 'M=' or 'KBUILD_EXTMOD' instead)
-  $(warning ==========================================)
-  KBUILD_EXTMOD ?= $(SUBDIRS)
-endif
-
+# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
+# directory of external module to build. Setting M= takes precedence.
 ifeq ("$(origin M)", "command line")
   KBUILD_EXTMOD := $(M)
 endif
@@ -498,7 +482,6 @@ export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
-export KBUILD_ARFLAGS
 
 # Files to ignore in find ... statements
 
@@ -616,7 +599,7 @@ endif
 # in addition to whatever we do anyway.
 # Just "make" or "make all" shall build modules as well
 
-ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
+ifneq ($(filter all _all modules nsdeps,$(MAKECMDGOALS)),)
   KBUILD_MODULES := 1
 endif
 
@@ -914,9 +897,6 @@ ifdef CONFIG_RETPOLINE
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 
-# use the deterministic mode of AR if available
-KBUILD_ARFLAGS := $(call ar-option,D)
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
@@ -1057,7 +1037,7 @@ export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
 export KBUILD_VMLINUX_LIBS := $(libs-y1)
 export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
 export LDFLAGS_vmlinux
-# used by scripts/package/Makefile
+# used by scripts/Makefile.package
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
 
 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
@@ -1237,9 +1217,8 @@ PHONY += kselftest
 kselftest:
        $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
 
-PHONY += kselftest-clean
-kselftest-clean:
-       $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
+kselftest-%: FORCE
+       $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests $*
 
 PHONY += kselftest-merge
 kselftest-merge:
index f94c732..0021580 100644 (file)
@@ -71,10 +71,10 @@ static int pci_mmap_resource(struct kobject *kobj,
        struct pci_bus_region bar;
        int i;
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++)
+       for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if (res == &pdev->resource[i])
                        break;
-       if (i >= PCI_ROM_RESOURCE)
+       if (i >= PCI_STD_NUM_BARS)
                return -ENODEV;
 
        if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start))
@@ -115,7 +115,7 @@ void pci_remove_resource_files(struct pci_dev *pdev)
 {
        int i;
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct bin_attribute *res_attr;
 
                res_attr = pdev->res_attr[i];
@@ -232,7 +232,7 @@ int pci_create_resource_files(struct pci_dev *pdev)
        int retval;
 
        /* Expose the PCI resources from this device as files */
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 
                /* skip empty resources */
                if (!pci_resource_len(pdev, i))
index 393d4f5..1b50569 100644 (file)
@@ -17,7 +17,6 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += mmiowb.h
-generic-y += msi.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
index 18f70b3..204bccf 100644 (file)
        pinctrl-0 = <&mmc0_pins_default>;
 };
 
-&gpio0 {
+&gpio0_target {
        /* Do not idle the GPIO used for holding the VTT regulator */
        ti,no-reset-on-init;
        ti,no-idle-on-init;
index 9915c89..7a9eb2b 100644 (file)
                        ranges = <0x0 0x5000 0x1000>;
                };
 
-               target-module@7000 {                    /* 0x44e07000, ap 14 20.0 */
+               gpio0_target: target-module@7000 {      /* 0x44e07000, ap 14 20.0 */
                        compatible = "ti,sysc-omap2", "ti,sysc";
                        ti,hwmods = "gpio1";
                        reg = <0x7000 0x4>,
                        reg = <0xe000 0x4>,
                              <0xe054 0x4>;
                        reg-names = "rev", "sysc";
-                       ti,sysc-midle ;
+                       ti,sysc-midle = <SYSC_IDLE_FORCE>,
+                                       <SYSC_IDLE_NO>,
+                                       <SYSC_IDLE_SMART>;
                        ti,sysc-sidle = <SYSC_IDLE_FORCE>,
                                        <SYSC_IDLE_NO>,
                                        <SYSC_IDLE_SMART>;
index 848e2a8..14bbc43 100644 (file)
                                ti,hwmods = "dss_dispc";
                                clocks = <&disp_clk>;
                                clock-names = "fck";
+
+                               max-memory-bandwidth = <230000000>;
                        };
 
                        rfbi: rfbi@4832a800 {
index ea0e7c1..5cac2dd 100644 (file)
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 129 1>, <&edma_xbar 128 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 22>,
+                               clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 0>,
                                         <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
                                         <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 28>;
                                clock-names = "fck", "ahclkx", "ahclkr";
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 131 1>, <&edma_xbar 130 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 22>,
-                                        <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 24>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 0>,
+                                        <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 28>;
                                clock-names = "fck", "ahclkx", "ahclkr";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x68000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 133 1>, <&edma_xbar 132 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x6c000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 135 1>, <&edma_xbar 134 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x70000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 137 1>, <&edma_xbar 136 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x74000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 139 1>, <&edma_xbar 138 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x78000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 141 1>, <&edma_xbar 140 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x7c000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 143 1>, <&edma_xbar 142 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
index 3621b7d..9980c10 100644 (file)
        pinctrl-1 = <&ephy_leds_pins>;
        status = "okay";
 
+       gmac0: mac@0 {
+               compatible = "mediatek,eth-mac";
+               reg = <0>;
+               phy-mode = "2500base-x";
+               fixed-link {
+                       speed = <2500>;
+                       full-duplex;
+                       pause;
+               };
+       };
+
        gmac1: mac@1 {
                compatible = "mediatek,eth-mac";
                reg = <1>;
+               phy-mode = "gmii";
                phy-handle = <&phy0>;
        };
 
@@ -78,7 +90,6 @@
 
                phy0: ethernet-phy@0 {
                        reg = <0>;
-                       phy-mode = "gmii";
                };
        };
 };
index 9608bc2..867b881 100644 (file)
                        compatible = "mediatek,mt7629-sgmiisys", "syscon";
                        reg = <0x1b128000 0x3000>;
                        #clock-cells = <1>;
-                       mediatek,physpeed = "2500";
                };
 
                sgmiisys1: syscon@1b130000 {
                        compatible = "mediatek,mt7629-sgmiisys", "syscon";
                        reg = <0x1b130000 0x3000>;
                        #clock-cells = <1>;
-                       mediatek,physpeed = "2500";
                };
        };
 };
index d01fc87..b6ef1a7 100644 (file)
                        spi-max-frequency = <100000>;
                        spi-cpol;
                        spi-cpha;
+                       spi-cs-high;
 
                        backlight= <&backlight>;
                        label = "lcd";
index a53657b..bda454d 100644 (file)
@@ -8,6 +8,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
        #address-cells = <1>;
                 * cooling.
                 */
                cpu_thermal: cpu-thermal {
-                       polling-delay-passive = <0>;
-                       polling-delay = <1000>;
+                       polling-delay-passive = <250>;
+                       /*
+                        * This sensor fires interrupts to update the thermal
+                        * zone, so no polling is needed.
+                        */
+                       polling-delay = <0>;
 
                        thermal-sensors = <&thermal>;
 
@@ -79,7 +84,7 @@
 
                        cooling-maps {
                                trip = <&cpu_alert>;
-                               cooling-device = <&CPU0 0 2>;
+                               cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                                contribution = <100>;
                        };
                };
index ce823c4..4c268b7 100644 (file)
                        interrupts = <39>;
                        clocks = <&ccu CLK_AHB_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <64>;
                        clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <40>;
                        clocks = <&ccu CLK_AHB_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <65>;
                        clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index cfb1efc..6befa23 100644 (file)
                        interrupts = <39>;
                        clocks = <&ccu CLK_AHB_EHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <40>;
                        clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index bbeb743..ac76380 100644 (file)
                        clocks = <&ccu CLK_AHB1_EHCI0>;
                        resets = <&ccu RST_AHB1_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_AHB1_OHCI0>, <&ccu CLK_USB_OHCI0>;
                        resets = <&ccu RST_AHB1_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_AHB1_EHCI1>;
                        resets = <&ccu RST_AHB1_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_AHB1_OHCI1>, <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_AHB1_OHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 49380de..874231b 100644 (file)
                        interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_AHB_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_AHB_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 52eed0a..f292f96 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI>;
                        resets = <&ccu RST_BUS_EHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_OHCI>, <&ccu CLK_USB_OHCI>;
                        resets = <&ccu RST_BUS_OHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 523be66..74bb053 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI0>;
                        resets = <&ccu RST_BUS_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>;
                        resets = <&ccu RST_BUS_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index bde0681..c9c2688 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI2>;
                        resets = <&ccu RST_BUS_EHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI2>;
                        resets = <&ccu RST_BUS_OHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index c34d505..b9b6fb0 100644 (file)
                        clocks = <&usb_clocks CLK_BUS_HCI0>;
                        resets = <&usb_clocks RST_USB0_HCI>;
                        phys = <&usbphy1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&usb_clocks CLK_USB_OHCI0>;
                        resets = <&usb_clocks RST_USB0_HCI>;
                        phys = <&usbphy1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&usb_clocks CLK_BUS_HCI1>;
                        resets = <&usb_clocks RST_USB1_HCI>;
                        phys = <&usbphy2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&usb_clocks CLK_BUS_HCI2>;
                        resets = <&usb_clocks RST_USB2_HCI>;
                        phys = <&usbphy3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&usb_clocks CLK_USB_OHCI2>;
                        resets = <&usb_clocks RST_USB2_HCI>;
                        phys = <&usbphy3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index eba190b..107eeaf 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
                        resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI2>;
                        resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
                        resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
                        phys = <&usbphy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI3>;
                        resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
                        phys = <&usbphy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 5ae5b52..ef484c4 100644 (file)
@@ -91,7 +91,6 @@ CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 CONFIG_MSDOS_FS=y
index e4f6442..4fec2ec 100644 (file)
@@ -195,7 +195,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index b34970c..01e3c0f 100644 (file)
@@ -228,7 +228,7 @@ CONFIG_RTC_DRV_OMAP=m
 CONFIG_DMADEVICES=y
 CONFIG_TI_EDMA=y
 CONFIG_COMMON_CLK_PWM=m
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_DA8XX_REMOTEPROC=m
 CONFIG_MEMORY=y
 CONFIG_TI_AEMIF=m
index 13ba532..e4c8def 100644 (file)
@@ -415,7 +415,7 @@ CONFIG_SPI_SH_MSIOF=m
 CONFIG_SPI_SH_HSPI=y
 CONFIG_SPI_SIRF=y
 CONFIG_SPI_STM32=m
-CONFIG_SPI_STM32_QSPI=m
+CONFIG_SPI_STM32_QSPI=y
 CONFIG_SPI_SUN4I=y
 CONFIG_SPI_SUN6I=y
 CONFIG_SPI_TEGRA114=y
@@ -933,7 +933,7 @@ CONFIG_BCM2835_MBOX=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_ST_REMOTEPROC=m
 CONFIG_RPMSG_VIRTIO=m
 CONFIG_ASPEED_LPC_CTRL=m
index 64eb896..d3f5097 100644 (file)
@@ -364,6 +364,7 @@ CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
 CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
 CONFIG_DRM_TILCDC=m
 CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_TI_TFP410=m
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
@@ -423,6 +424,7 @@ CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_SIMPLE=m
 CONFIG_USB_SERIAL_FTDI_SIO=m
 CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OPTION=m
 CONFIG_USB_TEST=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_AM335X_PHY_USB=m
@@ -460,6 +462,7 @@ CONFIG_MMC_SDHCI_OMAP=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_CPCAP=m
+CONFIG_LEDS_LM3532=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_PCA963X=m
 CONFIG_LEDS_PWM=m
@@ -481,7 +484,7 @@ CONFIG_RTC_DRV_OMAP=m
 CONFIG_RTC_DRV_CPCAP=m
 CONFIG_DMADEVICES=y
 CONFIG_OMAP_IOMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_OMAP_REMOTEPROC=m
 CONFIG_WKUP_M3_RPROC=m
 CONFIG_SOC_TI=y
index 787c3f9..b817c57 100644 (file)
@@ -581,7 +581,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index 95b5a4f..73ed73a 100644 (file)
@@ -327,7 +327,6 @@ CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
index 4fb51d6..a1cdbfa 100644 (file)
@@ -189,7 +189,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index b24df84..043b0b1 100644 (file)
@@ -98,6 +98,7 @@ config CRYPTO_AES_ARM_CE
        tristate "Accelerated AES using ARMv8 Crypto Extensions"
        depends on KERNEL_MODE_NEON
        select CRYPTO_BLKCIPHER
+       select CRYPTO_LIB_AES
        select CRYPTO_SIMD
        help
          Use an implementation of AES in CBC, CTR and XTS modes that uses
index b978cdf..4d17073 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/assembler.h>
 
        .text
+       .arch           armv8-a
        .fpu            crypto-neon-fp-armv8
        .align          3
 
index 68ca86f..fa579b2 100644 (file)
@@ -12,7 +12,6 @@ generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
 generic-y += mmiowb.h
-generic-y += msi.h
 generic-y += parport.h
 generic-y += preempt.h
 generic-y += seccomp.h
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
deleted file mode 100644 (file)
index ec154e7..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
index dd939e1..29fd136 100644 (file)
@@ -763,7 +763,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0010,
        .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+                         SYSC_HAS_RESET_STATUS,
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                          SIDLE_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type2,
index 2bcb634..5452477 100644 (file)
@@ -231,8 +231,9 @@ static struct omap_hwmod am33xx_control_hwmod = {
 static struct omap_hwmod_class_sysconfig lcdc_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x54,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE,
+       .idlemodes      = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                         MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART,
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
 
index 1fde1bf..7ac9af5 100644 (file)
@@ -74,83 +74,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
        return 0;
 }
 
-/*
- * This API is to be called during init to set the various voltage
- * domains to the voltage as per the opp table. Typically we boot up
- * at the nominal voltage. So this function finds out the rate of
- * the clock associated with the voltage domain, finds out the correct
- * opp entry and sets the voltage domain to the voltage specified
- * in the opp entry
- */
-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
-                                        const char *oh_name)
-{
-       struct voltagedomain *voltdm;
-       struct clk *clk;
-       struct dev_pm_opp *opp;
-       unsigned long freq, bootup_volt;
-       struct device *dev;
-
-       if (!vdd_name || !clk_name || !oh_name) {
-               pr_err("%s: invalid parameters\n", __func__);
-               goto exit;
-       }
-
-       if (!strncmp(oh_name, "mpu", 3))
-               /* 
-                * All current OMAPs share voltage rail and clock
-                * source, so CPU0 is used to represent the MPU-SS.
-                */
-               dev = get_cpu_device(0);
-       else
-               dev = omap_device_get_by_hwmod_name(oh_name);
-
-       if (IS_ERR(dev)) {
-               pr_err("%s: Unable to get dev pointer for hwmod %s\n",
-                       __func__, oh_name);
-               goto exit;
-       }
-
-       voltdm = voltdm_lookup(vdd_name);
-       if (!voltdm) {
-               pr_err("%s: unable to get vdd pointer for vdd_%s\n",
-                       __func__, vdd_name);
-               goto exit;
-       }
-
-       clk =  clk_get(NULL, clk_name);
-       if (IS_ERR(clk)) {
-               pr_err("%s: unable to get clk %s\n", __func__, clk_name);
-               goto exit;
-       }
-
-       freq = clk_get_rate(clk);
-       clk_put(clk);
-
-       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-       if (IS_ERR(opp)) {
-               pr_err("%s: unable to find boot up OPP for vdd_%s\n",
-                       __func__, vdd_name);
-               goto exit;
-       }
-
-       bootup_volt = dev_pm_opp_get_voltage(opp);
-       dev_pm_opp_put(opp);
-
-       if (!bootup_volt) {
-               pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
-                      __func__, vdd_name);
-               goto exit;
-       }
-
-       voltdm_scale(voltdm, bootup_volt);
-       return 0;
-
-exit:
-       pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
-       return -EINVAL;
-}
-
 #ifdef CONFIG_SUSPEND
 static int omap_pm_enter(suspend_state_t suspend_state)
 {
@@ -208,25 +131,6 @@ void omap_common_suspend_init(void *pm_suspend)
 }
 #endif /* CONFIG_SUSPEND */
 
-static void __init omap3_init_voltages(void)
-{
-       if (!soc_is_omap34xx())
-               return;
-
-       omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
-       omap2_set_init_voltage("core", "l3_ick", "l3_main");
-}
-
-static void __init omap4_init_voltages(void)
-{
-       if (!soc_is_omap44xx())
-               return;
-
-       omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
-       omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
-       omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
-}
-
 int __maybe_unused omap_pm_nop_init(void)
 {
        return 0;
@@ -246,10 +150,6 @@ int __init omap2_common_pm_late_init(void)
        omap4_twl_init();
        omap_voltage_late_init();
 
-       /* Initialize the voltages */
-       omap3_init_voltages();
-       omap4_init_voltages();
-
        /* Smartreflex device init */
        omap_devinit_smartreflex();
 
index 7ed2898..c32d047 100644 (file)
@@ -1,3 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o
-obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
deleted file mode 100644 (file)
index d687a73..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2015, Linaro Limited, Shannon Zhao
- */
-
-#include <linux/efi.h>
-#include <xen/xen-ops.h>
-#include <asm/xen/xen-ops.h>
-
-/* Set XEN EFI runtime services function pointers. Other fields of struct efi,
- * e.g. efi.systab, will be set like normal EFI.
- */
-void __init xen_efi_runtime_setup(void)
-{
-       efi.get_time                 = xen_efi_get_time;
-       efi.set_time                 = xen_efi_set_time;
-       efi.get_wakeup_time          = xen_efi_get_wakeup_time;
-       efi.set_wakeup_time          = xen_efi_set_wakeup_time;
-       efi.get_variable             = xen_efi_get_variable;
-       efi.get_next_variable        = xen_efi_get_next_variable;
-       efi.set_variable             = xen_efi_set_variable;
-       efi.query_variable_info      = xen_efi_query_variable_info;
-       efi.update_capsule           = xen_efi_update_capsule;
-       efi.query_capsule_caps       = xen_efi_query_capsule_caps;
-       efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
-       efi.reset_system             = xen_efi_reset_system;
-}
-EXPORT_SYMBOL_GPL(xen_efi_runtime_setup);
index 1e57692..dd6804a 100644 (file)
@@ -15,7 +15,6 @@
 #include <xen/xen-ops.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/xen-ops.h>
 #include <asm/system_misc.h>
 #include <asm/efi.h>
 #include <linux/interrupt.h>
@@ -437,7 +436,7 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
-EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
 EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
 EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
index 2b2c208..38fa917 100644 (file)
@@ -28,7 +28,10 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 
        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
-                       flags |= __GFP_DMA;
+                       if (IS_ENABLED(CONFIG_ZONE_DMA32))
+                               flags |= __GFP_DMA32;
+                       else
+                               flags |= __GFP_DMA;
                        break;
                }
        }
index 41a9b42..3f047af 100644 (file)
@@ -110,7 +110,6 @@ config ARM64
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
        select GENERIC_GETTIMEOFDAY
-       select GENERIC_COMPAT_VDSO if (!CPU_BIG_ENDIAN && COMPAT)
        select HANDLE_DOMAIN_IRQ
        select HARDIRQS_SW_RESEND
        select HAVE_PCI
@@ -617,6 +616,23 @@ config CAVIUM_ERRATUM_30115
 
          If unsure, say Y.
 
+config CAVIUM_TX2_ERRATUM_219
+       bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+       default y
+       help
+         On Cavium ThunderX2, a load, store or prefetch instruction between a
+         TTBR update and the corresponding context synchronizing operation can
+         cause a spurious Data Abort to be delivered to any hardware thread in
+         the CPU core.
+
+         Work around the issue by avoiding the problematic code sequence and
+         trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+         trap handler performs the corresponding register access, skips the
+         instruction and ensures context synchronization by virtue of the
+         exception return.
+
+         If unsure, say Y.
+
 config QCOM_FALKOR_ERRATUM_1003
        bool "Falkor E1003: Incorrect translation due to ASID change"
        default y
@@ -1159,7 +1175,7 @@ menuconfig COMPAT
 if COMPAT
 
 config KUSER_HELPERS
-       bool "Enable kuser helpers page for 32 bit applications"
+       bool "Enable kuser helpers page for 32-bit applications"
        default y
        help
          Warning: disabling this option may break 32-bit user programs.
@@ -1185,6 +1201,18 @@ config KUSER_HELPERS
          Say N here only if you are absolutely certain that you do not
          need these helpers; otherwise, the safe option is to say Y.
 
+config COMPAT_VDSO
+       bool "Enable vDSO for 32-bit applications"
+       depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != ""
+       select GENERIC_COMPAT_VDSO
+       default y
+       help
+         Place in the process address space of 32-bit applications an
+         ELF shared object providing fast implementations of gettimeofday
+         and clock_gettime.
+
+         You must have a 32-bit build of glibc 2.22 or later for programs
+         to seamlessly take advantage of this.
 
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
index 84a3d50..2c0238c 100644 (file)
@@ -53,22 +53,6 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable)
   endif
 endif
 
-ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
-  CROSS_COMPILE_COMPAT ?= $(CONFIG_CROSS_COMPILE_COMPAT_VDSO:"%"=%)
-
-  ifeq ($(CONFIG_CC_IS_CLANG), y)
-    $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
-  else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
-    $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
-  else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
-    $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
-  else
-    export CROSS_COMPILE_COMPAT
-    export CONFIG_COMPAT_VDSO := y
-    compat_vdso := -DCONFIG_COMPAT_VDSO=1
-  endif
-endif
-
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)     \
                   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
index 2b6345d..78c82a6 100644 (file)
 
 &ehci0 {
        phys = <&usbphy 0>;
+       phy-names = "usb";
        status = "okay";
 };
 
 
 &ohci0 {
        phys = <&usbphy 0>;
+       phy-names = "usb";
        status = "okay";
 };
 
index 69128a6..3eccbdb 100644 (file)
                        resets = <&ccu RST_BUS_OHCI1>,
                                 <&ccu RST_BUS_EHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 4020a1a..0d5ea19 100644 (file)
                        resets = <&ccu RST_BUS_OHCI3>,
                                 <&ccu RST_BUS_EHCI3>;
                        phys = <&usb2phy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI3>;
                        resets = <&ccu RST_BUS_OHCI3>;
                        phys = <&usb2phy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 3f39e02..7ab7117 100644 (file)
                #size-cells = <2>;
                ranges;
 
+               pcie: pcie@fc000000 {
+                       compatible = "amlogic,g12a-pcie", "snps,dw-pcie";
+                       reg = <0x0 0xfc000000 0x0 0x400000
+                              0x0 0xff648000 0x0 0x2000
+                              0x0 0xfc400000 0x0 0x200000>;
+                       reg-names = "elbi", "cfg", "config";
+                       interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+                       #interrupt-cells = <1>;
+                       interrupt-map-mask = <0 0 0 0>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+                       bus-range = <0x0 0xff>;
+                       #address-cells = <3>;
+                       #size-cells = <2>;
+                       device_type = "pci";
+                       ranges = <0x81000000 0 0 0x0 0xfc600000 0 0x00100000
+                                 0x82000000 0 0xfc700000 0x0 0xfc700000 0 0x1900000>;
+
+                       clocks = <&clkc CLKID_PCIE_PHY
+                                 &clkc CLKID_PCIE_COMB
+                                 &clkc CLKID_PCIE_PLL>;
+                       clock-names = "general",
+                                     "pclk",
+                                     "port";
+                       resets = <&reset RESET_PCIE_CTRL_A>,
+                                <&reset RESET_PCIE_APB>;
+                       reset-names = "port",
+                                     "apb";
+                       num-lanes = <1>;
+                       phys = <&usb3_pcie_phy PHY_TYPE_PCIE>;
+                       phy-names = "pcie";
+                       status = "disabled";
+               };
+
                ethmac: ethernet@ff3f0000 {
                        compatible = "amlogic,meson-axg-dwmac",
                                     "snps,dwmac-3.70a",
index 3a6a1e0..124a809 100644 (file)
 / {
        compatible = "khadas,vim3", "amlogic,a311d", "amlogic,g12b";
 };
+
+/*
+ * The VIM3 on-board MCU can mux the PCIe/USB3.0 shared differential
+ * lines using a FUSB340TMX USB 3.1 SuperSpeed Data Switch between
+ * a USB3.0 Type A connector and an M.2 Key M slot.
+ * The PHY driving these differential lines is shared between
+ * the USB3.0 controller and the PCIe Controller, thus only
+ * a single controller can use it.
+ * If the MCU is configured to mux the PCIe/USB3.0 differential lines
+ * to the M.2 Key M slot, uncomment the following block to disable
+ * USB3.0 from the USB Complex and enable the PCIe controller.
+ * The End User is not expected to uncomment the following except for
+ * testing purposes, but instead rely on the firmware/bootloader to
+ * update these nodes accordingly if PCIe mode is selected by the MCU.
+ */
+/*
+&pcie {
+       status = "okay";
+};
+
+&usb {
+       phys = <&usb2_phy0>, <&usb2_phy1>;
+       phy-names = "usb2-phy0", "usb2-phy1";
+};
+ */
index b73deb2..bba98f9 100644 (file)
 / {
        compatible = "khadas,vim3", "amlogic,s922x", "amlogic,g12b";
 };
+
+/*
+ * The VIM3 on-board MCU can mux the PCIe/USB3.0 shared differential
+ * lines using a FUSB340TMX USB 3.1 SuperSpeed Data Switch between
+ * a USB3.0 Type A connector and an M.2 Key M slot.
+ * The PHY driving these differential lines is shared between
+ * the USB3.0 controller and the PCIe Controller, thus only
+ * a single controller can use it.
+ * If the MCU is configured to mux the PCIe/USB3.0 differential lines
+ * to the M.2 Key M slot, uncomment the following block to disable
+ * USB3.0 from the USB Complex and enable the PCIe controller.
+ * The End User is not expected to uncomment the following except for
+ * testing purposes, but instead rely on the firmware/bootloader to
+ * update these nodes accordingly if PCIe mode is selected by the MCU.
+ */
+/*
+&pcie {
+       status = "okay";
+};
+
+&usb {
+       phys = <&usb2_phy0>, <&usb2_phy1>;
+       phy-names = "usb2-phy0", "usb2-phy1";
+};
+ */
index 8647da7..eac5720 100644 (file)
        linux,rc-map-name = "rc-khadas";
 };
 
+&pcie {
+       reset-gpios = <&gpio GPIOA_8 GPIO_ACTIVE_LOW>;
+};
+
 &pwm_ef {
         status = "okay";
         pinctrl-0 = <&pwm_e_pins>;
index 5233bd7..dbbf29a 100644 (file)
        clock-names = "clkin1";
        status = "okay";
 };
+
+/*
+ * The VIM3 on-board MCU can mux the PCIe/USB3.0 shared differential
+ * lines using a FUSB340TMX USB 3.1 SuperSpeed Data Switch between
+ * a USB3.0 Type A connector and an M.2 Key M slot.
+ * The PHY driving these differential lines is shared between
+ * the USB3.0 controller and the PCIe Controller, thus only
+ * a single controller can use it.
+ * If the MCU is configured to mux the PCIe/USB3.0 differential lines
+ * to the M.2 Key M slot, uncomment the following block to disable
+ * USB3.0 from the USB Complex and enable the PCIe controller.
+ * The End User is not expected to uncomment the following except for
+ * testing purposes, but instead rely on the firmware/bootloader to
+ * update these nodes accordingly if PCIe mode is selected by the MCU.
+ */
+/*
+&pcie {
+       status = "okay";
+};
+
+&usb {
+       phys = <&usb2_phy0>, <&usb2_phy1>;
+       phy-names = "usb2-phy0", "usb2-phy1";
+};
+ */
index 521573f..256ea03 100644 (file)
        power-domains = <&pwrc PWRC_SM1_ETH_ID>;
 };
 
+&pcie {
+       power-domains = <&pwrc PWRC_SM1_PCIE_ID>;
+};
+
 &pwrc {
        compatible = "amlogic,meson-sm1-pwrc";
 };
index 8e05c39..c9a867a 100644 (file)
@@ -723,7 +723,7 @@ CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_ARM_SMMU=y
 CONFIG_ARM_SMMU_V3=y
 CONFIG_QCOM_IOMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_QCOM_Q6V5_MSS=m
 CONFIG_QCOM_Q6V5_PAS=m
 CONFIG_QCOM_SYSMON=m
index 98a5405..bd23f87 100644 (file)
@@ -16,7 +16,6 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += mmiowb.h
-generic-y += msi.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += serial.h
index f74909b..5bf9638 100644 (file)
@@ -78,10 +78,9 @@ alternative_else_nop_endif
 /*
  * Remove the address tag from a virtual address, if present.
  */
-       .macro  clear_address_tag, dst, addr
-       tst     \addr, #(1 << 55)
-       bic     \dst, \addr, #(0xff << 56)
-       csel    \dst, \dst, \addr, eq
+       .macro  untagged_addr, dst, addr
+       sbfx    \dst, \addr, #0, #56
+       and     \dst, \dst, \addr
        .endm
 
 #endif
index c6bd87d..574808b 100644 (file)
@@ -321,7 +321,8 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 }
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)                    \
-static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
+static __always_inline u##sz                                           \
+__lse__cmpxchg_case_##name##sz(volatile void *ptr,                     \
                                              u##sz old,                \
                                              u##sz new)                \
 {                                                                      \
@@ -362,7 +363,8 @@ __CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, cl...)                                 \
-static inline long __lse__cmpxchg_double##name(unsigned long old1,     \
+static __always_inline long                                            \
+__lse__cmpxchg_double##name(unsigned long old1,                                \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
index f19fe4b..ac1dbca 100644 (file)
@@ -52,7 +52,9 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING             42
 #define ARM64_HAS_DCPODP                       43
 #define ARM64_WORKAROUND_1463225               44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
 
-#define ARM64_NCAPS                            45
+#define ARM64_NCAPS                            47
 
 #endif /* __ASM_CPUCAPS_H */
index 86825aa..97f21cc 100644 (file)
 #define read_sysreg_el2(r)     read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)  write_sysreg_elx(v, r, _EL2, _EL1)
 
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)                   \
-typeof(orig) * __hyp_text fname(void)                                  \
-{                                                                      \
-       typeof(alt) *val = orig;                                        \
-       asm volatile(ALTERNATIVE("nop           \n",                    \
-                                "mov   %0, %1  \n",                    \
-                                cond)                                  \
-                    : "+r" (val) : "r" (alt));                         \
-       return val;                                                     \
-}
-
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
index b61b50b..c23c473 100644 (file)
@@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void)
  * up with a tagged userland pointer. Clear the tag to get a sane pointer to
  * pass on to access_ok(), for instance.
  */
-#define untagged_addr(addr)    \
+#define __untagged_addr(addr)  \
        ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
 
+#define untagged_addr(addr)    ({                                      \
+       u64 __addr = (__force u64)addr;                                 \
+       __addr &= __untagged_addr(__addr);                              \
+       (__force __typeof__(addr))__addr;                               \
+})
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define __tag_shifted(tag)     ((u64)(tag) << 56)
-#define __tag_reset(addr)      untagged_addr(addr)
+#define __tag_reset(addr)      __untagged_addr(addr)
 #define __tag_get(addr)                (__u8)((u64)(addr) >> 56)
 #else
 #define __tag_shifted(tag)     0UL
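A minimal user-space sketch of the untagged_addr() semantics introduced above, with sign_extend64() reimplemented locally so it compiles outside the kernel: the tag in bits 63:56 is cleared only for TTBR0 (bit 55 == 0) addresses, while TTBR1 kernel addresses pass through unchanged because the sign extension fills the top byte with ones.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's sign_extend64(). */
static uint64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (uint64_t)(((int64_t)(value << shift)) >> shift);
}

/* Mirrors the new untagged_addr(): strip bits 63:56 only when bit 55 is clear. */
static uint64_t untagged_addr(uint64_t addr)
{
	return addr & sign_extend64(addr, 55);
}

int main(void)
{
	/* Tagged user pointer: tag 0x3a is stripped. */
	printf("user:   %016" PRIx64 "\n", untagged_addr(0x3a00ffff12345678ULL));
	/* Kernel (TTBR1) pointer: returned unchanged. */
	printf("kernel: %016" PRIx64 "\n", untagged_addr(0xffff8000deadbeefULL));
	return 0;
}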
index 7576df0..8330810 100644 (file)
@@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)  ((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o)  ((o) | PAGE_END)
-
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)     (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
index 972d196..6e919fa 100644 (file)
 #define SYS_FAR_EL1                    sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1                    sys_reg(3, 0, 7, 4, 0)
 
-#define SYS_PAR_EL1_F                  BIT(1)
+#define SYS_PAR_EL1_F                  BIT(0)
 #define SYS_PAR_EL1_FST                        GENMASK(6, 1)
 
 /*** Statistical Profiling Extension ***/
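The BIT(1) to BIT(0) correction above changes how PAR_EL1 must be decoded: F is bit 0 and the fault status code sits in bits 6:1, which is also why the spurious-fault check in the arch/arm64/mm/fault.c hunk further down switches from FIELD_PREP to FIELD_GET. A hedged plain-C sketch of that consumer, with the ESR_ELx_FSC_* values assumed from the arm64 ESR definitions and the helper name invented for illustration:

#include <stdbool.h>
#include <stdint.h>

#define SYS_PAR_EL1_F		(1ULL << 0)
#define SYS_PAR_EL1_FST_SHIFT	1
#define SYS_PAR_EL1_FST_MASK	(0x3fULL << SYS_PAR_EL1_FST_SHIFT)

/* Assumed from the arm64 ESR definitions, not part of this hunk. */
#define ESR_ELx_FSC_TYPE	0x3c
#define ESR_ELx_FSC_FAULT	0x04

/* Hypothetical helper mirroring is_spurious_el1_translation_fault(). */
static bool par_indicates_spurious_fault(uint64_t par)
{
	uint64_t dfsc;

	/* A successful walk (F clear) means the original fault was spurious. */
	if (!(par & SYS_PAR_EL1_F))
		return true;

	/* FIELD_GET-style extraction of the fault status code from bits 6:1. */
	dfsc = (par & SYS_PAR_EL1_FST_MASK) >> SYS_PAR_EL1_FST_SHIFT;

	/* Anything other than a translation fault is treated as spurious. */
	return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
}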
index fb60a88..3fd8fd6 100644 (file)
@@ -20,7 +20,7 @@
 
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 
-#if __LINUX_ARM_ARCH__ >= 8
+#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD)
 #define aarch32_smp_mb()       dmb(ish)
 #define aarch32_smp_rmb()      dmb(ishld)
 #define aarch32_smp_wmb()      dmb(ishst)
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
deleted file mode 100644 (file)
index 1f38bf3..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Limited
- */
-#ifndef __ASM_VDSO_DATAPAGE_H
-#define __ASM_VDSO_DATAPAGE_H
-
-#ifndef __ASSEMBLY__
-
-struct vdso_data {
-       __u64 cs_cycle_last;    /* Timebase at clocksource init */
-       __u64 raw_time_sec;     /* Raw time */
-       __u64 raw_time_nsec;
-       __u64 xtime_clock_sec;  /* Kernel time */
-       __u64 xtime_clock_nsec;
-       __u64 xtime_coarse_sec; /* Coarse time */
-       __u64 xtime_coarse_nsec;
-       __u64 wtm_clock_sec;    /* Wall to monotonic time */
-       __u64 wtm_clock_nsec;
-       __u32 tb_seq_count;     /* Timebase sequence counter */
-       /* cs_* members must be adjacent and in this order (ldp accesses) */
-       __u32 cs_mono_mult;     /* NTP-adjusted clocksource multiplier */
-       __u32 cs_shift;         /* Clocksource shift (mono = raw) */
-       __u32 cs_raw_mult;      /* Raw clocksource multiplier */
-       __u32 tz_minuteswest;   /* Whacky timezone stuff */
-       __u32 tz_dsttime;
-       __u32 use_syscall;
-       __u32 hrtimer_res;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
deleted file mode 100644 (file)
index e6e7840..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
index 2ec09de..ca158be 100644 (file)
@@ -174,6 +174,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
        struct insn_emulation *insn;
 
        insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return;
+
        insn->ops = ops;
        insn->min = INSN_UNDEF;
 
@@ -233,6 +236,8 @@ static void __init register_insn_emulation_sysctl(void)
 
        insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
                               GFP_KERNEL);
+       if (!insns_sysctl)
+               return;
 
        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
        list_for_each_entry(insn, &insn_emulation, node) {
index 1e43ba5..6c3b10a 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -128,8 +129,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
        int cpu, slot = -1;
 
        /*
-        * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
-        * start/end if we're a guest. Skip the hyp-vectors work.
+        * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+        * we're a guest. Skip the hyp-vectors work.
         */
        if (!hyp_vecs_start) {
                __this_cpu_write(bp_hardening_data.fn, fn);
@@ -623,6 +624,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
        return (need_wa > 0);
 }
 
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+       {},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+                        int scope)
+{
+       int i;
+
+       if (!is_affected_midr_range_list(entry, scope) ||
+           !is_hyp_mode_available())
+               return false;
+
+       for_each_possible_cpu(i) {
+               if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+                       return true;
+       }
+
+       return false;
+}
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
 static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -852,6 +877,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .matches = has_cortex_a76_erratum_1463225,
        },
 #endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+               .matches = needs_tx2_tvm_workaround,
+       },
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+       },
+#endif
        {
        }
 };
index 9323bcc..80f459a 100644 (file)
@@ -136,6 +136,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -175,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
index 84a8227..cf3bd29 100644 (file)
@@ -604,7 +604,7 @@ el1_da:
         */
        mrs     x3, far_el1
        inherit_daif    pstate=x23, tmp=x2
-       clear_address_tag x0, x3
+       untagged_addr   x0, x3
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort
 
@@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        orr     x24, x24, x0
 alternative_else_nop_endif
        cbnz    x24, 1f                         // preempt count != 0 || NMI return path
-       bl      preempt_schedule_irq            // irq en/disable is done inside
+       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
 1:
 #endif
 
@@ -775,6 +775,7 @@ el0_sync_compat:
        b.ge    el0_dbg
        b       el0_inv
 el0_svc_compat:
+       gic_prio_kentry_setup tmp=x1
        mov     x0, sp
        bl      el0_svc_compat_handler
        b       ret_to_user
@@ -807,7 +808,7 @@ el0_da:
        mrs     x26, far_el1
        ct_user_exit_irqoff
        enable_daif
-       clear_address_tag x0, x26
+       untagged_addr   x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
@@ -1070,7 +1071,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 #else
        ldr     x30, =vectors
 #endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
        msr     vbar_el1, x30
        add     x30, x30, #(1b - tramp_vectors)
        isb
index 1717732..06e56b4 100644 (file)
@@ -121,10 +121,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
                        /*
                         * Ensure updated trampoline is visible to instruction
-                        * fetch before we patch in the branch.
+                        * fetch before we patch in the branch. Although the
+                        * architecture doesn't require an IPI in this case,
+                        * Neoverse-N1 erratum #1542419 does require one
+                        * if the TLB maintenance in module_enable_ro() is
+                        * skipped due to rodata_enabled. It doesn't seem worth
+                        * it to make it conditional given that this is
+                        * certainly not a fast-path.
                         */
-                       __flush_icache_range((unsigned long)&dst[0],
-                                            (unsigned long)&dst[1]);
+                       flush_icache_range((unsigned long)&dst[0],
+                                          (unsigned long)&dst[1]);
                }
                addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
index e0a7fce..a96b292 100644 (file)
@@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
                                 gfp_t mask)
 {
        int rc = 0;
+       pgd_t *trans_pgd;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
@@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
        memcpy((void *)dst, src_start, length);
        __flush_icache_range(dst, dst + length);
 
-       pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+       trans_pgd = allocator(mask);
+       if (!trans_pgd) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       pgdp = pgd_offset_raw(trans_pgd, dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
                pudp = allocator(mask);
                if (!pudp) {
index a47462d..71f788c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
@@ -44,6 +45,7 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
 #include <asm/compat.h>
+#include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -332,22 +334,27 @@ void arch_release_task_struct(struct task_struct *tsk)
        fpsimd_release_task(tsk);
 }
 
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied.  We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead.  This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;
 
+       /* We rely on the above assignment to initialize dst's thread_flags: */
+       BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+       /*
+        * Detach src's sve_state (if any) from dst so that it does not
+        * get erroneously used or freed prematurely.  dst's sve_state
+        * will be allocated on demand later on if dst uses SVE.
+        * For consistency, also clear TIF_SVE here: this could be done
+        * later in copy_process(), but to avoid tripping up future
+        * maintainers it is best not to leave TIF_SVE and sve_state in
+        * an inconsistent state, even temporarily.
+        */
+       dst->thread.sve_state = NULL;
+       clear_tsk_thread_flag(dst, TIF_SVE);
+
        return 0;
 }
 
@@ -361,13 +368,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
        /*
-        * Unalias p->thread.sve_state (if any) from the parent task
-        * and disable discard SVE state for p:
-        */
-       clear_tsk_thread_flag(p, TIF_SVE);
-       p->thread.sve_state = NULL;
-
-       /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
         * any cpu that may have run that now-exited task recently.
@@ -633,3 +633,19 @@ static int __init tagged_addr_init(void)
 
 core_initcall(tagged_addr_init);
 #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * Preempting a task from an IRQ means we leave copies of PSTATE
+        * on the stack. cpufeature's enable calls may modify PSTATE, but
+        * resuming one of these preempted tasks would undo those changes.
+        *
+        * Only allow a task to be preempted once cpufeatures have been
+        * enabled.
+        */
+       if (static_branch_likely(&arm64_const_caps_ready))
+               preempt_schedule_irq();
+}
index 1fba077..76b327f 100644 (file)
@@ -8,15 +8,21 @@
 ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
 include $(srctree)/lib/vdso/Makefile
 
-COMPATCC := $(CROSS_COMPILE_COMPAT)gcc
+# Same as cc-*option, but using CC_COMPAT instead of CC
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+CC_COMPAT ?= $(CC)
+else
+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+endif
 
-# Same as cc-*option, but using COMPATCC instead of CC
 cc32-option = $(call try-run,\
-        $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+        $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 cc32-disable-warning = $(call try-run,\
-       $(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+       $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 cc32-ldoption = $(call try-run,\
-        $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+        $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-as-instr = $(call try-run,\
+       printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
 # We cannot use the global flags to compile the vDSO files, the main reason
 # being that the 32-bit compiler may be older than the main (64-bit) compiler
@@ -25,22 +31,21 @@ cc32-ldoption = $(call try-run,\
 # arm64 one.
 # As a result we set our own flags here.
 
-# From top-level Makefile
-# NOSTDINC_FLAGS
-VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include)
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
 VDSO_CPPFLAGS += $(LINUXINCLUDE)
-VDSO_CPPFLAGS += $(KBUILD_CPPFLAGS)
 
 # Common C and assembly flags
 # From top-level Makefile
 VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),)
+VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+endif
+
 VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
 ifdef CONFIG_DEBUG_INFO
 VDSO_CAFLAGS += -g
 endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(COMPATCC)), y)
-VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO
-endif
 
 # From arm Makefile
 VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
@@ -55,6 +60,7 @@ endif
 VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
 VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
 
+
 # Try to compile for ARMv8. If the compiler is too old and doesn't support it,
 # fall back to v7. There is no easy way to check for what architecture the code
 # is being compiled, so define a macro specifying that (see arch/arm/Makefile).
@@ -91,6 +97,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast
 VDSO_AFLAGS := $(VDSO_CAFLAGS)
 VDSO_AFLAGS += -D__ASSEMBLY__
 
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
 VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
 # From arm vDSO Makefile
 VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
@@ -159,14 +171,14 @@ quiet_cmd_vdsold_and_vdso_check = LD32    $@
       cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
 
 quiet_cmd_vdsold = LD32    $@
-      cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
+      cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
 quiet_cmd_vdsocc = CC32    $@
-      cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+      cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
 quiet_cmd_vdsocc_gettimeofday = CC32    $@
-      cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
+      cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
 quiet_cmd_vdsoas = AS32    $@
-      cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+      cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
 
 quiet_cmd_vdsomunge = MUNGE   $@
       cmd_vdsomunge = $(obj)/$(munge) $< $@
index bd978ad..799e84a 100644 (file)
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 hcr = vcpu->arch.hcr_el2;
 
+       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+               hcr |= HCR_TVM;
+
        write_sysreg(hcr, hcr_el2);
 
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
-       if (vcpu->arch.hcr_el2 & HCR_VSE)
-               vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+       if (vcpu->arch.hcr_el2 & HCR_VSE) {
+               vcpu->arch.hcr_el2 &= ~HCR_VSE;
+               vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+       }
 
        if (has_vhe())
                deactivate_traps_vhe();
@@ -229,20 +234,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
        }
 }
 
-static bool __hyp_text __true_value(void)
-{
-       return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
-       return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
-                           __false_value, __true_value,
-                           ARM64_WORKAROUND_834220);
-
 static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
        u64 par, tmp;
@@ -298,7 +289,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
-           (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+           (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
@@ -393,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
        return true;
 }
 
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+       u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+       int rt = kvm_vcpu_sys_get_rt(vcpu);
+       u64 val = vcpu_get_reg(vcpu, rt);
+
+       /*
+        * The normal sysreg handling code expects to see the traps,
+        * let's not do anything here.
+        */
+       if (vcpu->arch.hcr_el2 & HCR_TVM)
+               return false;
+
+       switch (sysreg) {
+       case SYS_SCTLR_EL1:
+               write_sysreg_el1(val, SYS_SCTLR);
+               break;
+       case SYS_TTBR0_EL1:
+               write_sysreg_el1(val, SYS_TTBR0);
+               break;
+       case SYS_TTBR1_EL1:
+               write_sysreg_el1(val, SYS_TTBR1);
+               break;
+       case SYS_TCR_EL1:
+               write_sysreg_el1(val, SYS_TCR);
+               break;
+       case SYS_ESR_EL1:
+               write_sysreg_el1(val, SYS_ESR);
+               break;
+       case SYS_FAR_EL1:
+               write_sysreg_el1(val, SYS_FAR);
+               break;
+       case SYS_AFSR0_EL1:
+               write_sysreg_el1(val, SYS_AFSR0);
+               break;
+       case SYS_AFSR1_EL1:
+               write_sysreg_el1(val, SYS_AFSR1);
+               break;
+       case SYS_MAIR_EL1:
+               write_sysreg_el1(val, SYS_MAIR);
+               break;
+       case SYS_AMAIR_EL1:
+               write_sysreg_el1(val, SYS_AMAIR);
+               break;
+       case SYS_CONTEXTIDR_EL1:
+               write_sysreg_el1(val, SYS_CONTEXTIDR);
+               break;
+       default:
+               return false;
+       }
+
+       __kvm_skip_instr(vcpu);
+       return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -412,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (*exit_code != ARM_EXCEPTION_TRAP)
                goto exit;
 
+       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+           kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+           handle_tx2_tvm(vcpu))
+               return true;
+
        /*
         * We trap the first access to the FP/SIMD to save the host context
         * and restore the guest context lazily.
index c466060..eb0efc5 100644 (file)
@@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
        isb();
 }
 
-static hyp_alternate_select(__tlb_switch_to_guest,
-                           __tlb_switch_to_guest_nvhe,
-                           __tlb_switch_to_guest_vhe,
-                           ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+                                            struct tlb_inv_context *cxt)
+{
+       if (has_vhe())
+               __tlb_switch_to_guest_vhe(kvm, cxt);
+       else
+               __tlb_switch_to_guest_nvhe(kvm, cxt);
+}
 
 static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
                                                struct tlb_inv_context *cxt)
@@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
        write_sysreg(0, vttbr_el2);
 }
 
-static hyp_alternate_select(__tlb_switch_to_host,
-                           __tlb_switch_to_host_nvhe,
-                           __tlb_switch_to_host_vhe,
-                           ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+                                           struct tlb_inv_context *cxt)
+{
+       if (has_vhe())
+               __tlb_switch_to_host_vhe(kvm, cxt);
+       else
+               __tlb_switch_to_host_nvhe(kvm, cxt);
+}
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
@@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       __tlb_switch_to_guest()(kvm, &cxt);
+       __tlb_switch_to_guest(kvm, &cxt);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
        if (!has_vhe() && icache_is_vpipt())
                __flush_icache_all();
 
-       __tlb_switch_to_host()(kvm, &cxt);
+       __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       __tlb_switch_to_guest()(kvm, &cxt);
+       __tlb_switch_to_guest(kvm, &cxt);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host()(kvm, &cxt);
+       __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest()(kvm, &cxt);
+       __tlb_switch_to_guest(kvm, &cxt);
 
        __tlbi(vmalle1);
        dsb(nsh);
        isb();
 
-       __tlb_switch_to_host()(kvm, &cxt);
+       __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
index 115d7a0..9fc6db0 100644 (file)
@@ -113,6 +113,15 @@ static inline bool is_ttbr1_addr(unsigned long addr)
        return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
 }
 
+static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
+{
+       /* Either init_pg_dir or swapper_pg_dir */
+       if (mm == &init_mm)
+               return __pa_symbol(mm->pgd);
+
+       return (unsigned long)virt_to_phys(mm->pgd);
+}
+
 /*
  * Dump out the page tables associated with 'addr' in the currently active mm.
  */
@@ -141,7 +150,7 @@ static void show_pte(unsigned long addr)
 
        pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-                vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
+                vabits_actual, mm_to_pgd_phys(mm));
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -259,14 +268,18 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
        par = read_sysreg(par_el1);
        local_irq_restore(flags);
 
+       /*
+        * If we now have a valid translation, treat the translation fault as
+        * spurious.
+        */
        if (!(par & SYS_PAR_EL1_F))
-               return false;
+               return true;
 
        /*
         * If we got a different type of fault from the AT instruction,
         * treat the translation fault as spurious.
         */
-       dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par);
+       dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
        return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
 }
 
index a4fc65f..b66215e 100644 (file)
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 xen-arm-y      += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y          := xen-arm.o hypercall.o
-obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
index 63a9f33..5cfc9d3 100644 (file)
@@ -99,7 +99,7 @@
 
                        miscintc: interrupt-controller@18060010 {
                                compatible = "qca,ar7240-misc-intc";
-                               reg = <0x18060010 0x4>;
+                               reg = <0x18060010 0x8>;
 
                                interrupt-parent = <&cpuintc>;
                                interrupts = <6>;
index 16bef81..914af12 100644 (file)
@@ -571,7 +571,6 @@ CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
index 8762e75..2c7adea 100644 (file)
@@ -314,7 +314,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index af44b35..b4328b3 100644 (file)
@@ -160,7 +160,6 @@ void __init prom_meminit(void)
 
 void __init prom_free_prom_memory(void)
 {
-       unsigned long addr;
        int i;
 
        if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
index 8772617..80112f2 100644 (file)
@@ -43,7 +43,7 @@
 
 /* O32 stack has to be 8-byte aligned. */
 static u64 o32_stk[4096];
-#define O32_STK          &o32_stk[sizeof(o32_stk)]
+#define O32_STK          (&o32_stk[ARRAY_SIZE(o32_stk)])
 
 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
                                     __asm__(#fun " = call_o32")
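The O32_STK change above fixes an index that was eight times too large: sizeof() yields the byte count of a u64 array, while ARRAY_SIZE() yields the element count needed for a one-past-the-end stack top. A small sketch of the difference, with the array name and size borrowed from the hunk:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint64_t o32_stk[4096];

int main(void)
{
	size_t bytes    = sizeof(o32_stk);     /* 32768: byte count, the old (wrong) index */
	size_t elements = ARRAY_SIZE(o32_stk); /* 4096: element count, the corrected index */

	printf("sizeof-based index overshoots the array by %zu elements\n",
	       bytes - elements);
	return 0;
}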
index c8b595c..61b0fc2 100644 (file)
@@ -13,7 +13,6 @@ generic-y += irq_work.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
-generic-y += msi.h
 generic-y += parport.h
 generic-y += percpu.h
 generic-y += preempt.h
index 79bf34e..f613687 100644 (file)
@@ -77,8 +77,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
 extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
                                  unsigned int size);
 
-static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
-                                  int size)
+static __always_inline
+unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 {
        switch (size) {
        case 1:
@@ -153,8 +153,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
                                     unsigned long new, unsigned int size);
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                     unsigned long new, unsigned int size)
+static __always_inline
+unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                       unsigned long new, unsigned int size)
 {
        switch (size) {
        case 1:
index cbdc14b..adab7b5 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/octeon/octeon-feature.h>
 
 #include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-pip-defs.h>
 
 enum cvmx_ipd_mode {
    CVMX_IPD_OPC_MODE_STT = 0LL,          /* All blocks DRAM, not cached in L2 */
index 071053e..5d70bab 100644 (file)
@@ -52,6 +52,7 @@
 # endif
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
 
 /* whitelists for checksyscalls */
 #define __IGNORE_fadvise64_64
index a2aba4b..1ade1da 100644 (file)
@@ -6,5 +6,16 @@
 #define HWCAP_MIPS_R6          (1 << 0)
 #define HWCAP_MIPS_MSA         (1 << 1)
 #define HWCAP_MIPS_CRC32       (1 << 2)
+#define HWCAP_MIPS_MIPS16      (1 << 3)
+#define HWCAP_MIPS_MDMX     (1 << 4)
+#define HWCAP_MIPS_MIPS3D   (1 << 5)
+#define HWCAP_MIPS_SMARTMIPS (1 << 6)
+#define HWCAP_MIPS_DSP      (1 << 7)
+#define HWCAP_MIPS_DSP2     (1 << 8)
+#define HWCAP_MIPS_DSP3     (1 << 9)
+#define HWCAP_MIPS_MIPS16E2 (1 << 10)
+#define HWCAP_LOONGSON_MMI  (1 << 11)
+#define HWCAP_LOONGSON_EXT  (1 << 12)
+#define HWCAP_LOONGSON_EXT2 (1 << 13)
 
 #endif /* _UAPI_ASM_HWCAP_H */
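A hedged user-space sketch of how an application might probe the HWCAP bits exported above through the ELF auxiliary vector; the bit values are copied from the hunk (a real build would take them from the installed <asm/hwcap.h>), and getauxval()/AT_HWCAP are the standard glibc interface:

#include <stdio.h>
#include <sys/auxv.h>

/* Copied from the hunk for a self-contained example. */
#define HWCAP_MIPS_MIPS16E2	(1 << 10)
#define HWCAP_LOONGSON_MMI	(1 << 11)
#define HWCAP_LOONGSON_EXT	(1 << 12)
#define HWCAP_LOONGSON_EXT2	(1 << 13)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_LOONGSON_MMI)
		printf("Loongson MMI available\n");
	if (hwcap & HWCAP_LOONGSON_EXT2)
		printf("Loongson EXT2 available\n");
	if (hwcap & HWCAP_MIPS_MIPS16E2)
		printf("MIPS16e2 available\n");
	return 0;
}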
index fa62cd1..6a7afe7 100644 (file)
@@ -24,7 +24,8 @@ static char r4kwar[] __initdata =
 static char daddiwar[] __initdata =
        "Enable CPU_DADDI_WORKAROUNDS to rectify.";
 
-static inline void align_mod(const int align, const int mod)
+static __always_inline __init
+void align_mod(const int align, const int mod)
 {
        asm volatile(
                ".set   push\n\t"
@@ -38,8 +39,9 @@ static inline void align_mod(const int align, const int mod)
                : "n"(align), "n"(mod));
 }
 
-static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
-                                             const int align, const int mod)
+static __always_inline __init
+void mult_sh_align_mod(long *v1, long *v2, long *w,
+                      const int align, const int mod)
 {
        unsigned long flags;
        int m1, m2;
@@ -113,7 +115,7 @@ static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
        *w = lw;
 }
 
-static inline void check_mult_sh(void)
+static __always_inline __init void check_mult_sh(void)
 {
        long v1[8], v2[8], w[8];
        int bug, fix, i;
@@ -176,7 +178,7 @@ asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
        exception_exit(prev_state);
 }
 
-static inline void check_daddi(void)
+static __init void check_daddi(void)
 {
        extern asmlinkage void handle_daddi_ov(void);
        unsigned long flags;
@@ -242,7 +244,7 @@ static inline void check_daddi(void)
 
 int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
-static inline void check_daddiu(void)
+static __init void check_daddiu(void)
 {
        long v, w, tmp;
 
index c2eb392..f521cbf 100644 (file)
@@ -2180,6 +2180,39 @@ void cpu_probe(void)
                elf_hwcap |= HWCAP_MIPS_MSA;
        }
 
+       if (cpu_has_mips16)
+               elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+       if (cpu_has_mdmx)
+               elf_hwcap |= HWCAP_MIPS_MDMX;
+
+       if (cpu_has_mips3d)
+               elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+       if (cpu_has_smartmips)
+               elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+       if (cpu_has_dsp)
+               elf_hwcap |= HWCAP_MIPS_DSP;
+
+       if (cpu_has_dsp2)
+               elf_hwcap |= HWCAP_MIPS_DSP2;
+
+       if (cpu_has_dsp3)
+               elf_hwcap |= HWCAP_MIPS_DSP3;
+
+       if (cpu_has_mips16e2)
+               elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+       if (cpu_has_loongson_mmi)
+               elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+       if (cpu_has_loongson_ext)
+               elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+       if (cpu_has_loongson_ext2)
+               elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
        if (cpu_has_vz)
                cpu_probe_vz(c);
 
index b8249c2..5eec13b 100644 (file)
@@ -108,6 +108,9 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
                return;
        }
 
+       if (start < PHYS_OFFSET)
+               return;
+
        memblock_add(start, size);
        /* Reserve any memory except the ordinary RAM ranges. */
        switch (type) {
@@ -321,7 +324,7 @@ static void __init bootmem_init(void)
         * Reserve any memory between the start of RAM and PHYS_OFFSET
         */
        if (ramstart > PHYS_OFFSET)
-               memblock_reserve(PHYS_OFFSET, PFN_UP(ramstart) - PHYS_OFFSET);
+               memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
 
        if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
index b0e25e9..3f16f38 100644 (file)
@@ -80,6 +80,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
 
 save_static_function(sys_fork);
 save_static_function(sys_clone);
+save_static_function(sys_clone3);
 
 SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
 {
index c9c879e..e7c5ab3 100644 (file)
 432    n32     fsmount                         sys_fsmount
 433    n32     fspick                          sys_fspick
 434    n32     pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    n32     clone3                          __sys_clone3
index bbce915..13cd665 100644 (file)
 432    n64     fsmount                         sys_fsmount
 433    n64     fspick                          sys_fspick
 434    n64     pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    n64     clone3                          __sys_clone3
index 9653591..353539e 100644 (file)
 432    o32     fsmount                         sys_fsmount
 433    o32     fspick                          sys_fspick
 434    o32     pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    o32     clone3                          __sys_clone3
index c1a4d4d..9f79908 100644 (file)
@@ -66,6 +66,10 @@ else
       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
 endif
 
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
 #
 # Loongson Machines' Support
 #
index 4abb92e..4254ac4 100644 (file)
@@ -3,6 +3,7 @@
  */
 #include <linux/fs.h>
 #include <linux/fcntl.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 
 #include <asm/bootinfo.h>
@@ -64,24 +65,22 @@ void __init prom_init_memory(void)
                node_id = loongson_memmap->map[i].node_id;
                mem_type = loongson_memmap->map[i].mem_type;
 
-               if (node_id == 0) {
-                       switch (mem_type) {
-                       case SYSTEM_RAM_LOW:
-                               add_memory_region(loongson_memmap->map[i].mem_start,
-                                       (u64)loongson_memmap->map[i].mem_size << 20,
-                                       BOOT_MEM_RAM);
-                               break;
-                       case SYSTEM_RAM_HIGH:
-                               add_memory_region(loongson_memmap->map[i].mem_start,
-                                       (u64)loongson_memmap->map[i].mem_size << 20,
-                                       BOOT_MEM_RAM);
-                               break;
-                       case SYSTEM_RAM_RESERVED:
-                               add_memory_region(loongson_memmap->map[i].mem_start,
-                                       (u64)loongson_memmap->map[i].mem_size << 20,
-                                       BOOT_MEM_RESERVED);
-                               break;
-                       }
+               if (node_id != 0)
+                       continue;
+
+               switch (mem_type) {
+               case SYSTEM_RAM_LOW:
+                       memblock_add(loongson_memmap->map[i].mem_start,
+                               (u64)loongson_memmap->map[i].mem_size << 20);
+                       break;
+               case SYSTEM_RAM_HIGH:
+                       memblock_add(loongson_memmap->map[i].mem_start,
+                               (u64)loongson_memmap->map[i].mem_size << 20);
+                       break;
+               case SYSTEM_RAM_RESERVED:
+                       memblock_reserve(loongson_memmap->map[i].mem_start,
+                               (u64)loongson_memmap->map[i].mem_size << 20);
+                       break;
                }
        }
 }
index ffefc1c..98c3a7f 100644 (file)
@@ -110,7 +110,7 @@ static int __init serial_init(void)
 }
 module_init(serial_init);
 
-static void __init serial_exit(void)
+static void __exit serial_exit(void)
 {
        platform_device_unregister(&uart8250_device);
 }
index 414e97d..8f20d2c 100644 (file)
@@ -142,8 +142,6 @@ static void __init szmem(unsigned int node)
                                (u32)node_id, mem_type, mem_start, mem_size);
                        pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
                                start_pfn, end_pfn, num_physpages);
-                       add_memory_region((node_id << 44) + mem_start,
-                               (u64)mem_size << 20, BOOT_MEM_RAM);
                        memblock_add_node(PFN_PHYS(start_pfn),
                                PFN_PHYS(end_pfn - start_pfn), node);
                        break;
@@ -156,16 +154,12 @@ static void __init szmem(unsigned int node)
                                (u32)node_id, mem_type, mem_start, mem_size);
                        pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
                                start_pfn, end_pfn, num_physpages);
-                       add_memory_region((node_id << 44) + mem_start,
-                               (u64)mem_size << 20, BOOT_MEM_RAM);
                        memblock_add_node(PFN_PHYS(start_pfn),
                                PFN_PHYS(end_pfn - start_pfn), node);
                        break;
                case SYSTEM_RAM_RESERVED:
                        pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
                                (u32)node_id, mem_type, mem_start, mem_size);
-                       add_memory_region((node_id << 44) + mem_start,
-                               (u64)mem_size << 20, BOOT_MEM_RESERVED);
                        memblock_reserve(((node_id << 44) + mem_start),
                                mem_size << 20);
                        break;
@@ -191,8 +185,6 @@ static void __init node_mem_init(unsigned int node)
        NODE_DATA(node)->node_start_pfn = start_pfn;
        NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
 
-       free_bootmem_with_active_regions(node, end_pfn);
-
        if (node == 0) {
                /* kernel end address */
                unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
@@ -209,8 +201,6 @@ static void __init node_mem_init(unsigned int node)
                        memblock_reserve((node_addrspace_offset | 0xfe000000),
                                         32 << 20);
        }
-
-       sparse_memory_present_with_active_regions(node);
 }
 
 static __init void prom_meminit(void)
@@ -227,6 +217,7 @@ static __init void prom_meminit(void)
                        cpumask_clear(&__node_data[(node)]->cpumask);
                }
        }
+       memblocks_present();
        max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
 
        for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
index dfb5279..800a21b 100644 (file)
@@ -61,6 +61,7 @@ int init_debug = 1;
 /* memory blocks */
 struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
 
+#define MAX_PROM_MEM 5
 static phys_addr_t prom_mem_base[MAX_PROM_MEM] __initdata;
 static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata;
 static unsigned int nr_prom_mem __initdata;
@@ -358,7 +359,7 @@ void __init prom_meminit(void)
                p++;
 
                if (type == BOOT_MEM_ROM_DATA) {
-                       if (nr_prom_mem >= 5) {
+                       if (nr_prom_mem >= MAX_PROM_MEM) {
                                pr_err("Too many ROM DATA regions");
                                continue;
                        }
@@ -377,7 +378,6 @@ void __init prom_free_prom_memory(void)
        char    *ptr;
        int     len = 0;
        int     i;
-       unsigned long addr;
 
        /*
         * preserve environment variables and command line from pmon/bbload
index 69cfa0a..996a934 100644 (file)
@@ -15,6 +15,7 @@ ccflags-vdso := \
        $(filter -mmicromips,$(KBUILD_CFLAGS)) \
        $(filter -march=%,$(KBUILD_CFLAGS)) \
        $(filter -m%-float,$(KBUILD_CFLAGS)) \
+       $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
        -D__VDSO__
 
 ifdef CONFIG_CC_IS_CLANG
@@ -59,7 +60,7 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
 ifndef CONFIG_CPU_MIPSR6
   ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
     $(warning MIPS VDSO requires binutils >= 2.25)
-    obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
+    obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
     ccflags-vdso += -DDISABLE_MIPS_VDSO
   endif
 endif
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
deleted file mode 100644 (file)
index e8243c7..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- */
-
-#include "vdso.h"
-
-#include <linux/compiler.h>
-#include <linux/time.h>
-
-#include <asm/clocksource.h>
-#include <asm/io.h>
-#include <asm/unistd.h>
-#include <asm/vdso.h>
-
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
-static __always_inline long gettimeofday_fallback(struct timeval *_tv,
-                                         struct timezone *_tz)
-{
-       register struct timezone *tz asm("a1") = _tz;
-       register struct timeval *tv asm("a0") = _tv;
-       register long ret asm("v0");
-       register long nr asm("v0") = __NR_gettimeofday;
-       register long error asm("a3");
-
-       asm volatile(
-       "       syscall\n"
-       : "=r" (ret), "=r" (error)
-       : "r" (tv), "r" (tz), "r" (nr)
-       : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
-         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
-
-       return error ? -ret : ret;
-}
-
-#endif
-
-static __always_inline long clock_gettime_fallback(clockid_t _clkid,
-                                          struct timespec *_ts)
-{
-       register struct timespec *ts asm("a1") = _ts;
-       register clockid_t clkid asm("a0") = _clkid;
-       register long ret asm("v0");
-       register long nr asm("v0") = __NR_clock_gettime;
-       register long error asm("a3");
-
-       asm volatile(
-       "       syscall\n"
-       : "=r" (ret), "=r" (error)
-       : "r" (clkid), "r" (ts), "r" (nr)
-       : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
-         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
-
-       return error ? -ret : ret;
-}
-
-static __always_inline int do_realtime_coarse(struct timespec *ts,
-                                             const union mips_vdso_data *data)
-{
-       u32 start_seq;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               ts->tv_sec = data->xtime_sec;
-               ts->tv_nsec = data->xtime_nsec >> data->cs_shift;
-       } while (vdso_data_read_retry(data, start_seq));
-
-       return 0;
-}
-
-static __always_inline int do_monotonic_coarse(struct timespec *ts,
-                                              const union mips_vdso_data *data)
-{
-       u32 start_seq;
-       u64 to_mono_sec;
-       u64 to_mono_nsec;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               ts->tv_sec = data->xtime_sec;
-               ts->tv_nsec = data->xtime_nsec >> data->cs_shift;
-
-               to_mono_sec = data->wall_to_mono_sec;
-               to_mono_nsec = data->wall_to_mono_nsec;
-       } while (vdso_data_read_retry(data, start_seq));
-
-       ts->tv_sec += to_mono_sec;
-       timespec_add_ns(ts, to_mono_nsec);
-
-       return 0;
-}
-
-#ifdef CONFIG_CSRC_R4K
-
-static __always_inline u64 read_r4k_count(void)
-{
-       unsigned int count;
-
-       __asm__ __volatile__(
-       "       .set push\n"
-       "       .set mips32r2\n"
-       "       rdhwr   %0, $2\n"
-       "       .set pop\n"
-       : "=r" (count));
-
-       return count;
-}
-
-#endif
-
-#ifdef CONFIG_CLKSRC_MIPS_GIC
-
-static __always_inline u64 read_gic_count(const union mips_vdso_data *data)
-{
-       void __iomem *gic = get_gic(data);
-       u32 hi, hi2, lo;
-
-       do {
-               hi = __raw_readl(gic + sizeof(lo));
-               lo = __raw_readl(gic);
-               hi2 = __raw_readl(gic + sizeof(lo));
-       } while (hi2 != hi);
-
-       return (((u64)hi) << 32) + lo;
-}
-
-#endif
-
-static __always_inline u64 get_ns(const union mips_vdso_data *data)
-{
-       u64 cycle_now, delta, nsec;
-
-       switch (data->clock_mode) {
-#ifdef CONFIG_CSRC_R4K
-       case VDSO_CLOCK_R4K:
-               cycle_now = read_r4k_count();
-               break;
-#endif
-#ifdef CONFIG_CLKSRC_MIPS_GIC
-       case VDSO_CLOCK_GIC:
-               cycle_now = read_gic_count(data);
-               break;
-#endif
-       default:
-               return 0;
-       }
-
-       delta = (cycle_now - data->cs_cycle_last) & data->cs_mask;
-
-       nsec = (delta * data->cs_mult) + data->xtime_nsec;
-       nsec >>= data->cs_shift;
-
-       return nsec;
-}
-
-static __always_inline int do_realtime(struct timespec *ts,
-                                      const union mips_vdso_data *data)
-{
-       u32 start_seq;
-       u64 ns;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               if (data->clock_mode == VDSO_CLOCK_NONE)
-                       return -ENOSYS;
-
-               ts->tv_sec = data->xtime_sec;
-               ns = get_ns(data);
-       } while (vdso_data_read_retry(data, start_seq));
-
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, ns);
-
-       return 0;
-}
-
-static __always_inline int do_monotonic(struct timespec *ts,
-                                       const union mips_vdso_data *data)
-{
-       u32 start_seq;
-       u64 ns;
-       u64 to_mono_sec;
-       u64 to_mono_nsec;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               if (data->clock_mode == VDSO_CLOCK_NONE)
-                       return -ENOSYS;
-
-               ts->tv_sec = data->xtime_sec;
-               ns = get_ns(data);
-
-               to_mono_sec = data->wall_to_mono_sec;
-               to_mono_nsec = data->wall_to_mono_nsec;
-       } while (vdso_data_read_retry(data, start_seq));
-
-       ts->tv_sec += to_mono_sec;
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, ns + to_mono_nsec);
-
-       return 0;
-}
-
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
-/*
- * This is behind the ifdef so that we don't provide the symbol when there's no
- * possibility of there being a usable clocksource, because there's nothing we
- * can do without it. When libc fails the symbol lookup it should fall back on
- * the standard syscall path.
- */
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
-       const union mips_vdso_data *data = get_vdso_data();
-       struct timespec ts;
-       int ret;
-
-       ret = do_realtime(&ts, data);
-       if (ret)
-               return gettimeofday_fallback(tv, tz);
-
-       if (tv) {
-               tv->tv_sec = ts.tv_sec;
-               tv->tv_usec = ts.tv_nsec / 1000;
-       }
-
-       if (tz) {
-               tz->tz_minuteswest = data->tz_minuteswest;
-               tz->tz_dsttime = data->tz_dsttime;
-       }
-
-       return 0;
-}
-
-#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
-
-int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
-{
-       const union mips_vdso_data *data = get_vdso_data();
-       int ret = -1;
-
-       switch (clkid) {
-       case CLOCK_REALTIME_COARSE:
-               ret = do_realtime_coarse(ts, data);
-               break;
-       case CLOCK_MONOTONIC_COARSE:
-               ret = do_monotonic_coarse(ts, data);
-               break;
-       case CLOCK_REALTIME:
-               ret = do_realtime(ts, data);
-               break;
-       case CLOCK_MONOTONIC:
-               ret = do_monotonic(ts, data);
-               break;
-       default:
-               break;
-       }
-
-       if (ret)
-               ret = clock_gettime_fallback(clkid, ts);
-
-       return ret;
-}
index 73ca89a..e5de3f8 100644 (file)
@@ -22,7 +22,7 @@
 
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
 
 void parisc_cache_init(void);  /* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
index 3eb4bfc..e080143 100644 (file)
@@ -52,7 +52,7 @@
 })
 
 #ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+# define __lock_aligned __section(.data..lock_aligned)
 #endif
 
 #endif /* __PARISC_LDCW_H */
index 92a9b5f..f29f682 100644 (file)
@@ -3,7 +3,7 @@
  * arch/parisc/mm/ioremap.c
  *
  * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
  * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
  */
 
@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
-               vfree(addr);
+               vunmap(addr);
                return NULL;
        }
 
@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
 {
-       if (addr > high_memory)
-               return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+       unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+       if (is_vmalloc_addr((void *)addr))
+               vunmap((void *)addr);
 }
 EXPORT_SYMBOL(iounmap);
index 6841bd5..dfbd7f2 100644 (file)
@@ -50,7 +50,7 @@ endif
 
 BOOTAFLAGS     := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
 
-BOOTARFLAGS    := -cr$(KBUILD_ARFLAGS)
+BOOTARFLAGS    := -crD
 
 ifdef CONFIG_CC_IS_CLANG
 BOOTCFLAGS += $(CLANG_FLAGS)
index 64870c7..17726f2 100644 (file)
@@ -10,4 +10,3 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += vtime.h
-generic-y += msi.h
index 4ce795d..ca8db19 100644 (file)
@@ -35,6 +35,10 @@ static inline void radix__flush_all_lpid(unsigned int lpid)
 {
        WARN_ON(1);
 }
+static inline void radix__flush_all_lpid_guest(unsigned int lpid)
+{
+       WARN_ON(1);
+}
 #endif
 
 extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
index d7fcdfa..ec2547c 100644 (file)
@@ -36,8 +36,8 @@
 #include "book3s.h"
 #include "trace.h"
 
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
+#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
 
 /* #define EXIT_DEBUG */
 
@@ -69,8 +69,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pthru_all",       VCPU_STAT(pthru_all) },
        { "pthru_host",      VCPU_STAT(pthru_host) },
        { "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
-       { "largepages_2M",    VM_STAT(num_2M_pages) },
-       { "largepages_1G",    VM_STAT(num_1G_pages) },
+       { "largepages_2M",    VM_STAT(num_2M_pages, .mode = 0444) },
+       { "largepages_1G",    VM_STAT(num_1G_pages, .mode = 0444) },
        { NULL }
 };
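
The VM_STAT/VCPU_STAT change above leans on a small C idiom: a variadic macro ending in ", ## __VA_ARGS__" so callers may optionally tack extra designated initializers (here .mode = 0444) onto an entry. A minimal userspace sketch of that idiom follows; struct stat_item, STAT() and the sample entries are invented for illustration, not kernel definitions.

#include <stdio.h>

struct stat_item {
	const char   *name;
	unsigned long offset;
	unsigned int  mode;
};

/* ", ## __VA_ARGS__" drops the trailing comma when no extra args are given. */
#define STAT(n, off, ...) { .name = (n), .offset = (off), ## __VA_ARGS__ }

static const struct stat_item items[] = {
	STAT("pthru_all",     0x10),                 /* default mode (0) */
	STAT("largepages_2M", 0x20, .mode = 0444),   /* read-only entry */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		printf("%-14s offset=%#lx mode=%o\n",
		       items[i].name, items[i].offset, items[i].mode);
	return 0;
}

With GCC/Clang the paste operator removes the dangling comma for single-argument users, so existing call sites keep compiling unchanged.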
 
index 74a9cfe..faebcbb 100644 (file)
@@ -1921,6 +1921,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        mtspr   SPRN_PCR, r6
 18:
        /* Signal secondary CPUs to continue */
+       li      r0, 0
        stb     r0,VCORE_IN_GUEST(r5)
 19:    lis     r8,0x7fff               /* MAX_INT@h */
        mtspr   SPRN_HDEC,r8
index 1d93e55..2dd452a 100644 (file)
@@ -761,6 +761,7 @@ static int spufs_init_fs_context(struct fs_context *fc)
        ctx->gid = current_gid();
        ctx->mode = 0755;
 
+       fc->fs_private = ctx;
        fc->s_fs_info = sbi;
        fc->ops = &spufs_context_ops;
        return 0;
index b533592..f87a5c6 100644 (file)
@@ -1419,6 +1419,9 @@ void __init pseries_lpar_read_hblkrm_characteristics(void)
        unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
        int call_status, len, idx, bpsize;
 
+       if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
+               return;
+
        spin_lock(&rtas_data_buf_lock);
        memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
        call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
index 104d334..88cfcb9 100644 (file)
@@ -13,6 +13,7 @@
        compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
 
        chosen {
+               stdout-path = "serial0";
        };
 
        cpus {
index 16970f2..1efaedd 100644 (file)
@@ -22,7 +22,6 @@ generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mm-arch-hooks.h
-generic-y += msi.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += sections.h
index 5a02b7d..9c992a8 100644 (file)
@@ -22,6 +22,7 @@
 
 #define REG_L          __REG_SEL(ld, lw)
 #define REG_S          __REG_SEL(sd, sw)
+#define REG_SC         __REG_SEL(sc.d, sc.w)
 #define SZREG          __REG_SEL(8, 4)
 #define LGREG          __REG_SEL(3, 2)
 
index 7255f2d..42292d9 100644 (file)
@@ -87,14 +87,6 @@ extern pgd_t swapper_pg_dir[];
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
 
-#define FIXADDR_TOP      VMALLOC_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
-#else
-#define FIXADDR_SIZE     PGDIR_SIZE
-#endif
-#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
-
 /*
  * Roughly size the vmemmap space to be large enough to fit enough
  * struct pages to map half the virtual address space. Then
@@ -108,6 +100,14 @@ extern pgd_t swapper_pg_dir[];
 
 #define vmemmap                ((struct page *)VMEMMAP_START)
 
+#define FIXADDR_TOP      (VMEMMAP_START)
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+
 /*
  * ZERO_PAGE is a global shared page that is always zero,
  * used for zero-mapped memory areas, etc.
index 37ae4e3..f02188a 100644 (file)
 #include <linux/mm_types.h>
 #include <asm/smp.h>
 
-/*
- * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
- * cache as well, so a 'fence.i' is not necessary.
- */
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
index da7aa88..8ca4798 100644 (file)
@@ -98,7 +98,26 @@ _save_context:
  */
        .macro RESTORE_ALL
        REG_L a0, PT_SSTATUS(sp)
-       REG_L a2, PT_SEPC(sp)
+       /*
+        * The current load reservation is effectively part of the processor's
+        * state, in the sense that load reservations cannot be shared between
+        * different hart contexts.  We can't actually save and restore a load
+        * reservation, so instead here we clear any existing reservation --
+        * it's always legal for implementations to clear load reservations at
+        * any point (as long as the forward progress guarantee is kept, but
+        * we'll ignore that here).
+        *
+        * Dangling load reservations can be the result of taking a trap in the
+        * middle of an LR/SC sequence, but can also be the result of a taken
+        * forward branch around an SC -- which is how we implement CAS.  As a
+        * result we need to clear reservations between the last CAS and the
+        * jump back to the new context.  While it is unlikely the store
+        * completes, implementations are allowed to expand reservations to be
+        * arbitrarily large.
+        */
+       REG_L  a2, PT_SEPC(sp)
+       REG_SC x0, a2, PT_SEPC(sp)
+
        csrw CSR_SSTATUS, a0
        csrw CSR_SEPC, a2
 
@@ -254,12 +273,11 @@ restore_all:
 resume_kernel:
        REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
        bnez s0, restore_all
-need_resched:
        REG_L s0, TASK_TI_FLAGS(tp)
        andi s0, s0, _TIF_NEED_RESCHED
        beqz s0, restore_all
        call preempt_schedule_irq
-       j need_resched
+       j restore_all
 #endif
 
 work_pending:
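
As background for the reservation-clearing comment above: a compare-and-swap on RISC-V is normally emitted as an LR/SC loop whose compare-failure path branches forward over the SC, leaving the reservation set. A minimal userspace C sketch (not kernel code; lock and try_take are invented names) that compiles to such a loop on RISC-V:

#include <stdbool.h>
#include <stdio.h>

static long lock;

static bool try_take(long *p)
{
	long expected = 0;

	/*
	 * On RISC-V this builtin expands to an LR/SC loop; when the compare
	 * fails, the generated code branches forward over the SC, which is
	 * the dangling-reservation case described in the comment above.
	 */
	return __atomic_compare_exchange_n(p, &expected, 1, false,
					   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
}

int main(void)
{
	printf("first  attempt: %d\n", try_take(&lock));  /* 1: CAS succeeds */
	printf("second attempt: %d\n", try_take(&lock));  /* 0: compare fails, SC skipped */
	return 0;
}

The second attempt takes the forward branch and never executes the SC, which is exactly the situation the trap-return path now clears with the REG_SC store to x0.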
index 424eb72..1ac75f7 100644 (file)
@@ -124,24 +124,24 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
 
 asmlinkage void do_trap_break(struct pt_regs *regs)
 {
+       if (user_mode(regs)) {
+               force_sig_fault(SIGTRAP, TRAP_BRKPT,
+                               (void __user *)(regs->sepc));
+               return;
+       }
 #ifdef CONFIG_GENERIC_BUG
-       if (!user_mode(regs)) {
+       {
                enum bug_trap_type type;
 
                type = report_bug(regs->sepc, regs);
-               switch (type) {
-               case BUG_TRAP_TYPE_NONE:
-                       break;
-               case BUG_TRAP_TYPE_WARN:
+               if (type == BUG_TRAP_TYPE_WARN) {
                        regs->sepc += get_break_insn_length(regs->sepc);
-                       break;
-               case BUG_TRAP_TYPE_BUG:
-                       die(regs, "Kernel BUG");
+                       return;
                }
        }
 #endif /* CONFIG_GENERIC_BUG */
 
-       force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)(regs->sepc));
+       die(regs, "Kernel BUG");
 }
 
 #ifdef CONFIG_GENERIC_BUG
index f0ba713..83f7d12 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/swap.h>
 #include <linux/sizes.h>
 #include <linux/of_fdt.h>
+#include <linux/libfdt.h>
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -82,6 +83,8 @@ disable:
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+static phys_addr_t dtb_early_pa __initdata;
+
 void __init setup_bootmem(void)
 {
        struct memblock_region *reg;
@@ -117,7 +120,12 @@ void __init setup_bootmem(void)
        setup_initrd();
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-       early_init_fdt_reserve_self();
+       /*
+        * Avoid using early_init_fdt_reserve_self() since __pa() does
+        * not work for DTB pointers that are fixmap addresses
+        */
+       memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+
        early_init_fdt_scan_reserved_mem();
        memblock_allow_resize();
        memblock_dump_all();
@@ -393,6 +401,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 
        /* Save pointer to DTB for early FDT parsing */
        dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
+       /* Save physical address for memblock reservation */
+       dtb_early_pa = dtb_pa;
 }
 
 static void __init setup_vm_final(void)
index 347f487..38d6403 100644 (file)
@@ -44,6 +44,7 @@ CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
@@ -69,12 +70,13 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
 CONFIG_MODULE_SIG_SHA256=y
+CONFIG_UNUSED_SYMBOLS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -370,6 +372,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
@@ -424,6 +427,7 @@ CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_WRITECACHE=m
+CONFIG_DM_CLONE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -435,6 +439,7 @@ CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
@@ -489,6 +494,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
@@ -538,15 +544,16 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
-CONFIG_DRM=y
-CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
@@ -580,6 +587,8 @@ CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -589,6 +598,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -648,12 +658,15 @@ CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
@@ -664,10 +677,6 @@ CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -739,7 +748,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
index 8514b8b..25f7998 100644 (file)
@@ -44,6 +44,7 @@ CONFIG_NUMA=y
 # CONFIG_NUMA_EMU is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
@@ -66,11 +67,12 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
 CONFIG_MODULE_SIG_SHA256=y
+CONFIG_UNUSED_SYMBOLS=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -363,6 +365,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -418,6 +421,7 @@ CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_WRITECACHE=m
+CONFIG_DM_CLONE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -429,6 +433,7 @@ CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
@@ -484,6 +489,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
@@ -533,16 +539,16 @@ CONFIG_WATCHDOG_CORE=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
-CONFIG_DRM=y
-CONFIG_DRM_VIRTIO_GPU=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
@@ -573,6 +579,8 @@ CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -581,6 +589,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -639,12 +648,15 @@ CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -656,10 +668,6 @@ CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_OFB=m
@@ -727,7 +735,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
index be09a20..20c51e5 100644 (file)
@@ -61,7 +61,7 @@ CONFIG_RAW_DRIVER=y
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_DIMLIB is not set
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
index d3f0952..61467b9 100644 (file)
@@ -41,7 +41,7 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
 #undef __ATOMIC_OP
 
 #define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)     \
-static inline void op_name(op_type val, op_type *ptr)                  \
+static __always_inline void op_name(op_type val, op_type *ptr)         \
 {                                                                      \
        asm volatile(                                                   \
                op_string "     %[ptr],%[val]\n"                        \
index b8833ac..eb7eed4 100644 (file)
@@ -56,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr)
        return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 }
 
-static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask;
@@ -77,7 +77,7 @@ static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
        __atomic64_or(mask, (long *)addr);
 }
 
-static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask;
@@ -98,8 +98,8 @@ static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
        __atomic64_and(mask, (long *)addr);
 }
 
-static inline void arch_change_bit(unsigned long nr,
-                                  volatile unsigned long *ptr)
+static __always_inline void arch_change_bit(unsigned long nr,
+                                           volatile unsigned long *ptr)
 {
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask;
index a092f63..c0f3bfe 100644 (file)
@@ -171,7 +171,7 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
  *
  * Returns 1 if @func is available for @opcode, 0 otherwise
  */
-static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
        register unsigned long r0 asm("0") = 0; /* query function */
        register unsigned long r1 asm("1") = (unsigned long) mask;
index ceeb552..819803a 100644 (file)
@@ -28,6 +28,8 @@ asm(".include \"asm/cpu_mf-insn.h\"\n");
                                 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA|  \
                                 CPU_MF_INT_SF_LSDA)
 
+#define CPU_MF_SF_RIBM_NOTAV   0x1             /* Sampling unavailable */
+
 /* CPU measurement facility support */
 static inline int cpum_cf_avail(void)
 {
@@ -69,7 +71,8 @@ struct hws_qsi_info_block {       /* Bit(s) */
        unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
        unsigned long tear;         /* 24-31: TEAR contents              */
        unsigned long dear;         /* 32-39: DEAR contents              */
-       unsigned int rsvrd0;        /* 40-43: reserved                   */
+       unsigned int rsvrd0:24;     /* 40-42: reserved                   */
+       unsigned int ribm:8;        /* 43: Reserved by IBM               */
        unsigned int cpu_speed;     /* 44-47: CPU speed                  */
        unsigned long long rsvrd1;  /* 48-55: reserved                   */
        unsigned long long rsvrd2;  /* 56-63: reserved                   */
@@ -220,7 +223,8 @@ enum stcctm_ctr_set {
        MT_DIAG = 5,
        MT_DIAG_CLEARING = 9,   /* clears loss-of-MT-ctr-data alert */
 };
-static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
+
+static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
 {
        int cc;
 
index bb59dd9..de8f0bf 100644 (file)
@@ -12,8 +12,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-
-#define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range                 free_pgd_range
 #define hugepages_supported()                  (MACHINE_HAS_EDAT1)
 
@@ -23,6 +21,13 @@ pte_t huge_ptep_get(pte_t *ptep);
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep);
 
+static inline bool is_hugepage_only_range(struct mm_struct *mm,
+                                         unsigned long addr,
+                                         unsigned long len)
+{
+       return false;
+}
+
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
index e548ec1..39f747d 100644 (file)
@@ -20,7 +20,7 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
        asm_volatile_goto("0:   brcl    0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
                          ".pushsection __jump_table,\"aw\"\n"
@@ -34,7 +34,7 @@ label:
        return true;
 }
 
-static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
        asm_volatile_goto("0:   brcl 15,%l[label]\n"
                          ".pushsection __jump_table,\"aw\"\n"
index a2399ef..3a06c26 100644 (file)
@@ -2,9 +2,6 @@
 #ifndef __ASM_S390_PCI_H
 #define __ASM_S390_PCI_H
 
-/* must be set before including pci_clp.h */
-#define PCI_BAR_COUNT  6
-
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/iommu.h>
@@ -138,7 +135,7 @@ struct zpci_dev {
 
        char res_name[16];
        bool mio_capable;
-       struct zpci_bar_struct bars[PCI_BAR_COUNT];
+       struct zpci_bar_struct bars[PCI_STD_NUM_BARS];
 
        u64             start_dma;      /* Start of available DMA addresses */
        u64             end_dma;        /* End of available DMA addresses */
index 5035917..bd2cb4e 100644 (file)
@@ -77,7 +77,7 @@ struct mio_info {
        struct {
                u64 wb;
                u64 wt;
-       } addr[PCI_BAR_COUNT];
+       } addr[PCI_STD_NUM_BARS];
        u32 reserved[6];
 } __packed;
 
@@ -98,9 +98,9 @@ struct clp_rsp_query_pci {
        u16 util_str_avail      :  1;   /* utility string available? */
        u16 pfgid               :  8;   /* pci function group id */
        u32 fid;                        /* pci function id */
-       u8 bar_size[PCI_BAR_COUNT];
+       u8 bar_size[PCI_STD_NUM_BARS];
        u16 pchid;
-       __le32 bar[PCI_BAR_COUNT];
+       __le32 bar[PCI_STD_NUM_BARS];
        u8 pfip[CLP_PFIP_NR_SEGMENTS];  /* pci function internal path */
        u32                     : 16;
        u8 fmb_len;
index 36c578c..5ff98d7 100644 (file)
@@ -997,9 +997,9 @@ static inline pte_t pte_mkhuge(pte_t pte)
 #define IPTE_NODAT     0x400
 #define IPTE_GUEST_ASCE        0x800
 
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
-                              unsigned long opt, unsigned long asce,
-                              int local)
+static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+                                       unsigned long opt, unsigned long asce,
+                                       int local)
 {
        unsigned long pto = (unsigned long) ptep;
 
@@ -1020,8 +1020,8 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
                : [r1] "a" (pto), [m4] "i" (local) : "memory");
 }
 
-static inline void __ptep_ipte_range(unsigned long address, int nr,
-                                    pte_t *ptep, int local)
+static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
+                                             pte_t *ptep, int local)
 {
        unsigned long pto = (unsigned long) ptep;
 
@@ -1269,7 +1269,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
 
 #define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
+
+static inline void pte_unmap(pte_t *pte) { }
 
 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 {
@@ -1435,9 +1436,9 @@ static inline void __pmdp_csp(pmd_t *pmdp)
 #define IDTE_NODAT     0x1000
 #define IDTE_GUEST_ASCE        0x2000
 
-static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
-                              unsigned long opt, unsigned long asce,
-                              int local)
+static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
+                                       unsigned long opt, unsigned long asce,
+                                       int local)
 {
        unsigned long sto;
 
@@ -1461,9 +1462,9 @@ static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
        }
 }
 
-static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
-                              unsigned long opt, unsigned long asce,
-                              int local)
+static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
+                                       unsigned long opt, unsigned long asce,
+                                       int local)
 {
        unsigned long r3o;
 
index 78e8a88..e3f238e 100644 (file)
@@ -111,7 +111,7 @@ struct qib {
        /* private: */
        u8 res[88];
        /* public: */
-       u8 parm[QDIO_MAX_BUFFERS_PER_Q];
+       u8 parm[128];
 } __attribute__ ((packed, aligned(256)));
 
 /**
index bd2fd9a..a470f1f 100644 (file)
@@ -83,7 +83,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
        __rc;                                                   \
 })
 
-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
        unsigned long spec = 0x010000UL;
        int rc;
@@ -113,7 +113,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
        return rc;
 }
 
-static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
        unsigned long spec = 0x01UL;
        int rc;
index 5f1fd15..2654e34 100644 (file)
@@ -390,7 +390,7 @@ static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
 
        debug_sprintf_event(cf_diag_dbg, 6,
                            "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
-                           " need %zd rc:%d\n",
+                           " need %zd rc %d\n",
                            __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
                            cpuhw->info.csvn, need, rc);
        return need;
@@ -567,7 +567,7 @@ static int cf_diag_add(struct perf_event *event, int flags)
        int err = 0;
 
        debug_sprintf_event(cf_diag_dbg, 5,
-                           "%s event %p cpu %d flags %#x cpuhw:%p\n",
+                           "%s event %p cpu %d flags %#x cpuhw %p\n",
                            __func__, event, event->cpu, flags, cpuhw);
 
        if (cpuhw->flags & PMU_F_IN_USE) {
index 544a02e..3d8b12a 100644 (file)
@@ -803,6 +803,12 @@ static int __hw_perf_event_init(struct perf_event *event)
                goto out;
        }
 
+       if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
+               pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
+               err = -EBUSY;
+               goto out;
+       }
+
        /* Always enable basic sampling */
        SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
 
@@ -895,7 +901,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
 
        /* Check online status of the CPU to which the event is pinned */
        if (event->cpu >= 0 && !cpu_online(event->cpu))
-                       return -ENODEV;
+               return -ENODEV;
 
        /* Force reset of idle/hv excludes regardless of what the
         * user requested.
index f6db0f1..d047e84 100644 (file)
@@ -332,7 +332,7 @@ static inline int plo_test_bit(unsigned char nr)
        return cc == 0;
 }
 
-static inline void __insn32_query(unsigned int opcode, u8 query[32])
+static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
 {
        register unsigned long r0 asm("0") = 0; /* query function */
        register unsigned long r1 asm("1") = (unsigned long) query;
@@ -340,9 +340,9 @@ static inline void __insn32_query(unsigned int opcode, u8 query[32])
        asm volatile(
                /* Parameter regs are ignored */
                "       .insn   rrf,%[opc] << 16,2,4,6,0\n"
-               : "=m" (*query)
+               :
                : "d" (r0), "a" (r1), [opc] "i" (opcode)
-               : "cc");
+               : "cc", "memory");
 }
 
 #define INSN_SORTL 0xb938
index c7fea9b..7b4c2ac 100644 (file)
@@ -43,7 +43,7 @@ static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
 static DEFINE_SPINLOCK(zpci_domain_lock);
 
 #define ZPCI_IOMAP_ENTRIES                                             \
-       min(((unsigned long) ZPCI_NR_DEVICES * PCI_BAR_COUNT / 2),      \
+       min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),   \
            ZPCI_IOMAP_MAX_ENTRIES)
 
 static DEFINE_SPINLOCK(zpci_iomap_lock);
@@ -294,7 +294,7 @@ static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
 void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
                              unsigned long offset, unsigned long max)
 {
-       if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+       if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
                return NULL;
 
        if (static_branch_likely(&have_mio))
@@ -324,7 +324,7 @@ static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
 void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
                                 unsigned long offset, unsigned long max)
 {
-       if (!pci_resource_len(pdev, bar) || bar >= PCI_BAR_COUNT)
+       if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
                return NULL;
 
        if (static_branch_likely(&have_mio))
@@ -416,7 +416,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
        resource_size_t len;
        int i;
 
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
@@ -451,7 +451,7 @@ static void zpci_unmap_resources(struct pci_dev *pdev)
        if (zpci_use_mio(zdev))
                return;
 
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                len = pci_resource_len(pdev, i);
                if (!len)
                        continue;
@@ -514,7 +514,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
        snprintf(zdev->res_name, sizeof(zdev->res_name),
                 "PCI Bus %04x:%02x", zdev->domain, ZPCI_BUS_NR);
 
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!zdev->bars[i].size)
                        continue;
                entry = zpci_alloc_iomap(zdev);
@@ -551,7 +551,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
 {
        int i;
 
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!zdev->bars[i].size || !zdev->bars[i].res)
                        continue;
 
@@ -573,7 +573,7 @@ int pcibios_add_device(struct pci_dev *pdev)
        pdev->dev.dma_ops = &s390_pci_dma_ops;
        zpci_map_resources(pdev);
 
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                res = &pdev->resource[i];
                if (res->parent || !res->flags)
                        continue;
index 9bdff4d..4c613e5 100644 (file)
@@ -66,7 +66,7 @@ static inline int clp_get_ilp(unsigned long *ilp)
 /*
  * Call Logical Processor with c=0, the give constant lps and an lpcb request.
  */
-static inline int clp_req(void *data, unsigned int lps)
+static __always_inline int clp_req(void *data, unsigned int lps)
 {
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
@@ -145,7 +145,7 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
 {
        int i;
 
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                zdev->bars[i].val = le32_to_cpu(response->bar[i]);
                zdev->bars[i].size = response->bar_size[i];
        }
@@ -164,8 +164,8 @@ static int clp_store_query_pci_fn(struct zpci_dev *zdev,
                       sizeof(zdev->util_str));
        }
        zdev->mio_capable = response->mio_addr_avail;
-       for (i = 0; i < PCI_BAR_COUNT; i++) {
-               if (!(response->mio.valid & (1 << (PCI_BAR_COUNT - i - 1))))
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+               if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
                        continue;
 
                zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
index fbc1aec..eb24cb1 100644 (file)
@@ -29,7 +29,6 @@ config SPARC
        select RTC_DRV_M48T59
        select RTC_SYSTOHC
        select HAVE_ARCH_JUMP_LABEL if SPARC64
-       select HAVE_FAST_GUP if SPARC64
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
        select GENERIC_PCI_IOMAP
index b621216..62de2eb 100644 (file)
@@ -18,7 +18,6 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += mmiowb.h
 generic-y += module.h
-generic-y += msi.h
 generic-y += preempt.h
 generic-y += serial.h
 generic-y += trace_clock.h
index 149795c..25019d4 100644 (file)
 struct mem_vector immovable_mem[MAX_NUMNODES*2];
 
 /*
- * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
- * digits, and '\0' for termination.
- */
-#define MAX_ADDR_LEN 19
-
-static acpi_physical_address get_cmdline_acpi_rsdp(void)
-{
-       acpi_physical_address addr = 0;
-
-#ifdef CONFIG_KEXEC
-       char val[MAX_ADDR_LEN] = { };
-       int ret;
-
-       ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
-       if (ret < 0)
-               return 0;
-
-       if (kstrtoull(val, 16, &addr))
-               return 0;
-#endif
-       return addr;
-}
-
-/*
  * Search EFI system tables for RSDP.  If both ACPI_20_TABLE_GUID and
  * ACPI_TABLE_GUID are found, take the former, which has more features.
  */
@@ -298,6 +274,30 @@ acpi_physical_address get_rsdp_addr(void)
 }
 
 #if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
+/*
+ * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
+ * digits, and '\0' for termination.
+ */
+#define MAX_ADDR_LEN 19
+
+static acpi_physical_address get_cmdline_acpi_rsdp(void)
+{
+       acpi_physical_address addr = 0;
+
+#ifdef CONFIG_KEXEC
+       char val[MAX_ADDR_LEN] = { };
+       int ret;
+
+       ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
+       if (ret < 0)
+               return 0;
+
+       if (kstrtoull(val, 16, &addr))
+               return 0;
+#endif
+       return addr;
+}
+
 /* Compute SRAT address from RSDP. */
 static unsigned long get_acpi_srat_table(void)
 {
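
The 19-byte sizing in the comment above is simply "0x" plus 16 hex digits plus the terminating NUL. A throwaway userspace check of that arithmetic, assuming nothing beyond snprintf():

#include <stdint.h>
#include <stdio.h>

#define MAX_ADDR_LEN 19        /* "0x" + 16 hex digits + '\0' */

int main(void)
{
	char buf[MAX_ADDR_LEN];
	int n = snprintf(buf, sizeof(buf), "0x%016llx",
			 (unsigned long long)UINT64_MAX);

	printf("%s -> %d characters plus NUL\n", buf, n);   /* 18 + 1 = 19 */
	return 0;
}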
index 53ac0cb..9652d5c 100644 (file)
@@ -345,6 +345,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 {
        const unsigned long kernel_total_size = VO__end - VO__text;
        unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+       unsigned long needed_size;
 
        /* Retain x86 boot parameters pointer passed from startup_32/64. */
        boot_params = rmode;
@@ -379,26 +380,38 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
+       /*
+        * The memory hole needed for the kernel is the larger of either
+        * the entire decompressed kernel plus relocation table, or the
+        * entire decompressed kernel plus .bss and .brk sections.
+        *
+        * On X86_64, the memory is mapped with PMD pages. Round the
+        * size up so that the full extent of PMD pages mapped is
+        * included in the check against the valid memory table
+        * entries. This ensures the full mapped area is usable RAM
+        * and doesn't include any reserved areas.
+        */
+       needed_size = max(output_len, kernel_total_size);
+#ifdef CONFIG_X86_64
+       needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+#endif
+
        /* Report initial kernel position details. */
        debug_putaddr(input_data);
        debug_putaddr(input_len);
        debug_putaddr(output);
        debug_putaddr(output_len);
        debug_putaddr(kernel_total_size);
+       debug_putaddr(needed_size);
 
 #ifdef CONFIG_X86_64
        /* Report address of 32-bit trampoline */
        debug_putaddr(trampoline_32bit);
 #endif
 
-       /*
-        * The memory hole needed for the kernel is the larger of either
-        * the entire decompressed kernel plus relocation table, or the
-        * entire decompressed kernel plus .bss and .brk sections.
-        */
        choose_random_location((unsigned long)input_data, input_len,
                                (unsigned long *)&output,
-                               max(output_len, kernel_total_size),
+                               needed_size,
                                &virt_addr);
 
        /* Validate memory location choices. */
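
The sizing rule introduced above can be exercised on its own: take the larger of the compressed payload's output_len and the linked kernel size, then round up to the 2 MiB PMD granularity on x86_64. A small sketch with stand-in values (the two sizes below are assumptions, not real boot figures):

#include <stdio.h>

#define MIN_KERNEL_ALIGN (1UL << 21)    /* PMD page size on x86_64: 2 MiB */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long output_len        = 31UL << 20;   /* assumed: 31 MiB payload   */
	unsigned long kernel_total_size = 33UL << 20;   /* assumed: 33 MiB image     */
	unsigned long needed_size;

	/* Larger of the two, then rounded up to full PMD pages (x86_64 only). */
	needed_size = output_len > kernel_total_size ? output_len : kernel_total_size;
	needed_size = ALIGN_UP(needed_size, MIN_KERNEL_ALIGN);

	printf("needed_size = %lu MiB\n", needed_size >> 20);   /* prints 34 MiB */
	return 0;
}

Rounding up before the memory-table check is what guarantees the last PMD-mapped page is backed by usable RAM rather than an adjacent reserved region.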
index e7d35f6..64c3e70 100644 (file)
@@ -5,12 +5,14 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 #include <asm/apicdef.h>
 #include <asm/nmi.h>
 
 #include "../perf_event.h"
 
-static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
+static unsigned long perf_nmi_window;
 
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
@@ -641,11 +643,12 @@ static void amd_pmu_disable_event(struct perf_event *event)
  * handler when multiple PMCs are active or PMC overflow while handling some
  * other source of an NMI.
  *
- * Attempt to mitigate this by using the number of active PMCs to determine
- * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
- * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
- * number of active PMCs or 2. The value of 2 is used in case an NMI does not
- * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ * Attempt to mitigate this by creating an NMI window during which any
+ * un-handled NMI will be claimed. This avoids extending the window past the
+ * point where latent NMIs could still plausibly arrive. The per-CPU
+ * perf_nmi_tstamp is set to the window end time whenever perf handles a
+ * counter; an un-handled NMI received before that time is claimed, and one
+ * received after it is not.
  */
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
@@ -663,21 +666,19 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
        handled = x86_pmu_handle_irq(regs);
 
        /*
-        * If a counter was handled, record the number of possible remaining
-        * NMIs that can occur.
+        * If a counter was handled, record a timestamp such that un-handled
+        * NMIs will be claimed if arriving within that window.
         */
        if (handled) {
-               this_cpu_write(perf_nmi_counter,
-                              min_t(unsigned int, 2, active));
+               this_cpu_write(perf_nmi_tstamp,
+                              jiffies + perf_nmi_window);
 
                return handled;
        }
 
-       if (!this_cpu_read(perf_nmi_counter))
+       if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
                return NMI_DONE;
 
-       this_cpu_dec(perf_nmi_counter);
-
        return NMI_HANDLED;
 }
 
@@ -909,6 +910,9 @@ static int __init amd_core_pmu_init(void)
        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;
 
+       /* Avoid calculating the value each time in the NMI handler */
+       perf_nmi_window = msecs_to_jiffies(100);
+
        switch (boot_cpu_data.x86) {
        case 0x15:
                pr_cont("Fam15h ");
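The timestamp check itself is the standard jiffies/time_after() idiom. A self-contained sketch of the pattern the hunk implements (perf_nmi_tstamp is per-CPU and the window is msecs_to_jiffies(100) in the real code; the plain globals and tick values here are simplifications):

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-ins for jiffies and time_after(); the kernel versions wrap safely. */
	static unsigned long jiffies;
	#define time_after(a, b)	((long)((b) - (a)) < 0)

	static unsigned long perf_nmi_window = 100;	/* ticks, ~100ms in the real code */
	static unsigned long perf_nmi_tstamp;		/* per-CPU in the real code */

	/* Returns true when the NMI is claimed (NMI_HANDLED), false for NMI_DONE. */
	static bool handle_nmi(bool counter_handled)
	{
		if (counter_handled) {
			perf_nmi_tstamp = jiffies + perf_nmi_window;
			return true;
		}
		/* Un-handled NMIs are claimed only while the window is still open. */
		return !time_after(jiffies, perf_nmi_tstamp);
	}

	int main(void)
	{
		jiffies = 0;   printf("%d\n", handle_nmi(true));	/* opens window  */
		jiffies = 50;  printf("%d\n", handle_nmi(false));	/* inside: claim */
		jiffies = 200; printf("%d\n", handle_nmi(false));	/* expired: done */
		return 0;
	}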
index 27ee47a..fcef678 100644 (file)
@@ -4983,6 +4983,8 @@ __init int intel_pmu_init(void)
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
                x86_add_quirk(intel_pebs_isolation_quirk);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -5031,6 +5033,8 @@ __init int intel_pmu_init(void)
                /* fall through */
        case INTEL_FAM6_ICELAKE_L:
        case INTEL_FAM6_ICELAKE:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
index 9f2f390..e1daf41 100644 (file)
  *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
- *                                             CNL
+ *                                             CNL,KBL,CML
  *                            Scope: Core
  *     MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- *                                             SKL,KNL,GLM,CNL
+ *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
- *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL
+ *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
+ *                                             ICL,TGL
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
- *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL
+ *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
+ *                                             KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- *                                             GLM,CNL
+ *                                             GLM,CNL,KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *                                             SKL,KNL,GLM,CNL
+ *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
- *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL
+ *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+ *                                             KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
- *                            Available model: HSW ULT,KBL,CNL
+ *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
- *                            Available model: HSW ULT,KBL,CNL
+ *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
- *                            Available model: HSW ULT,KBL,GLM,CNL
+ *                            Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *
  */
@@ -544,6 +547,19 @@ static const struct cstate_model cnl_cstates __initconst = {
                                  BIT(PERF_CSTATE_PKG_C10_RES),
 };
 
+static const struct cstate_model icl_cstates __initconst = {
+       .core_events            = BIT(PERF_CSTATE_CORE_C6_RES) |
+                                 BIT(PERF_CSTATE_CORE_C7_RES),
+
+       .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
+                                 BIT(PERF_CSTATE_PKG_C3_RES) |
+                                 BIT(PERF_CSTATE_PKG_C6_RES) |
+                                 BIT(PERF_CSTATE_PKG_C7_RES) |
+                                 BIT(PERF_CSTATE_PKG_C8_RES) |
+                                 BIT(PERF_CSTATE_PKG_C9_RES) |
+                                 BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
 static const struct cstate_model slm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),
@@ -614,6 +630,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L, hswult_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE,   hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE_L, hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE, hswult_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L, cnl_cstates),
 
@@ -625,8 +643,10 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
 
-       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, snb_cstates),
-       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,   snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,   icl_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE_L, icl_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE, icl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index b1afc77..6f86650 100644 (file)
@@ -89,7 +89,14 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
        case INTEL_FAM6_ICELAKE_L:
+       case INTEL_FAM6_ICELAKE:
+       case INTEL_FAM6_ICELAKE_X:
+       case INTEL_FAM6_ICELAKE_D:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index 5c056b8..e01078e 100644 (file)
@@ -260,11 +260,21 @@ void __init hv_apic_init(void)
        }
 
        if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
-               pr_info("Hyper-V: Using MSR based APIC access\n");
+               pr_info("Hyper-V: Using enlightened APIC (%s mode)",
+                       x2apic_enabled() ? "x2apic" : "xapic");
+               /*
+                * With x2apic, architectural x2apic MSRs are equivalent to the
+                * respective synthetic MSRs, so there's no need to override
+                * the apic accessors.  The only exception is
+                * hv_apic_eoi_write, because it benefits from lazy EOI when
+                * available, but it works for both xapic and x2apic modes.
+                */
                apic_set_eoi_write(hv_apic_eoi_write);
-               apic->read      = hv_apic_read;
-               apic->write     = hv_apic_write;
-               apic->icr_write = hv_apic_icr_write;
-               apic->icr_read  = hv_apic_icr_read;
+               if (!x2apic_enabled()) {
+                       apic->read      = hv_apic_read;
+                       apic->write     = hv_apic_write;
+                       apic->icr_write = hv_apic_icr_write;
+                       apic->icr_read  = hv_apic_icr_read;
+               }
        }
 }
index cff3f3f..8348f7d 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 
 #ifndef _ASM_X86_CPU_ENTRY_AREA_H
 #define _ASM_X86_CPU_ENTRY_AREA_H
index f046225..c606c0b 100644 (file)
@@ -83,6 +83,9 @@
 #define INTEL_FAM6_TIGERLAKE_L         0x8C
 #define INTEL_FAM6_TIGERLAKE           0x8D
 
+#define INTEL_FAM6_COMETLAKE           0xA5
+#define INTEL_FAM6_COMETLAKE_L         0xA6
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL                0x1C /* Diamondville, Pineview */
index 23edf56..50eb430 100644 (file)
@@ -219,13 +219,6 @@ enum {
                                 PFERR_WRITE_MASK |             \
                                 PFERR_PRESENT_MASK)
 
-/*
- * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
- * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
- * with the SVE bit in EPT PTEs.
- */
-#define SPTE_SPECIAL_MASK (1ULL << 62)
-
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC   0
 /*
index e28f8b7..9d5252c 100644 (file)
@@ -21,7 +21,7 @@
 #define MWAIT_ECX_INTERRUPT_BREAK      0x1
 #define MWAITX_ECX_TIMER_ENABLE                BIT(1)
 #define MWAITX_MAX_LOOPS               ((u32)-1)
-#define MWAITX_DISABLE_CSTATES         0xf
+#define MWAITX_DISABLE_CSTATES         0xf0
 
 static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
index 5df09a0..07375b4 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_X86_PTI_H
 #define _ASM_X86_PTI_H
 #ifndef __ASSEMBLY__
index 35c225e..61d93f0 100644 (file)
@@ -734,5 +734,28 @@ do {                                                                               \
        if (unlikely(__gu_err)) goto err_label;                                 \
 } while (0)
 
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)                   \
+       while (len >= sizeof(type)) {                                   \
+               unsafe_put_user(*(type *)src,(type __user *)dst,label); \
+               dst += sizeof(type);                                    \
+               src += sizeof(type);                                    \
+               len -= sizeof(type);                                    \
+       }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label)                      \
+do {                                                                   \
+       char __user *__ucu_dst = (_dst);                                \
+       const char *__ucu_src = (_src);                                 \
+       size_t __ucu_len = (_len);                                      \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);   \
+} while (0)
+
 #endif /* _ASM_X86_UACCESS_H */
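The unsafe_copy_to_user() added above copies in descending power-of-two chunks and bails to a caller-supplied label on a fault. A hedged sketch of how a caller would typically wrap it (copy_record_to_user() is a made-up name; the user_access_begin()/user_access_end() bracketing is the usual convention for the unsafe_* accessors):

	/*
	 * Hypothetical caller: open a user-access window, copy, and let any
	 * fault jump to the error label.
	 */
	static int copy_record_to_user(void __user *dst, const void *src, size_t len)
	{
		if (!user_access_begin(dst, len))
			return -EFAULT;
		unsafe_copy_to_user(dst, src, len, efault);	/* 8-, 4-, 2-, then 1-byte chunks */
		user_access_end();
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}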
 
index 45e92cb..b0889c4 100644 (file)
@@ -156,7 +156,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
        struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-       cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+       if (cmsk)
+               cpumask_clear_cpu(dead_cpu, &cmsk->mask);
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
 }
index 267daad..c656d92 100644 (file)
@@ -216,6 +216,10 @@ static void __init ms_hyperv_init_platform(void)
        int hv_host_info_ecx;
        int hv_host_info_edx;
 
+#ifdef CONFIG_PARAVIRT
+       pv_info.name = "Hyper-V";
+#endif
+
        /*
         * Extract the features and hints
         */
index 9735139..46d7326 100644 (file)
@@ -49,7 +49,7 @@
 #define VMWARE_CMD_VCPU_RESERVED 31
 
 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)                           \
-       __asm__("inl (%%dx)" :                                          \
+       __asm__("inl (%%dx), %%eax" :                                   \
                "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :            \
                "a"(VMWARE_HYPERVISOR_MAGIC),                           \
                "c"(VMWARE_CMD_##cmd),                                  \
index 29ffa49..206a4b6 100644 (file)
@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
         * we might write invalid pmds, when the kernel is relocated
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
+        *
+        * Only the region occupied by the kernel image has so far
+        * been checked against the table of usable memory regions
+        * provided by the firmware, so invalidate pages outside that
+        * region. A page table entry that maps to a reserved area of
+        * memory would allow processor speculation into that area,
+        * and on some hardware (particularly the UV platform) even
+        * speculative access to some reserved areas is caught as an
+        * error, causing the BIOS to halt the system.
         */
 
        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
-       for (i = 0; i < PTRS_PER_PMD; i++) {
+
+       /* invalidate pages before the kernel image */
+       for (i = 0; i < pmd_index((unsigned long)_text); i++)
+               pmd[i] &= ~_PAGE_PRESENT;
+
+       /* fixup pages that are part of the kernel image */
+       for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
-       }
+
+       /* invalidate pages after the kernel image */
+       for (; i < PTRS_PER_PMD; i++)
+               pmd[i] &= ~_PAGE_PRESENT;
 
        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
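The loop restructuring above splits the 512 level2_kernel_pgt entries into three ranges keyed off pmd_index(_text) and pmd_index(_end). A small sketch of that partitioning (the addresses are illustrative placeholders, not real link addresses):

	#include <stdio.h>

	#define PTRS_PER_PMD	512
	#define PMD_SHIFT	21	/* 2 MiB PMD pages */
	#define pmd_index(va)	(((va) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

	int main(void)
	{
		/* Illustrative stand-ins for the linked _text and _end addresses. */
		unsigned long long text = 0xffffffff81000000ULL;
		unsigned long long end  = 0xffffffff83600000ULL;
		unsigned int i, pre = 0, img = 0, post = 0;

		for (i = 0; i < pmd_index(text); i++)	/* would be marked not-present */
			pre++;
		for (; i <= pmd_index(end); i++)	/* kept, offset by load_delta */
			img++;
		for (; i < PTRS_PER_PMD; i++)		/* would be marked not-present */
			post++;

		printf("cleared before: %u, kernel image: %u, cleared after: %u\n",
		       pre, img, post);
		return 0;
	}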
index 320ab97..1d0797b 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 //
 // Code shared between 32 and 64 bit
 
index 6331603..9c5029c 100644 (file)
@@ -485,6 +485,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 0x80000008.ebx */
        const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+               F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);
 
@@ -618,16 +619,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
         */
        case 0x1f:
        case 0xb: {
-               int i, level_type;
+               int i;
 
-               /* read more entries until level_type is zero */
-               for (i = 1; ; ++i) {
+               /*
+                * We filled in entry[0] for CPUID(EAX=<function>,
+                * ECX=00H) above.  If its level type (ECX[15:8]) is
+                * zero, then the leaf is unimplemented, and we're
+                * done.  Otherwise, continue to populate entries
+                * until the level type (ECX[15:8]) of the previously
+                * added entry is zero.
+                */
+               for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
                        if (*nent >= maxnent)
                                goto out;
 
-                       level_type = entry[i - 1].ecx & 0xff00;
-                       if (!level_type)
-                               break;
                        do_host_cpuid(&entry[i], function, i);
                        ++*nent;
                }
@@ -969,53 +974,66 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
 
 /*
- * If no match is found, check whether we exceed the vCPU's limit
- * and return the content of the highest valid _standard_ leaf instead.
- * This is to satisfy the CPUID specification.
+ * If the basic or extended CPUID leaf requested is higher than the
+ * maximum supported basic or extended leaf, respectively, then it is
+ * out of range.
  */
-static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
-                                                  u32 function, u32 index)
+static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
 {
-       struct kvm_cpuid_entry2 *maxlevel;
-
-       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
-       if (!maxlevel || maxlevel->eax >= function)
-               return NULL;
-       if (function & 0x80000000) {
-               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
-               if (!maxlevel)
-                       return NULL;
-       }
-       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+       struct kvm_cpuid_entry2 *max;
+
+       max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+       return max && function <= max->eax;
 }
 
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool check_limit)
 {
        u32 function = *eax, index = *ecx;
-       struct kvm_cpuid_entry2 *best;
-       bool entry_found = true;
-
-       best = kvm_find_cpuid_entry(vcpu, function, index);
-
-       if (!best) {
-               entry_found = false;
-               if (!check_limit)
-                       goto out;
+       struct kvm_cpuid_entry2 *entry;
+       struct kvm_cpuid_entry2 *max;
+       bool found;
 
-               best = check_cpuid_limit(vcpu, function, index);
+       entry = kvm_find_cpuid_entry(vcpu, function, index);
+       found = entry;
+       /*
+        * Intel CPUID semantics treats any query for an out-of-range
+        * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
+        * requested. AMD CPUID semantics returns all zeroes for any
+        * undefined leaf, whether or not the leaf is in range.
+        */
+       if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
+           !cpuid_function_in_range(vcpu, function)) {
+               max = kvm_find_cpuid_entry(vcpu, 0, 0);
+               if (max) {
+                       function = max->eax;
+                       entry = kvm_find_cpuid_entry(vcpu, function, index);
+               }
        }
-
-out:
-       if (best) {
-               *eax = best->eax;
-               *ebx = best->ebx;
-               *ecx = best->ecx;
-               *edx = best->edx;
-       } else
+       if (entry) {
+               *eax = entry->eax;
+               *ebx = entry->ebx;
+               *ecx = entry->ecx;
+               *edx = entry->edx;
+       } else {
                *eax = *ebx = *ecx = *edx = 0;
-       trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
-       return entry_found;
+               /*
+                * When leaf 0BH or 1FH is defined, CL is pass-through
+                * and EDX is always the x2APIC ID, even for undefined
+                * subleaves. Index 1 will exist iff the leaf is
+                * implemented, so we pass through CL iff leaf 1
+                * exists. EDX can be copied from any existing index.
+                */
+               if (function == 0xb || function == 0x1f) {
+                       entry = kvm_find_cpuid_entry(vcpu, function, 1);
+                       if (entry) {
+                               *ecx = index & 0xff;
+                               *edx = entry->edx;
+                       }
+               }
+       }
+       trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
+       return found;
 }
 EXPORT_SYMBOL_GPL(kvm_cpuid);
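The rewritten kvm_cpuid() encodes two vendor behaviours: Intel redirects out-of-range leaves to the highest basic leaf, while AMD returns zeroes. A toy sketch of the range check and the Intel-style fallback (the two-entry table and the leaf values are invented purely for illustration):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy CPUID table: leaf 0 / 0x80000000 report the maximum basic/extended leaf. */
	struct leaf { unsigned int function; unsigned int eax; };

	static const struct leaf table[] = {
		{ 0x00000000, 0x16 },		/* max basic leaf */
		{ 0x80000000, 0x80000008 },	/* max extended leaf */
	};

	static const struct leaf *find(unsigned int function)
	{
		for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (table[i].function == function)
				return &table[i];
		return NULL;
	}

	/* Mirrors cpuid_function_in_range(): in range iff <= the class maximum. */
	static bool in_range(unsigned int function)
	{
		const struct leaf *max = find(function & 0x80000000);
		return max && function <= max->eax;
	}

	int main(void)
	{
		unsigned int function = 0x17;	/* one past the toy max basic leaf */

		/* Intel semantics: redirect out-of-range queries to CPUID.0H:EAX;
		 * AMD semantics (not modelled) would return all zeroes instead. */
		if (!in_range(function))
			printf("0x%x is out of range, fall back to leaf 0x%x\n",
			       function, find(0)->eax);
		return 0;
	}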
 
index 3a3a685..87b0fcc 100644 (file)
 #define X2APIC_BROADCAST               0xFFFFFFFFul
 
 static bool lapic_timer_advance_dynamic __read_mostly;
-#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100
-#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 5000
-#define LAPIC_TIMER_ADVANCE_ADJUST_INIT 1000
+#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100     /* clock cycles */
+#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000   /* clock cycles */
+#define LAPIC_TIMER_ADVANCE_NS_INIT    1000
+#define LAPIC_TIMER_ADVANCE_NS_MAX     5000
 /* step-by-step approximation to mitigate fluctuation */
 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
 
@@ -1504,8 +1505,8 @@ static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
                timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
        }
 
-       if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_ADJUST_MAX))
-               timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
+       if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
+               timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
        apic->lapic_timer.timer_advance_ns = timer_advance_ns;
 }
 
@@ -2302,7 +2303,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
                     HRTIMER_MODE_ABS_HARD);
        apic->lapic_timer.timer.function = apic_timer_fn;
        if (timer_advance_ns == -1) {
-               apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
+               apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
                lapic_timer_advance_dynamic = true;
        } else {
                apic->lapic_timer.timer_advance_ns = timer_advance_ns;
index 5269aa0..24c23c6 100644 (file)
@@ -83,7 +83,17 @@ module_param(dbg, bool, 0644);
 #define PTE_PREFETCH_NUM               8
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 10
-#define PT64_SECOND_AVAIL_BITS_SHIFT 52
+#define PT64_SECOND_AVAIL_BITS_SHIFT 54
+
+/*
+ * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
+ * Access Tracking SPTEs.
+ */
+#define SPTE_SPECIAL_MASK (3ULL << 52)
+#define SPTE_AD_ENABLED_MASK (0ULL << 52)
+#define SPTE_AD_DISABLED_MASK (1ULL << 52)
+#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
+#define SPTE_MMIO_MASK (3ULL << 52)
 
 #define PT64_LEVEL_BITS 9
 
@@ -219,12 +229,11 @@ static u64 __read_mostly shadow_present_mask;
 static u64 __read_mostly shadow_me_mask;
 
 /*
- * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
- * Non-present SPTEs with shadow_acc_track_value set are in place for access
- * tracking.
+ * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
+ * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
+ * pages.
  */
 static u64 __read_mostly shadow_acc_track_mask;
-static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
 
 /*
  * The mask/shift to use for saving the original R/X bits when marking the PTE
@@ -304,7 +313,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
 {
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        BUG_ON((mmio_mask & mmio_value) != mmio_value);
-       shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
+       shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
        shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
        shadow_mmio_access_mask = access_mask;
 }
@@ -320,10 +329,27 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
        return sp->role.ad_disabled;
 }
 
+static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+{
+       /*
+        * When using the EPT page-modification log, the GPAs in the log
+        * would come from L2 rather than L1.  Therefore, we need to rely
+        * on write protection to record dirty pages.  This also bypasses
+        * PML, since writes now result in a vmexit.
+        */
+       return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
+}
+
 static inline bool spte_ad_enabled(u64 spte)
 {
        MMU_WARN_ON(is_mmio_spte(spte));
-       return !(spte & shadow_acc_track_value);
+       return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
+}
+
+static inline bool spte_ad_need_write_protect(u64 spte)
+{
+       MMU_WARN_ON(is_mmio_spte(spte));
+       return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
 }
 
 static inline u64 spte_shadow_accessed_mask(u64 spte)
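The old single-bit SPTE_SPECIAL_MASK becomes a two-bit field in bits 53:52, distinguishing A/D-enabled, A/D-disabled, write-protect-only and MMIO SPTEs. A compact sketch of decoding that field, mirroring the spte_ad_enabled()/spte_ad_need_write_protect() predicates in the hunks above (the MMU_WARN_ON(is_mmio_spte()) guards are dropped here):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Two-bit "special" field in SPTE bits 53:52, as defined above. */
	#define SPTE_SPECIAL_MASK		(3ULL << 52)
	#define SPTE_AD_ENABLED_MASK		(0ULL << 52)
	#define SPTE_AD_DISABLED_MASK		(1ULL << 52)
	#define SPTE_AD_WRPROT_ONLY_MASK	(2ULL << 52)
	#define SPTE_MMIO_MASK			(3ULL << 52)

	static bool spte_ad_enabled(uint64_t spte)
	{
		return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
	}

	static bool spte_ad_need_write_protect(uint64_t spte)
	{
		return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
	}

	int main(void)
	{
		uint64_t spte = SPTE_AD_WRPROT_ONLY_MASK | 0x1000;	/* illustrative SPTE */

		/* Wrprot-only: A/D bits stay usable, but dirty logging goes via write protection. */
		printf("ad_enabled=%d need_wrprot=%d\n",
		       spte_ad_enabled(spte), spte_ad_need_write_protect(spte));
		return 0;
	}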
@@ -461,7 +487,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 {
        BUG_ON(!dirty_mask != !accessed_mask);
        BUG_ON(!accessed_mask && !acc_track_mask);
-       BUG_ON(acc_track_mask & shadow_acc_track_value);
+       BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
 
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
@@ -1589,16 +1615,16 @@ static bool spte_clear_dirty(u64 *sptep)
 
        rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
 
+       MMU_WARN_ON(!spte_ad_enabled(spte));
        spte &= ~shadow_dirty_mask;
-
        return mmu_spte_update(sptep, spte);
 }
 
-static bool wrprot_ad_disabled_spte(u64 *sptep)
+static bool spte_wrprot_for_clear_dirty(u64 *sptep)
 {
        bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
                                               (unsigned long *)sptep);
-       if (was_writable)
+       if (was_writable && !spte_ad_enabled(*sptep))
                kvm_set_pfn_dirty(spte_to_pfn(*sptep));
 
        return was_writable;
@@ -1617,10 +1643,10 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
        bool flush = false;
 
        for_each_rmap_spte(rmap_head, &iter, sptep)
-               if (spte_ad_enabled(*sptep))
-                       flush |= spte_clear_dirty(sptep);
+               if (spte_ad_need_write_protect(*sptep))
+                       flush |= spte_wrprot_for_clear_dirty(sptep);
                else
-                       flush |= wrprot_ad_disabled_spte(sptep);
+                       flush |= spte_clear_dirty(sptep);
 
        return flush;
 }
@@ -1631,6 +1657,11 @@ static bool spte_set_dirty(u64 *sptep)
 
        rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
 
+       /*
+        * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
+        * do not bother adding back write access to pages marked
+        * SPTE_AD_WRPROT_ONLY_MASK.
+        */
        spte |= shadow_dirty_mask;
 
        return mmu_spte_update(sptep, spte);
@@ -2622,7 +2653,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
               shadow_user_mask | shadow_x_mask | shadow_me_mask;
 
        if (sp_ad_disabled(sp))
-               spte |= shadow_acc_track_value;
+               spte |= SPTE_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;
 
@@ -2968,7 +2999,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
        sp = page_header(__pa(sptep));
        if (sp_ad_disabled(sp))
-               spte |= shadow_acc_track_value;
+               spte |= SPTE_AD_DISABLED_MASK;
+       else if (kvm_vcpu_ad_need_write_protect(vcpu))
+               spte |= SPTE_AD_WRPROT_ONLY_MASK;
 
        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
index 41abc62..e76eb4f 100644 (file)
@@ -2610,7 +2610,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 
                /* VM-entry exception error code */
                if (CC(has_error_code &&
-                      vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)))
+                      vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
                        return -EINVAL;
 
                /* VM-entry interruption-info field: reserved bits */
index 4dea0e0..3e9c059 100644 (file)
@@ -262,6 +262,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct x86_pmu_capability x86_pmu;
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;
@@ -283,8 +284,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        if (!pmu->version)
                return;
 
+       perf_get_x86_pmu_capability(&x86_pmu);
+
        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
-                                       INTEL_PMC_MAX_GENERIC);
+                                        x86_pmu.num_counters_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);
@@ -294,7 +297,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
-                               INTEL_PMC_MAX_FIXED);
+                             x86_pmu.num_counters_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }
index d4575ff..e7970a2 100644 (file)
@@ -209,6 +209,11 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
        struct page *page;
        unsigned int i;
 
+       if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
+               l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+               return 0;
+       }
+
        if (!enable_ept) {
                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
                return 0;
@@ -7995,12 +8000,10 @@ static int __init vmx_init(void)
         * contain 'auto' which will be turned into the default 'cond'
         * mitigation mode.
         */
-       if (boot_cpu_has(X86_BUG_L1TF)) {
-               r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
-               if (r) {
-                       vmx_exit();
-                       return r;
-               }
+       r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+       if (r) {
+               vmx_exit();
+               return r;
        }
 
 #ifdef CONFIG_KEXEC_CORE
index 0ed07d8..661e2bf 100644 (file)
@@ -92,8 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
 
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
+#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
 
 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@@ -212,7 +212,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
-       { "largepages", VM_STAT(lpages) },
+       { "largepages", VM_STAT(lpages, .mode = 0444) },
        { "max_mmu_page_hash_collisions",
                VM_STAT(max_mmu_page_hash_collisions) },
        { NULL }
@@ -885,34 +885,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       unsigned long old_cr4 = kvm_read_cr4(vcpu);
-       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
        if (cr4 & CR4_RESERVED_BITS)
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+               return -EINVAL;
+
+       return 0;
+}
+
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       unsigned long old_cr4 = kvm_read_cr4(vcpu);
+       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+
+       if (kvm_valid_cr4(vcpu, cr4))
                return 1;
 
        if (is_long_mode(vcpu)) {
@@ -1161,13 +1169,6 @@ static u32 msrs_to_save[] = {
        MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
        MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
        MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
-       MSR_ARCH_PERFMON_PERFCTR0 + 18, MSR_ARCH_PERFMON_PERFCTR0 + 19,
-       MSR_ARCH_PERFMON_PERFCTR0 + 20, MSR_ARCH_PERFMON_PERFCTR0 + 21,
-       MSR_ARCH_PERFMON_PERFCTR0 + 22, MSR_ARCH_PERFMON_PERFCTR0 + 23,
-       MSR_ARCH_PERFMON_PERFCTR0 + 24, MSR_ARCH_PERFMON_PERFCTR0 + 25,
-       MSR_ARCH_PERFMON_PERFCTR0 + 26, MSR_ARCH_PERFMON_PERFCTR0 + 27,
-       MSR_ARCH_PERFMON_PERFCTR0 + 28, MSR_ARCH_PERFMON_PERFCTR0 + 29,
-       MSR_ARCH_PERFMON_PERFCTR0 + 30, MSR_ARCH_PERFMON_PERFCTR0 + 31,
        MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
        MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
        MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
@@ -1177,13 +1178,6 @@ static u32 msrs_to_save[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
        MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 18, MSR_ARCH_PERFMON_EVENTSEL0 + 19,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 20, MSR_ARCH_PERFMON_EVENTSEL0 + 21,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 22, MSR_ARCH_PERFMON_EVENTSEL0 + 23,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 24, MSR_ARCH_PERFMON_EVENTSEL0 + 25,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 26, MSR_ARCH_PERFMON_EVENTSEL0 + 27,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 28, MSR_ARCH_PERFMON_EVENTSEL0 + 29,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 30, MSR_ARCH_PERFMON_EVENTSEL0 + 31,
 };
 
 static unsigned num_msrs_to_save;
@@ -5097,13 +5091,14 @@ out:
 
 static void kvm_init_msr_list(void)
 {
+       struct x86_pmu_capability x86_pmu;
        u32 dummy[2];
        unsigned i, j;
 
        BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
                         "Please update the fixed PMCs in msrs_to_save[]");
-       BUILD_BUG_ON_MSG(INTEL_PMC_MAX_GENERIC != 32,
-                        "Please update the generic perfctr/eventsel MSRs in msrs_to_save[]");
+
+       perf_get_x86_pmu_capability(&x86_pmu);
 
        for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
                if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
@@ -5145,6 +5140,15 @@ static void kvm_init_msr_list(void)
                                intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
                                continue;
                        break;
+               case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
+                       if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+                           min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+                               continue;
+                       break;
+               case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
+                       if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+                           min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+                               continue;
                }
                default:
                        break;
@@ -8714,10 +8718,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-                       (sregs->cr4 & X86_CR4_OSXSAVE))
-               return  -EINVAL;
-
        if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
                /*
                 * When EFER.LME and CR0.PG are set, the processor is in
@@ -8736,7 +8736,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                        return -EINVAL;
        }
 
-       return 0;
+       return kvm_valid_cr4(vcpu, sregs->cr4);
 }
 
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
index b7375dc..c126571 100644 (file)
@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops)
                __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
-                * AMD, like Intel, supports the EAX hint and EAX=0xf
-                * means, do not enter any deep C-state and we use it
+                * AMD's MWAITX, like Intel's MWAIT, supports the EAX hint;
+                * EAX=0xf0 means "do not enter any deep C-state", and we use it
                 * here in delay() to minimize wakeup latency.
                 */
                __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
index c806b57..48bcada 100644 (file)
@@ -24,6 +24,4 @@ obj-y                         += bus_numa.o
 obj-$(CONFIG_AMD_NB)           += amd_bus.o
 obj-$(CONFIG_PCI_CNB20LE_QUIRK)        += broadcom_bus.o
 
-ifeq ($(CONFIG_PCI_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PCI_DEBUG)    += -DDEBUG
index 9acab6a..1e59df0 100644 (file)
@@ -135,7 +135,7 @@ static void pcibios_fixup_device_resources(struct pci_dev *dev)
                * resource so the kernel doesn't attempt to assign
                * it later on in pci_assign_unassigned_resources
                */
-               for (bar = 0; bar <= PCI_STD_RESOURCE_END; bar++) {
+               for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                        bar_r = &dev->resource[bar];
                        if (bar_r->start == 0 && bar_r->end != 0) {
                                bar_r->flags = 0;
index 527e69b..e723559 100644 (file)
@@ -589,6 +589,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
 
 /*
+ * Device [1022:7914]
+ * When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
+ */
+static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
+{
+       dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
+       dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
+
+/*
  * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
  *
  * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
index 43867bc..00c6211 100644 (file)
@@ -382,7 +382,7 @@ static void pci_fixed_bar_fixup(struct pci_dev *dev)
            PCI_DEVFN(2, 2) == dev->devfn)
                return;
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
                dev->resource[i].end = dev->resource[i].start + size - 1;
                dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
index 2e565e6..01a085d 100644 (file)
@@ -1,8 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
  * Numascale NumaConnect-specific PCI code
  *
  * Copyright (C) 2012 Numascale AS. All rights reserved.
index c202e1b..425e025 100644 (file)
@@ -917,9 +917,6 @@ static void __init kexec_enter_virtual_mode(void)
 
        if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
                runtime_code_page_mkexec();
-
-       /* clean DUMMY object */
-       efi_delete_dummy_variable();
 #endif
 }
 
index 0d3365c..a04551e 100644 (file)
@@ -57,19 +57,7 @@ static efi_system_table_t __init *xen_efi_probe(void)
                return NULL;
 
        /* Here we know that Xen runs on EFI platform. */
-
-       efi.get_time                 = xen_efi_get_time;
-       efi.set_time                 = xen_efi_set_time;
-       efi.get_wakeup_time          = xen_efi_get_wakeup_time;
-       efi.set_wakeup_time          = xen_efi_set_wakeup_time;
-       efi.get_variable             = xen_efi_get_variable;
-       efi.get_next_variable        = xen_efi_get_next_variable;
-       efi.set_variable             = xen_efi_set_variable;
-       efi.query_variable_info      = xen_efi_query_variable_info;
-       efi.update_capsule           = xen_efi_update_capsule;
-       efi.query_capsule_caps       = xen_efi_query_capsule_caps;
-       efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
-       efi.reset_system             = xen_efi_reset_system;
+       xen_efi_runtime_setup();
 
        efi_systab_xen.tables = info->cfg.addr;
        efi_systab_xen.nr_tables = info->cfg.nent;
index 750f46a..205b117 100644 (file)
@@ -269,19 +269,41 @@ void xen_reboot(int reason)
                BUG();
 }
 
+static int reboot_reason = SHUTDOWN_reboot;
+static bool xen_legacy_crash;
 void xen_emergency_restart(void)
 {
-       xen_reboot(SHUTDOWN_reboot);
+       xen_reboot(reboot_reason);
 }
 
 static int
 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       if (!kexec_crash_loaded())
-               xen_reboot(SHUTDOWN_crash);
+       if (!kexec_crash_loaded()) {
+               if (xen_legacy_crash)
+                       xen_reboot(SHUTDOWN_crash);
+
+               reboot_reason = SHUTDOWN_crash;
+
+               /*
+                * If panic_timeout==0 then we are supposed to wait forever.
+                * However, to preserve original dom0 behavior we have to drop
+                * into hypervisor. (domU behavior is controlled by its
+                * config file)
+                */
+               if (panic_timeout == 0)
+                       panic_timeout = -1;
+       }
        return NOTIFY_DONE;
 }
 
+static int __init parse_xen_legacy_crash(char *arg)
+{
+       xen_legacy_crash = true;
+       return 0;
+}
+early_param("xen_legacy_crash", parse_xen_legacy_crash);
+
 static struct notifier_block xen_panic_block = {
        .notifier_call = xen_panic_event,
        .priority = INT_MIN
index a9dcd87..611b98a 100644 (file)
@@ -56,7 +56,7 @@
                reg = <0xf0100000 0x03f00000>;
 
                     // BUS_ADDRESS(3)  CPU_PHYSICAL(1)  SIZE(2)
-               ranges = <0x01000000 0x0 0xf0000000  0xf0000000  0x0 0x00010000>,
+               ranges = <0x01000000 0x0 0x00000000  0xf0000000  0x0 0x00010000>,
                         <0x02000000 0x0 0xf4000000  0xf4000000  0x0 0x08000000>;
 
                     // PCI_DEVICE(3)  INT#(1)  CONTROLLER(PHANDLE)  CONTROLLER_DATA(2)
index aeb15f4..be8b2be 100644 (file)
@@ -148,7 +148,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
                        "       getex   %0\n"
                        "       beqz    %0, 1b\n"
                        : "=&a" (tmp)
-                       : "a" (~mask), "a" (p)
+                       : "a" (mask), "a" (p)
                        : "memory");
 }
 
index 6792928..3f80386 100644 (file)
@@ -100,7 +100,7 @@ do {                                                                        \
        case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
        case 8: {                                                       \
                     __typeof__(*ptr) __v64 = x;                        \
-                    retval = __copy_to_user(ptr, &__v64, 8);           \
+                    retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;     \
                     break;                                             \
                }                                                       \
        default: __put_user_bad();                                      \
@@ -132,14 +132,14 @@ do {                                                                      \
 #define __check_align_1  ""
 
 #define __check_align_2                                \
-       "   _bbci.l %3,  0, 1f          \n"     \
-       "   movi    %0, %4              \n"     \
+       "   _bbci.l %[addr], 0, 1f      \n"     \
+       "   movi    %[err], %[efault]   \n"     \
        "   _j      2f                  \n"
 
 #define __check_align_4                                \
-       "   _bbsi.l %3,  0, 0f          \n"     \
-       "   _bbci.l %3,  1, 1f          \n"     \
-       "0: movi    %0, %4              \n"     \
+       "   _bbsi.l %[addr], 0, 0f      \n"     \
+       "   _bbci.l %[addr], 1, 1f      \n"     \
+       "0: movi    %[err], %[efault]   \n"     \
        "   _j      2f                  \n"
 
 
@@ -151,40 +151,40 @@ do {                                                                      \
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __put_user_asm(x, addr, err, align, insn, cb)  \
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
 __asm__ __volatile__(                                  \
        __check_align_##align                           \
-       "1: "insn"  %2, %3, 0           \n"             \
+       "1: "insn"  %[x], %[addr], 0    \n"             \
        "2:                             \n"             \
        "   .section  .fixup,\"ax\"     \n"             \
        "   .align 4                    \n"             \
        "   .literal_position           \n"             \
        "5:                             \n"             \
-       "   movi   %1, 2b               \n"             \
-       "   movi   %0, %4               \n"             \
-       "   jx     %1                   \n"             \
+       "   movi   %[tmp], 2b           \n"             \
+       "   movi   %[err], %[efault]    \n"             \
+       "   jx     %[tmp]               \n"             \
        "   .previous                   \n"             \
        "   .section  __ex_table,\"a\"  \n"             \
        "   .long       1b, 5b          \n"             \
        "   .previous"                                  \
-       :"=r" (err), "=r" (cb)                          \
-       :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+       :[err] "+r"(err_), [tmp] "=r"(cb)               \
+       :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
 
 #define __get_user_nocheck(x, ptr, size)                       \
 ({                                                             \
-       long __gu_err, __gu_val;                                \
-       __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;             \
+       long __gu_err;                                          \
+       __get_user_size((x), (ptr), (size), __gu_err);          \
        __gu_err;                                               \
 })
 
 #define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
-       long __gu_err = -EFAULT, __gu_val = 0;                          \
+       long __gu_err = -EFAULT;                                        \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
-       if (access_ok(__gu_addr, size))                 \
-               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       if (access_ok(__gu_addr, size))                                 \
+               __get_user_size((x), __gu_addr, (size), __gu_err);      \
+       else                                                            \
+               (x) = 0;                                                \
        __gu_err;                                                       \
 })
 
@@ -198,8 +198,17 @@ do {                                                                       \
        case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
        case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
        case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
-       case 8: retval = __copy_from_user(&x, ptr, 8);    break;        \
-       default: (x) = __get_user_bad();                                \
+       case 8: {                                                       \
+               u64 __x;                                                \
+               if (unlikely(__copy_from_user(&__x, ptr, 8))) {         \
+                       retval = -EFAULT;                               \
+                       (x) = 0;                                        \
+               } else {                                                \
+                       (x) = *(__force __typeof__((ptr)))&__x;         \
+               }                                                       \
+               break;                                                  \
+       }                                                               \
+       default: (x) = 0; __get_user_bad();                             \
        }                                                               \
 } while (0)
 
@@ -208,25 +217,28 @@ do {                                                                      \
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__(                  \
-       __check_align_##align                   \
-       "1: "insn"  %2, %3, 0           \n"     \
-       "2:                             \n"     \
-       "   .section  .fixup,\"ax\"     \n"     \
-       "   .align 4                    \n"     \
-       "   .literal_position           \n"     \
-       "5:                             \n"     \
-       "   movi   %1, 2b               \n"     \
-       "   movi   %2, 0                \n"     \
-       "   movi   %0, %4               \n"     \
-       "   jx     %1                   \n"     \
-       "   .previous                   \n"     \
-       "   .section  __ex_table,\"a\"  \n"     \
-       "   .long       1b, 5b          \n"     \
-       "   .previous"                          \
-       :"=r" (err), "=r" (cb), "=r" (x)        \
-       :"r" (addr), "i" (-EFAULT), "0" (err))
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do {                                                   \
+       u32 __x = 0;                                    \
+       __asm__ __volatile__(                           \
+               __check_align_##align                   \
+               "1: "insn"  %[x], %[addr], 0    \n"     \
+               "2:                             \n"     \
+               "   .section  .fixup,\"ax\"     \n"     \
+               "   .align 4                    \n"     \
+               "   .literal_position           \n"     \
+               "5:                             \n"     \
+               "   movi   %[tmp], 2b           \n"     \
+               "   movi   %[err], %[efault]    \n"     \
+               "   jx     %[tmp]               \n"     \
+               "   .previous                   \n"     \
+               "   .section  __ex_table,\"a\"  \n"     \
+               "   .long       1b, 5b          \n"     \
+               "   .previous"                          \
+               :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+               :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+       (x_) = (__force __typeof__(*(addr_)))__x;       \
+} while (0)
 
 
 /*
index 04f19de..4092555 100644 (file)
@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
 // FIXME EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
 extern long common_exception_return;
 EXPORT_SYMBOL(common_exception_return);
 
index b6f20be..5d21027 100644 (file)
@@ -1362,7 +1362,7 @@ int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
 {
        struct blkg_policy_data *pd_prealloc = NULL;
-       struct blkcg_gq *blkg;
+       struct blkcg_gq *blkg, *pinned_blkg = NULL;
        int ret;
 
        if (blkcg_policy_enabled(q, pol))
@@ -1370,49 +1370,82 @@ int blkcg_activate_policy(struct request_queue *q,
 
        if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
-pd_prealloc:
-       if (!pd_prealloc) {
-               pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, &blkcg_root);
-               if (!pd_prealloc) {
-                       ret = -ENOMEM;
-                       goto out_bypass_end;
-               }
-       }
-
+retry:
        spin_lock_irq(&q->queue_lock);
 
-       /* blkg_list is pushed at the head, reverse walk to init parents first */
+       /* blkg_list is pushed at the head, reverse walk to allocate parents first */
        list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;
 
                if (blkg->pd[pol->plid])
                        continue;
 
-               pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, &blkcg_root);
-               if (!pd)
-                       swap(pd, pd_prealloc);
+               /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
+               if (blkg == pinned_blkg) {
+                       pd = pd_prealloc;
+                       pd_prealloc = NULL;
+               } else {
+                       pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
+                                             blkg->blkcg);
+               }
+
                if (!pd) {
+                       /*
+                        * GFP_NOWAIT failed.  Free the existing one and
+                        * prealloc for @blkg w/ GFP_KERNEL.
+                        */
+                       if (pinned_blkg)
+                               blkg_put(pinned_blkg);
+                       blkg_get(blkg);
+                       pinned_blkg = blkg;
+
                        spin_unlock_irq(&q->queue_lock);
-                       goto pd_prealloc;
+
+                       if (pd_prealloc)
+                               pol->pd_free_fn(pd_prealloc);
+                       pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
+                                                      blkg->blkcg);
+                       if (pd_prealloc)
+                               goto retry;
+                       else
+                               goto enomem;
                }
 
                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
-               if (pol->pd_init_fn)
-                       pol->pd_init_fn(pd);
        }
 
+       /* all allocated, init in the same order */
+       if (pol->pd_init_fn)
+               list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+                       pol->pd_init_fn(blkg->pd[pol->plid]);
+
        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
 
        spin_unlock_irq(&q->queue_lock);
-out_bypass_end:
+out:
        if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
+       if (pinned_blkg)
+               blkg_put(pinned_blkg);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
+
+enomem:
+       /* alloc failed, nothing's initialized yet, free everything */
+       spin_lock_irq(&q->queue_lock);
+       list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               if (blkg->pd[pol->plid]) {
+                       pol->pd_free_fn(blkg->pd[pol->plid]);
+                       blkg->pd[pol->plid] = NULL;
+               }
+       }
+       spin_unlock_irq(&q->queue_lock);
+       ret = -ENOMEM;
+       goto out;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
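
The rework above follows a common kernel pattern: try a GFP_NOWAIT allocation while holding the queue lock, and if that fails, drop the lock, preallocate with GFP_KERNEL, and restart. A minimal userspace sketch of the same shape, with a pthread mutex and malloc standing in for the queue lock and the policy allocator (all names here are illustrative, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for an allocation that may fail while the lock is held. */
static void *alloc_nowait(size_t size)
{
        return (rand() % 4) ? malloc(size) : NULL;
}

/*
 * Attach per-item data to every slot.  On a fast-path failure, drop the
 * lock, preallocate with a blocking allocation, and retry.  (Cleanup of
 * already-filled slots on failure is omitted for brevity; the kernel
 * version frees them on its enomem path.)
 */
static bool attach_data(void **slots, size_t n, size_t size)
{
        void *prealloc = NULL;
        size_t i = 0;

retry:
        pthread_mutex_lock(&lock);
        for (; i < n; i++) {
                void *p = prealloc ? prealloc : alloc_nowait(size);

                prealloc = NULL;
                if (!p) {
                        pthread_mutex_unlock(&lock);
                        prealloc = malloc(size);        /* blocking prealloc */
                        if (!prealloc)
                                return false;
                        goto retry;
                }
                slots[i] = p;
        }
        pthread_mutex_unlock(&lock);
        return true;
}

int main(void)
{
        void *slots[8];

        return attach_data(slots, 8, 64) ? 0 : 1;
}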
 
index 6e3b15f..ec79115 100644 (file)
@@ -1992,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                /* bypass scheduler for flush rq */
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
+       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+                               !blk_queue_nonrot(q))) {
                /*
                 * Use plugging if we have a ->commit_rqs() hook as well, as
                 * we know the driver uses bd->last in a smart fashion.
+                *
+                * Use normal plugging if this disk is a slow HDD, as sequential
+                * IO may benefit a lot from plug merging.
                 */
                unsigned int request_count = plug->rq_count;
                struct request *last = NULL;
@@ -2012,6 +2016,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                }
 
                blk_add_rq_to_plug(plug, rq);
+       } else if (q->elevator) {
+               blk_mq_sched_insert_request(rq, false, true, true);
        } else if (plug && !blk_queue_nomerges(q)) {
                /*
                 * We do limited plugging. If the bio can be merged, do that.
@@ -2035,8 +2041,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
                }
-       } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
-                       !data.hctx->dispatch_busy)) {
+       } else if ((q->nr_hw_queues > 1 && is_sync) ||
+                       !data.hctx->dispatch_busy) {
                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_sched_insert_request(rq, false, true, true);
index 61b635b..6564606 100644 (file)
@@ -160,24 +160,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
        return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
        /*
         * Hit max in previous round, stop here
         */
        if (rqd->scaled_max)
-               return;
+               return false;
 
        rqd->scale_step--;
 
        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+       return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and false if scaling
+ * down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
        /*
         * Stop scaling down when we've hit the limit. This also prevents
@@ -185,7 +188,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
         * keep up.
         */
        if (rqd->max_depth == 1)
-               return;
+               return false;
 
        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
@@ -194,6 +197,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 
        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
+       return true;
 }
 
 struct rq_qos_wait_data {
index 08a09db..2bc43e9 100644 (file)
@@ -108,16 +108,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 {
-       struct rq_qos *cur, *prev = NULL;
-       for (cur = q->rq_qos; cur; cur = cur->next) {
-               if (cur == rqos) {
-                       if (prev)
-                               prev->next = rqos->next;
-                       else
-                               q->rq_qos = cur;
+       struct rq_qos **cur;
+
+       for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+               if (*cur == rqos) {
+                       *cur = rqos->next;
                        break;
                }
-               prev = cur;
        }
 
        blk_mq_debugfs_unregister_rqos(rqos);
@@ -130,8 +127,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                 acquire_inflight_cb_t *acquire_inflight_cb,
                 cleanup_cb_t *cleanup_cb);
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
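
The new rq_qos_del() above replaces the prev/cur walk with the pointer-to-pointer idiom, so removing the head and removing an interior element share one code path. A minimal userspace sketch of the idiom on a hypothetical singly linked list (not the kernel structures):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

/*
 * Unlink the first node matching val.  Walking a pointer to the "next"
 * field means there is no special case for the list head, which is the
 * same trick the rewritten rq_qos_del() plays with &q->rq_qos.
 */
static void list_del_value(struct node **head, int val)
{
        struct node **cur;

        for (cur = head; *cur; cur = &(*cur)->next) {
                if ((*cur)->val == val) {
                        struct node *victim = *cur;

                        *cur = victim->next;
                        free(victim);
                        break;
                }
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 3; i >= 1; i--) {
                struct node *n = malloc(sizeof(*n));

                n->val = i;
                n->next = head;
                head = n;
        }

        list_del_value(&head, 2);

        for (struct node *n = head; n; n = n->next)
                printf("%d\n", n->val);         /* prints 1 then 3 */

        while (head) {
                struct node *next = head->next;

                free(head);
                head = next;
        }
        return 0;
}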
index 8af553a..8641ba9 100644 (file)
@@ -308,7 +308,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-       rq_depth_scale_up(&rwb->rq_depth);
+       if (!rq_depth_scale_up(&rwb->rq_depth))
+               return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
@@ -317,7 +318,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-       rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+       if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+               return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, "scale down");
index 5437059..076ba73 100644 (file)
@@ -616,7 +616,8 @@ out:
 
 static inline bool elv_support_iosched(struct request_queue *q)
 {
-       if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
+       if (!q->mq_ops ||
+           (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
 }
index 4e95a97..b4c7619 100644 (file)
@@ -129,7 +129,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
                { 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x84, 0x01 },
 
        /* tables */
-       [OPAL_TABLE_TABLE]
+       [OPAL_TABLE_TABLE] =
                { 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 },
        [OPAL_LOCKINGRANGE_GLOBAL] =
                { 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x01 },
@@ -372,8 +372,8 @@ static void check_geometry(struct opal_dev *dev, const void *data)
 {
        const struct d0_geometry_features *geo = data;
 
-       dev->align = geo->alignment_granularity;
-       dev->lowest_lba = geo->lowest_aligned_lba;
+       dev->align = be64_to_cpu(geo->alignment_granularity);
+       dev->lowest_lba = be64_to_cpu(geo->lowest_aligned_lba);
 }
 
 static int execute_step(struct opal_dev *dev,
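
The fix above byte-swaps the geometry fields because the Opal level 0 discovery response carries them big-endian; without the conversion they are misread on little-endian hosts. A userspace equivalent of the conversion using be64toh() from <endian.h> (glibc on Linux; other libcs may place these helpers elsewhere):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A 64-bit value as it would arrive from the device (big-endian). */
        uint64_t wire = htobe64(4096);

        printf("%llu\n", (unsigned long long)be64toh(wire)); /* prints 4096 */
        return 0;
}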
index 3b25259..a1a858a 100644 (file)
@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
                        pcc_data[pcc_ss_id]->refcount--;
                        if (!pcc_data[pcc_ss_id]->refcount) {
                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
-                               pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
                                kfree(pcc_data[pcc_ss_id]);
+                               pcc_data[pcc_ss_id] = NULL;
                        }
                }
        }
index 8f9a28a..8b0de8a 100644 (file)
@@ -403,7 +403,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
                pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
                        p->flags, p->processor_PD, p->memory_PD);
 
-       if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+       if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
                target = find_mem_target(p->memory_PD);
                if (!target) {
                        pr_debug("HMAT: Memory Domain missing from SRAT\n");
index 2261713..930a49f 100644 (file)
@@ -162,21 +162,23 @@ void acpi_processor_ppc_init(int cpu)
        struct acpi_processor *pr = per_cpu(processors, cpu);
        int ret;
 
+       if (!pr)
+               return;
+
        ret = dev_pm_qos_add_request(get_cpu_device(cpu),
                                     &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
                                     INT_MAX);
-       if (ret < 0) {
+       if (ret < 0)
                pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
                       ret);
-               return;
-       }
 }
 
 void acpi_processor_ppc_exit(int cpu)
 {
        struct acpi_processor *pr = per_cpu(processors, cpu);
 
-       dev_pm_qos_remove_request(&pr->perflib_req);
+       if (pr)
+               dev_pm_qos_remove_request(&pr->perflib_req);
 }
 
 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
index ec2638f..8227c7d 100644 (file)
@@ -130,21 +130,23 @@ void acpi_thermal_cpufreq_init(int cpu)
        struct acpi_processor *pr = per_cpu(processors, cpu);
        int ret;
 
+       if (!pr)
+               return;
+
        ret = dev_pm_qos_add_request(get_cpu_device(cpu),
                                     &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
                                     INT_MAX);
-       if (ret < 0) {
+       if (ret < 0)
                pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
                       ret);
-               return;
-       }
 }
 
 void acpi_thermal_cpufreq_exit(int cpu)
 {
        struct acpi_processor *pr = per_cpu(processors, cpu);
 
-       dev_pm_qos_remove_request(&pr->thermal_req);
+       if (pr)
+               dev_pm_qos_remove_request(&pr->thermal_req);
 }
 #else                          /* ! CONFIG_CPU_FREQ */
 static int cpufreq_get_max_state(unsigned int cpu)
index 9fa77d7..2af937a 100644 (file)
@@ -362,19 +362,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
                },
        },
        /*
-        * https://bugzilla.kernel.org/show_bug.cgi?id=196907
-        * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
-        * S0 Idle firmware interface.
-        */
-       {
-       .callback = init_default_s3,
-       .ident = "Dell XPS13 9360",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
-               },
-       },
-       /*
         * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
         * the Low Power S0 Idle firmware interface (see
         * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
index c0a4912..5b9ac21 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/seq_file.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/pid_namespace.h>
 #include <linux/security.h>
@@ -66,6 +67,7 @@
 #include <linux/task_work.h>
 
 #include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
 
 #include <asm/cacheflush.h>
 
@@ -2876,7 +2878,7 @@ static void binder_transaction(struct binder_proc *proc,
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
-       e->context_name = proc->context->name;
+       strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
 
        if (reply) {
                binder_inner_proc_lock(proc);
index 6d79a1b..d42a8b2 100644 (file)
@@ -156,7 +156,7 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 }
 
 /**
- * binder_alloc_buffer_lookup() - get buffer given user ptr
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
  * @alloc:     binder_alloc for this proc
  * @user_ptr:  User pointer to buffer data
  *
index bd47f7f..ae99109 100644 (file)
@@ -130,7 +130,7 @@ struct binder_transaction_log_entry {
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
-       const char *context_name;
+       char context_name[BINDERFS_MAX_NAME + 1];
 };
 
 struct binder_transaction_log {
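
The binder changes above switch the transaction log from storing a pointer to the context name to copying it into a fixed-size buffer, so the log entry no longer depends on the lifetime of the binderfs-backed string. A minimal userspace analogue of the bounded copy, with snprintf standing in for the kernel's strscpy() and an illustrative buffer size in place of BINDERFS_MAX_NAME:

#include <stdio.h>

#define NAME_MAX_LEN 255        /* illustrative limit */

struct log_entry {
        char context_name[NAME_MAX_LEN + 1];
};

int main(void)
{
        struct log_entry e;
        const char *context_name = "binder";    /* may come from elsewhere */

        /* Bounded, NUL-terminated copy: the entry owns its own storage. */
        snprintf(e.context_name, sizeof(e.context_name), "%s", context_name);
        printf("%s\n", e.context_name);
        return 0;
}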
index dd92faf..05c2b32 100644 (file)
@@ -1600,7 +1600,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
         */
        if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
                return;
-       if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+       /* Skip applying the quirk on Denverton and beyond */
+       if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
                return;
 
        /*
index 76d0f9d..58e09ff 100644 (file)
@@ -4791,27 +4791,6 @@ void ata_scsi_hotplug(struct work_struct *work)
                return;
        }
 
-       /*
-        * XXX - UGLY HACK
-        *
-        * The block layer suspend/resume path is fundamentally broken due
-        * to freezable kthreads and workqueue and may deadlock if a block
-        * device gets removed while resume is in progress.  I don't know
-        * what the solution is short of removing freezable kthreads and
-        * workqueues altogether.
-        *
-        * The following is an ugly hack to avoid kicking off device
-        * removal while freezer is active.  This is a joke but does avoid
-        * this particular deadlock scenario.
-        *
-        * https://bugzilla.kernel.org/show_bug.cgi?id=62801
-        * http://marc.info/?l=linux-kernel&m=138695698516487
-        */
-#ifdef CONFIG_FREEZER
-       while (pm_freezing)
-               msleep(10);
-#endif
-
        DPRINTK("ENTER\n");
        mutex_lock(&ap->scsi_scan_mutex);
 
index cfd0cf2..e01a3a6 100644 (file)
@@ -422,7 +422,7 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
 #ifdef ATP867X_DEBUG
        atp867x_check_res(pdev);
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++)
+       for (i = 0; i < PCI_STD_NUM_BARS; i++)
                printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
                        (unsigned long long)(host->iomap[i]));
 #endif
index 5694601..6f261fb 100644 (file)
@@ -2325,7 +2325,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         // Make sure this is a SATA controller by counting the number of bars
         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
         // it's an IDE controller and we ignore it.
-       for (bar = 0; bar < 6; bar++)
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                if (pci_resource_start(pdev, bar) == 0)
                        return -ENODEV;
 
index 2db62d9..7bd9cd3 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fwnode.h>
@@ -3179,6 +3180,8 @@ void device_shutdown(void)
        wait_for_device_probe();
        device_block_probing();
 
+       cpufreq_suspend();
+
        spin_lock(&devices_kset->list_lock);
        /*
         * Walk the devices list backward, shutting down each in turn.
index 6bea4f3..55907c2 100644 (file)
@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
        pfn >>= PAGE_SHIFT;
        if (!pfn_valid(pfn))
                return -ENXIO;
+       /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+       if (!pfn_to_online_page(pfn))
+               return -EIO;
        ret = soft_offline_page(pfn_to_page(pfn), 0);
        return ret == 0 ? count : ret;
 }
index b6c6c7d..b230beb 100644 (file)
@@ -241,12 +241,8 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
 }
 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 
-/**
- * platform_get_irq_byname - get an IRQ for a device by name
- * @dev: platform device
- * @name: IRQ name
- */
-int platform_get_irq_byname(struct platform_device *dev, const char *name)
+static int __platform_get_irq_byname(struct platform_device *dev,
+                                    const char *name)
 {
        struct resource *r;
 
@@ -262,12 +258,48 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
        if (r)
                return r->start;
 
-       dev_err(&dev->dev, "IRQ %s not found\n", name);
        return -ENXIO;
 }
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an IRQ like platform_get_irq(), but by name rather than by index.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+       int ret;
+
+       ret = __platform_get_irq_byname(dev, name);
+       if (ret < 0 && ret != -EPROBE_DEFER)
+               dev_err(&dev->dev, "IRQ %s not found\n", name);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
 
 /**
+ * platform_get_irq_byname_optional - get an optional IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an optional IRQ by name like platform_get_irq_byname(), except that it
+ * does not print an error message if the IRQ cannot be obtained.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname_optional(struct platform_device *dev,
+                                    const char *name)
+{
+       return __platform_get_irq_byname(dev, name);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
+
+/**
  * platform_add_devices - add a numbers of platform devices
  * @devs: array of platform devices to add
  * @num: number of platform devices in array
index 1410fa8..f6f77ea 100644 (file)
@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
                blk_queue_write_cache(lo->lo_queue, true, false);
 
+       if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+               /* In case of direct I/O, match underlying block size */
+               unsigned short bsize = bdev_logical_block_size(
+                       inode->i_sb->s_bdev);
+
+               blk_queue_logical_block_size(lo->lo_queue, bsize);
+               blk_queue_physical_block_size(lo->lo_queue, bsize);
+               blk_queue_io_min(lo->lo_queue, bsize);
+       }
+
        loop_update_rotational(lo);
        loop_update_dio(lo);
        set_capacity(lo->lo_disk, size);
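
The new block in loop_set_fd() matters because direct I/O to the backing device must be aligned to that device's logical block size; advertising the same size on the loop queue keeps upper layers aligned. For reference, the logical block size of a block device can be queried from userspace with the BLKSSZGET ioctl (the device path below is just an example):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/sda", O_RDONLY);    /* example device path */
        int bsize = 0;

        if (fd < 0 || ioctl(fd, BLKSSZGET, &bsize) < 0) {
                perror("BLKSSZGET");
                return 1;
        }
        printf("logical block size: %d\n", bsize);
        close(fd);
        return 0;
}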
index ac07e8c..478aa86 100644 (file)
@@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd)
        if (refcount_dec_and_mutex_lock(&nbd->refs,
                                        &nbd_index_mutex)) {
                idr_remove(&nbd_index_idr, nbd->index);
-               mutex_unlock(&nbd_index_mutex);
                nbd_dev_remove(nbd);
+               mutex_unlock(&nbd_index_mutex);
        }
 }
 
index eabc116..3d7fdea 100644 (file)
@@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
                zone->wp = zone->start;
                break;
        default:
-               cmd->error = BLK_STS_NOTSUPP;
-               break;
+               return BLK_STS_NOTSUPP;
        }
        return BLK_STS_OK;
 }
index 7c4350c..3913667 100644 (file)
@@ -6639,10 +6639,13 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
        queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
        ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
                            ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
-       if (ret > 0)
+       if (ret > 0) {
                ret = rbd_dev->acquire_err;
-       else if (!ret)
-               ret = -ETIMEDOUT;
+       } else {
+               cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+               if (!ret)
+                       ret = -ETIMEDOUT;
+       }
 
        if (ret) {
                rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
index d58a359..4285e75 100644 (file)
@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
 static ssize_t backing_dev_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
+       struct file *file;
        struct zram *zram = dev_to_zram(dev);
-       struct file *file = zram->backing_dev;
        char *p;
        ssize_t ret;
 
        down_read(&zram->init_lock);
-       if (!zram->backing_dev) {
+       file = zram->backing_dev;
+       if (!file) {
                memcpy(buf, "none\n", 5);
                up_read(&zram->init_lock);
                return 5;
index c2f7de9..de434fe 100644 (file)
@@ -2520,4 +2520,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
        else
                add_device_randomness(buf, size);
 }
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
index b57fe09..9dd6185 100644 (file)
@@ -683,7 +683,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst
        { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
        { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
        { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
-       { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" },
+       { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
        { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
        { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
        { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
@@ -828,8 +828,8 @@ static struct ti_dt_clk dra7xx_clks[] = {
        DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
        DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
        DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
-       DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"),
-       DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"),
+       DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
+       DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
        DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
        DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
        DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
index d8c2bd4..11ff701 100644 (file)
@@ -25,7 +25,9 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
 
        struct clock_event_device *clkevt = &to->clkevt;
 
-       of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
+       if (of_irq->percpu)
+               free_percpu_irq(of_irq->irq, clkevt);
+       else
                free_irq(of_irq->irq, clkevt);
 }
 
index c52d6fa..bffc11b 100644 (file)
@@ -2737,14 +2737,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-       .shutdown = cpufreq_suspend,
-};
-
 struct kobject *cpufreq_global_kobject;
 EXPORT_SYMBOL(cpufreq_global_kobject);
 
@@ -2756,8 +2748,6 @@ static int __init cpufreq_core_init(void)
        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);
 
-       register_syscore_ops(&cpufreq_syscore_ops);
-
        return 0;
 }
 module_param(off, int, 0444);
index 42a8f3f..7090025 100644 (file)
@@ -471,7 +471,7 @@ unlock:
        if (pfence_excl)
                *pfence_excl = fence_excl;
        else if (fence_excl)
-               shared[++shared_count] = fence_excl;
+               shared[shared_count++] = fence_excl;
 
        if (!shared_count) {
                kfree(shared);
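
The fix above moves the increment after the indexing: appending to an array with a separate count must store at the current count and then bump it. Pre-incrementing both skips a slot and reports a count that does not match what was stored, as this small standalone check shows:

#include <assert.h>

int main(void)
{
        int shared[4] = { 10, 20 };
        unsigned int shared_count = 2;

        /* Correct append: store at index shared_count, then increment it. */
        shared[shared_count++] = 30;
        assert(shared_count == 3 && shared[2] == 30);

        /*
         * The buggy form shared[++shared_count] = 30 would have bumped the
         * count to 3 first, stored 30 at index 3 (outside the reported
         * count), and left shared[2] as an unfilled slot inside it.
         */
        return 0;
}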
index 35ed56b..1e21fc3 100644 (file)
@@ -408,7 +408,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
                bytes = ~0ull;
        else if (size & 0x8000)
                bytes = (u64)(size & 0x7fff) << 10;
-       else if (size != 0x7fff)
+       else if (size != 0x7fff || dm->length < 0x20)
                bytes = (u64)size << 20;
        else
                bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
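
The added length check matters because the 32-bit Extended Size field at offset 0x1C only exists in newer SMBIOS Type 17 records; shorter records must treat 0x7FFF as a literal size in MB. A standalone sketch of the decoding rules, simplified from the SMBIOS Type 17 layout:

#include <stdint.h>
#include <stdio.h>

/*
 * Decode the Type 17 "Size" field into bytes.
 * size:     16-bit Size field (offset 0x0C)
 * ext_size: 32-bit Extended Size field (offset 0x1C), in MB
 * length:   structure length from the SMBIOS header
 */
static uint64_t mem_device_bytes(uint16_t size, uint32_t ext_size, uint8_t length)
{
        if (size == 0xffff)
                return ~0ull;                           /* unknown */
        if (size & 0x8000)
                return (uint64_t)(size & 0x7fff) << 10; /* value is in KB */
        if (size != 0x7fff || length < 0x20)
                return (uint64_t)size << 20;            /* value is in MB */
        return (uint64_t)ext_size << 20;                /* Extended Size, MB */
}

int main(void)
{
        printf("%llu\n", (unsigned long long)mem_device_bytes(0x2000, 0, 0x28));
        /* 8192 MB -> 8589934592 bytes */
        return 0;
}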
index addf074..b1af0de 100644 (file)
@@ -381,7 +381,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
                       pcie->device_id.vendor_id, pcie->device_id.device_id);
                p = pcie->device_id.class_code;
-               printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+               printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
        }
        if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
                printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
index 8d3e778..69f00f7 100644 (file)
@@ -267,6 +267,9 @@ static __init int efivar_ssdt_load(void)
        void *data;
        int ret;
 
+       if (!efivar_ssdt[0])
+               return 0;
+
        ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 
        list_for_each_entry_safe(entry, aux, &entries, list) {
index 3e290f9..76b0c35 100644 (file)
@@ -76,7 +76,7 @@ static u16 checksum(void)
        return chksum;
 }
 
-int __init efi_rci2_sysfs_init(void)
+static int __init efi_rci2_sysfs_init(void)
 {
        struct kobject *tables_kobj;
        int ret = -ENOMEM;
index 1d3f5ca..ebd7977 100644 (file)
@@ -40,7 +40,7 @@ int __init efi_tpm_eventlog_init(void)
 {
        struct linux_efi_tpm_eventlog *log_tbl;
        struct efi_tcg2_final_events_table *final_tbl;
-       unsigned int tbl_size;
+       int tbl_size;
        int ret = 0;
 
        if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -75,16 +75,28 @@ int __init efi_tpm_eventlog_init(void)
                goto out;
        }
 
-       tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
-                                           + sizeof(final_tbl->version)
-                                           + sizeof(final_tbl->nr_events),
-                                           final_tbl->nr_events,
-                                           log_tbl->log);
+       tbl_size = 0;
+       if (final_tbl->nr_events != 0) {
+               void *events = (void *)efi.tpm_final_log
+                               + sizeof(final_tbl->version)
+                               + sizeof(final_tbl->nr_events);
+
+               tbl_size = tpm2_calc_event_log_size(events,
+                                                   final_tbl->nr_events,
+                                                   log_tbl->log);
+       }
+
+       if (tbl_size < 0) {
+               pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+               goto out_calc;
+       }
+
        memblock_reserve((unsigned long)final_tbl,
                         tbl_size + sizeof(*final_tbl));
-       early_memunmap(final_tbl, sizeof(*final_tbl));
        efi_tpm_final_log_size = tbl_size;
 
+out_calc:
+       early_memunmap(final_tbl, sizeof(*final_tbl));
 out:
        early_memunmap(log_tbl, sizeof(*log_tbl));
        return ret;
index dda525c..5c6f2a7 100644 (file)
@@ -52,7 +52,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
        if (max_len - consumed < *entry_len)
                return VPD_FAIL;
 
-       consumed += decoded_len;
+       consumed += *entry_len;
        *_consumed = consumed;
        return VPD_OK;
 }
index fe7a73f..bb287f3 100644 (file)
@@ -530,11 +530,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
                }
 
                for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
-                       girq = irq_find_mapping(chip->irq.domain,
-                                       bank * SPRD_EIC_PER_BANK_NR + n);
+                       u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+                       girq = irq_find_mapping(chip->irq.domain, offset);
 
                        generic_handle_irq(girq);
-                       sprd_eic_toggle_trigger(chip, girq, n);
+                       sprd_eic_toggle_trigger(chip, girq, offset);
                }
        }
 }
index 4d835f9..86a10c8 100644 (file)
@@ -293,8 +293,9 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
        chip->irq_eoi(data);
 }
 
-static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+static int intel_mid_irq_init_hw(struct gpio_chip *chip)
 {
+       struct intel_mid_gpio *priv = gpiochip_get_data(chip);
        void __iomem *reg;
        unsigned base;
 
@@ -309,6 +310,8 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
                reg = gpio_reg(&priv->chip, base, GEDR);
                writel(~0, reg);
        }
+
+       return 0;
 }
 
 static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
@@ -372,6 +375,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
 
        girq = &priv->chip.irq;
        girq->chip = &intel_mid_irqchip;
+       girq->init_hw = intel_mid_irq_init_hw;
        girq->parent_handler = intel_mid_irq_handler;
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -384,9 +388,8 @@ static int intel_gpio_probe(struct pci_dev *pdev,
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_simple_irq;
 
-       intel_mid_irq_init_hw(priv);
-
        pci_set_drvdata(pdev, priv);
+
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
                dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
index 6bb9741..e9e47c0 100644 (file)
@@ -294,8 +294,9 @@ static struct irq_chip lp_irqchip = {
        .flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
 {
+       struct lp_gpio *lg = gpiochip_get_data(chip);
        unsigned long reg;
        unsigned base;
 
@@ -307,6 +308,8 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
                reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
                outl(0xffffffff, reg);
        }
+
+       return 0;
 }
 
 static int lp_gpio_probe(struct platform_device *pdev)
@@ -364,6 +367,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
 
                girq = &gc->irq;
                girq->chip = &lp_irqchip;
+               girq->init_hw = lp_gpio_irq_init_hw;
                girq->parent_handler = lp_gpio_irq_handler;
                girq->num_parents = 1;
                girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -373,9 +377,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
                        return -ENOMEM;
                girq->parents[0] = (unsigned)irq_rc->start;
                girq->default_type = IRQ_TYPE_NONE;
-               girq->handler = handle_simple_irq;
-
-               lp_gpio_irq_init_hw(lg);
+               girq->handler = handle_bad_irq;
        }
 
        ret = devm_gpiochip_add_data(dev, gc, lg);
index 47d05e3..faf86ea 100644 (file)
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
        case 0:
                val = MAX77620_CNFG_GPIO_DBNC_None;
                break;
-       case 1 ... 8:
+       case 1000 ... 8000:
                val = MAX77620_CNFG_GPIO_DBNC_8ms;
                break;
-       case 9 ... 16:
+       case 9000 ... 16000:
                val = MAX77620_CNFG_GPIO_DBNC_16ms;
                break;
-       case 17 ... 32:
+       case 17000 ... 32000:
                val = MAX77620_CNFG_GPIO_DBNC_32ms;
                break;
        default:
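
The widened case ranges above reflect that gpiolib passes the debounce period in microseconds, not milliseconds, so the old 1...32 ranges never matched real requests. A simplified standalone version of the mapping, with hypothetical constants standing in for the MAX77620_CNFG_GPIO_DBNC_* register values:

#include <stdio.h>

/* Illustrative encodings; the real values live in the max77620 headers. */
enum { DBNC_NONE = 0, DBNC_8MS = 1, DBNC_16MS = 2, DBNC_32MS = 3 };

/* Map a debounce period in microseconds to the closest supported setting,
 * mirroring the switch in the driver above. */
static int debounce_us_to_reg(unsigned int debounce_us)
{
        if (debounce_us == 0)
                return DBNC_NONE;
        if (debounce_us >= 1000 && debounce_us <= 8000)
                return DBNC_8MS;
        if (debounce_us >= 9000 && debounce_us <= 16000)
                return DBNC_16MS;
        if (debounce_us >= 17000 && debounce_us <= 32000)
                return DBNC_32MS;
        return -1;      /* unsupported period */
}

int main(void)
{
        printf("%d\n", debounce_us_to_reg(16000));      /* prints 2 (16 ms) */
        return 0;
}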
index 4f27ddf..2f1e9da 100644 (file)
@@ -362,8 +362,9 @@ static void mrfld_irq_handler(struct irq_desc *desc)
        chained_irq_exit(irqchip, desc);
 }
 
-static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
+static int mrfld_irq_init_hw(struct gpio_chip *chip)
 {
+       struct mrfld_gpio *priv = gpiochip_get_data(chip);
        void __iomem *reg;
        unsigned int base;
 
@@ -375,6 +376,8 @@ static void mrfld_irq_init_hw(struct mrfld_gpio *priv)
                reg = gpio_reg(&priv->chip, base, GFER);
                writel(0, reg);
        }
+
+       return 0;
 }
 
 static const char *mrfld_gpio_get_pinctrl_dev_name(struct mrfld_gpio *priv)
@@ -447,6 +450,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 
        girq = &priv->chip.irq;
        girq->chip = &mrfld_irqchip;
+       girq->init_hw = mrfld_irq_init_hw;
        girq->parent_handler = mrfld_irq_handler;
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -455,11 +459,10 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
        if (!girq->parents)
                return -ENOMEM;
        girq->parents[0] = pdev->irq;
+       girq->first = irq_base;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_bad_irq;
 
-       mrfld_irq_init_hw(priv);
-
        pci_set_drvdata(pdev, priv);
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
index 1eea2c6..80ea49f 100644 (file)
@@ -317,7 +317,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
        transitory = flags & OF_GPIO_TRANSITORY;
 
        ret = gpiod_request(desc, label);
-       if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+       if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
                return desc;
        if (ret)
                return ERR_PTR(ret);
index bdbc164..104ed29 100644 (file)
@@ -86,6 +86,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
                                struct lock_class_key *lock_key,
                                struct lock_class_key *request_key);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
 
@@ -1406,6 +1407,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        machine_gpiochip_add(chip);
 
+       ret = gpiochip_irqchip_init_hw(chip);
+       if (ret)
+               goto err_remove_acpi_chip;
+
        ret = gpiochip_irqchip_init_valid_mask(chip);
        if (ret)
                goto err_remove_acpi_chip;
@@ -1622,6 +1627,16 @@ static struct gpio_chip *find_chip_by_name(const char *name)
  * The following is irqchip helper code for gpiochips.
  */
 
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
+{
+       struct gpio_irq_chip *girq = &gc->irq;
+
+       if (!girq->init_hw)
+               return 0;
+
+       return girq->init_hw(gc);
+}
+
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
 {
        struct gpio_irq_chip *girq = &gc->irq;
@@ -2446,8 +2461,13 @@ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
 {
        return 0;
 }
-
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+{
+       return 0;
+}
+
 static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
 {
        return 0;
@@ -3070,8 +3090,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
                if (!ret)
                        goto set_output_value;
                /* Emulate open drain by not actively driving the line high */
-               if (value)
-                       return gpiod_direction_input(desc);
+               if (value) {
+                       ret = gpiod_direction_input(desc);
+                       goto set_output_flag;
+               }
        }
        else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
                ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
@@ -3079,8 +3101,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
                if (!ret)
                        goto set_output_value;
                /* Emulate open source by not actively driving the line low */
-               if (!value)
-                       return gpiod_direction_input(desc);
+               if (!value) {
+                       ret = gpiod_direction_input(desc);
+                       goto set_output_flag;
+               }
        } else {
                gpio_set_config(gc, gpio_chip_hwgpio(desc),
                                PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -3088,6 +3112,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 
 set_output_value:
        return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+       /*
+        * When emulating open-source or open-drain functionalities by not
+        * When emulating open-source or open-drain functionality by not
+        * actively driving the line (setting the mode to input), we still
+        * need to set the IS_OUT flag; otherwise we won't be able to set
+        * the line value anymore.
+       if (ret == 0)
+               set_bit(FLAG_IS_OUT, &desc->flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_direction_output);
 
@@ -3448,8 +3483,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 
        if (value) {
                ret = chip->direction_input(chip, offset);
-               if (!ret)
-                       clear_bit(FLAG_IS_OUT, &desc->flags);
        } else {
                ret = chip->direction_output(chip, offset, 0);
                if (!ret)
@@ -3479,8 +3512,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
                        set_bit(FLAG_IS_OUT, &desc->flags);
        } else {
                ret = chip->direction_input(chip, offset);
-               if (!ret)
-                       clear_bit(FLAG_IS_OUT, &desc->flags);
        }
        trace_gpio_direction(desc_to_gpio(desc), !value, ret);
        if (ret < 0)
index 42e2c1f..00962a6 100644 (file)
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
        amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
        amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
-       amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+       amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
index eba42c7..82155ac 100644 (file)
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
        u32 val = 0;
        u32 count = 0;
        struct device *dev;
-       struct i2s_platform_data *i2s_pdata;
+       struct i2s_platform_data *i2s_pdata = NULL;
 
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
        adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
                                                        GFP_KERNEL);
 
-       if (adev->acp.acp_cell == NULL)
-               return -ENOMEM;
+       if (adev->acp.acp_cell == NULL) {
+               r = -ENOMEM;
+               goto failure;
+       }
 
        adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
        if (adev->acp.acp_res == NULL) {
-               kfree(adev->acp.acp_cell);
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto failure;
        }
 
        i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
        if (i2s_pdata == NULL) {
-               kfree(adev->acp.acp_res);
-               kfree(adev->acp.acp_cell);
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto failure;
        }
 
        switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
        r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
                                                                ACP_DEVS);
        if (r)
-               return r;
+               goto failure;
 
        for (i = 0; i < ACP_DEVS ; i++) {
                dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
                r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
                if (r) {
                        dev_err(dev, "Failed to add dev to genpd\n");
-                       return r;
+                       goto failure;
                }
        }
 
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
                        break;
                if (--count == 0) {
                        dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
-                       return -ETIMEDOUT;
+                       r = -ETIMEDOUT;
+                       goto failure;
                }
                udelay(100);
        }
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
                        break;
                if (--count == 0) {
                        dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
-                       return -ETIMEDOUT;
+                       r = -ETIMEDOUT;
+                       goto failure;
                }
                udelay(100);
        }
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
        val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
        cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
        return 0;
+
+failure:
+       kfree(i2s_pdata);
+       kfree(adev->acp.acp_res);
+       kfree(adev->acp.acp_cell);
+       kfree(adev->acp.acp_genpd);
+       return r;
 }
 
 /**
index 7bcf86c..61e38e4 100644 (file)
@@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
        r = amdgpu_bo_create_list_entry_array(&args->in, &info);
        if (r)
-               goto error_free;
+               return r;
 
        switch (args->in.operation) {
        case AMDGPU_BO_LIST_OP_CREATE:
@@ -283,8 +283,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
                mutex_unlock(&fpriv->bo_list_lock);
                if (r < 0) {
-                       amdgpu_bo_list_put(list);
-                       return r;
+                       goto error_put_list;
                }
 
                handle = r;
@@ -306,9 +305,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                mutex_unlock(&fpriv->bo_list_lock);
 
                if (IS_ERR(old)) {
-                       amdgpu_bo_list_put(list);
                        r = PTR_ERR(old);
-                       goto error_free;
+                       goto error_put_list;
                }
 
                amdgpu_bo_list_put(old);
@@ -325,8 +323,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
        return 0;
 
+error_put_list:
+       amdgpu_bo_list_put(list);
+
 error_free:
-       if (info)
-               kvfree(info);
+       kvfree(info);
        return r;
 }
index 264677a..2a00a36 100644 (file)
  * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
  * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       34
+#define KMS_DRIVER_MINOR       35
 #define KMS_DRIVER_PATCHLEVEL  0
 
 #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
@@ -1047,6 +1048,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                return -ENODEV;
        }
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+       if (!amdgpu_si_support) {
+               switch (flags & AMD_ASIC_MASK) {
+               case CHIP_TAHITI:
+               case CHIP_PITCAIRN:
+               case CHIP_VERDE:
+               case CHIP_OLAND:
+               case CHIP_HAINAN:
+                       dev_info(&pdev->dev,
+                                "SI support provided by radeon.\n");
+                       dev_info(&pdev->dev,
+                                "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+                               );
+                       return -ENODEV;
+               }
+       }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+       if (!amdgpu_cik_support) {
+               switch (flags & AMD_ASIC_MASK) {
+               case CHIP_KAVERI:
+               case CHIP_BONAIRE:
+               case CHIP_HAWAII:
+               case CHIP_KABINI:
+               case CHIP_MULLINS:
+                       dev_info(&pdev->dev,
+                                "CIK support provided by radeon.\n");
+                       dev_info(&pdev->dev,
+                                "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+                               );
+                       return -ENODEV;
+               }
+       }
+#endif
+
        /* Get rid of things like offb */
        ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
        if (ret)
index 554a59b..6ee4021 100644 (file)
@@ -165,6 +165,7 @@ struct amdgpu_gfx_config {
        uint32_t num_sc_per_sh;
        uint32_t num_packer_per_sc;
        uint32_t pa_sc_tile_steering_override;
+       uint64_t tcc_disabled_mask;
 };
 
 struct amdgpu_cu_info {
index f614752..d55f5ba 100644 (file)
@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        struct amdgpu_device *adev;
        int r, acpi_status;
 
-#ifdef CONFIG_DRM_AMDGPU_SI
-       if (!amdgpu_si_support) {
-               switch (flags & AMD_ASIC_MASK) {
-               case CHIP_TAHITI:
-               case CHIP_PITCAIRN:
-               case CHIP_VERDE:
-               case CHIP_OLAND:
-               case CHIP_HAINAN:
-                       dev_info(dev->dev,
-                                "SI support provided by radeon.\n");
-                       dev_info(dev->dev,
-                                "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
-                               );
-                       return -ENODEV;
-               }
-       }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-       if (!amdgpu_cik_support) {
-               switch (flags & AMD_ASIC_MASK) {
-               case CHIP_KAVERI:
-               case CHIP_BONAIRE:
-               case CHIP_HAWAII:
-               case CHIP_KABINI:
-               case CHIP_MULLINS:
-                       dev_info(dev->dev,
-                                "CIK support provided by radeon.\n");
-                       dev_info(dev->dev,
-                                "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
-                               );
-                       return -ENODEV;
-               }
-       }
-#endif
-
        adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
        if (adev == NULL) {
                return -ENOMEM;
@@ -787,6 +752,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        dev_info.pa_sc_tile_steering_override =
                                adev->gfx.config.pa_sc_tile_steering_override;
 
+               dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
                return copy_to_user(out, &dev_info,
                                    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
        }
index e2fb141..5251352 100644 (file)
@@ -603,14 +603,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;
 
-#if 0
        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }
-#endif
 
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
index b81bb41..38b06ae 100644 (file)
@@ -1384,7 +1384,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 {
        struct pci_dev *root = adev->pdev->bus->self;
-       int bridge_pos, gpu_pos;
        u32 speed_cntl, current_data_rate;
        int i;
        u16 tmp16;
@@ -1419,12 +1418,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
                DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
        }
 
-       bridge_pos = pci_pcie_cap(root);
-       if (!bridge_pos)
-               return;
-
-       gpu_pos = pci_pcie_cap(adev->pdev);
-       if (!gpu_pos)
+       if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
                return;
 
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1434,14 +1428,17 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
                        u16 bridge_cfg2, gpu_cfg2;
                        u32 max_lw, current_lw, tmp;
 
-                       pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                       pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                 &bridge_cfg);
+                       pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+                                                 &gpu_cfg);
 
                        tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
 
                        tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+                                                  tmp16);
 
                        tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
                        max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1465,15 +1462,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
 
                        for (i = 0; i < 10; i++) {
                                /* check status */
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_DEVSTA,
+                                                         &tmp16);
                                if (tmp16 & PCI_EXP_DEVSTA_TRPND)
                                        break;
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &bridge_cfg);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &gpu_cfg);
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &bridge_cfg2);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &gpu_cfg2);
 
                                tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
                                tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1486,26 +1491,45 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
                                msleep(100);
 
                                /* linkctl */
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+                                                          tmp16);
 
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(adev->pdev,
+                                                          PCI_EXP_LNKCTL,
+                                                          tmp16);
 
                                /* linkctl2 */
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (bridge_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(root,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
+
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (gpu_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(adev->pdev,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
 
                                tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
                                tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1520,15 +1544,16 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
        speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
        WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
 
-       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-       tmp16 &= ~0xf;
+       pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+       tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-               tmp16 |= 3; /* gen3 */
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
-               tmp16 |= 2; /* gen2 */
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
        else
-               tmp16 |= 1; /* gen1 */
-       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+       pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
 
        speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
        speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
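The hunk above swaps the magic values (~0xf, 1, 2, 3) for the named Target Link Speed constants of the PCIe Link Control 2 register. A minimal userspace sketch of the same read-modify-write, using the field values defined in include/uapi/linux/pci_regs.h and a hypothetical register value in place of a real config-space read:

    #include <stdio.h>
    #include <stdint.h>

    /* Field layout of PCI_EXP_LNKCTL2 (values as in include/uapi/linux/pci_regs.h) */
    #define PCI_EXP_LNKCTL2_TLS        0x000f  /* Target Link Speed */
    #define PCI_EXP_LNKCTL2_TLS_2_5GT  0x0001  /* gen1 */
    #define PCI_EXP_LNKCTL2_TLS_5_0GT  0x0002  /* gen2 */
    #define PCI_EXP_LNKCTL2_TLS_8_0GT  0x0003  /* gen3 */

    /* Replace only the 4-bit TLS field, leaving the rest of the register alone. */
    static uint16_t set_target_link_speed(uint16_t lnkctl2, uint16_t tls)
    {
            lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS;
            lnkctl2 |= tls;
            return lnkctl2;
    }

    int main(void)
    {
            uint16_t reg = 0x0a42;  /* hypothetical value read from config space */

            printf("0x%04x -> 0x%04x\n", reg,
                   set_target_link_speed(reg, PCI_EXP_LNKCTL2_TLS_8_0GT));
            return 0;
    }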
index 638c821..957811b 100644 (file)
@@ -1691,6 +1691,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+{
+       /* TCCs are global (not instanced). */
+       uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+                              RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+
+       adev->gfx.config.tcc_disabled_mask =
+               REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
+               (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
+}
+
 static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 {
        u32 tmp;
@@ -1702,6 +1713,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 
        gfx_v10_0_setup_rb(adev);
        gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
+       gfx_v10_0_get_tcc_info(adev);
        adev->gfx.config.pa_sc_tile_steering_override =
                gfx_v10_0_init_pa_sc_tile_steering_override(adev);
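The new gfx_v10_0_get_tcc_info() ORs the static and user TCC-disable registers, then packs the low and high disable fields into one 32-bit mask. A small standalone sketch of that packing; the field masks and shifts here are illustrative only, not the real CGTS_TCC_DISABLE layout, and FIELD_GET stands in for the driver's REG_GET_FIELD macro:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace analog of REG_GET_FIELD(): extract a field given mask and shift. */
    #define FIELD_GET(val, mask, shift)   (((val) & (mask)) >> (shift))

    #define TCC_DISABLE_MASK      0x0000ff00u   /* hypothetical */
    #define TCC_DISABLE_SHIFT     8
    #define HI_TCC_DISABLE_MASK   0x00ff0000u   /* hypothetical */
    #define HI_TCC_DISABLE_SHIFT  16

    int main(void)
    {
            /* OR of the static and user disable registers, made-up value */
            uint32_t tcc_disable = 0x00120300u;
            uint32_t tcc_disabled_mask;

            tcc_disabled_mask =
                    FIELD_GET(tcc_disable, TCC_DISABLE_MASK, TCC_DISABLE_SHIFT) |
                    (FIELD_GET(tcc_disable, HI_TCC_DISABLE_MASK, HI_TCC_DISABLE_SHIFT) << 16);

            printf("tcc_disabled_mask = 0x%08x\n", tcc_disabled_mask);  /* 0x00120003 */
            return 0;
    }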
 
index 85393a9..de9b995 100644 (file)
@@ -317,10 +317,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
        struct smu_context *smu = &adev->smu;
 
        if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-               amdgpu_inc_vram_lost(adev);
+               if (!adev->in_suspend)
+                       amdgpu_inc_vram_lost(adev);
                ret = smu_baco_reset(smu);
        } else {
-               amdgpu_inc_vram_lost(adev);
+               if (!adev->in_suspend)
+                       amdgpu_inc_vram_lost(adev);
                ret = nv_asic_mode1_reset(adev);
        }
 
index fa2f70c..f6e8168 100644 (file)
@@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
-       amdgpu_ring_write(ring, 0xfffffff); /* mask */
+       amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
 }
index 493af42..9f82be8 100644 (file)
@@ -1633,7 +1633,6 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 {
        struct pci_dev *root = adev->pdev->bus->self;
-       int bridge_pos, gpu_pos;
        u32 speed_cntl, current_data_rate;
        int i;
        u16 tmp16;
@@ -1668,12 +1667,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
                DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
        }
 
-       bridge_pos = pci_pcie_cap(root);
-       if (!bridge_pos)
-               return;
-
-       gpu_pos = pci_pcie_cap(adev->pdev);
-       if (!gpu_pos)
+       if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
                return;
 
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1682,14 +1676,17 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
                        u16 bridge_cfg2, gpu_cfg2;
                        u32 max_lw, current_lw, tmp;
 
-                       pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                       pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                 &bridge_cfg);
+                       pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL,
+                                                 &gpu_cfg);
 
                        tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
 
                        tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL,
+                                                  tmp16);
 
                        tmp = RREG32_PCIE(PCIE_LC_STATUS1);
                        max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -1706,15 +1703,23 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
                        }
 
                        for (i = 0; i < 10; i++) {
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_DEVSTA,
+                                                         &tmp16);
                                if (tmp16 & PCI_EXP_DEVSTA_TRPND)
                                        break;
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &bridge_cfg);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &gpu_cfg);
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &bridge_cfg2);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &gpu_cfg2);
 
                                tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
                                tmp |= LC_SET_QUIESCE;
@@ -1726,25 +1731,44 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
 
                                mdelay(100);
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+                                                          tmp16);
 
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
-
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
-                               pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+                               pcie_capability_write_word(adev->pdev,
+                                                          PCI_EXP_LNKCTL,
+                                                          tmp16);
+
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (bridge_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(root,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
+
+                               pcie_capability_read_word(adev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (gpu_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(adev->pdev,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
 
                                tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
                                tmp &= ~LC_SET_QUIESCE;
@@ -1757,15 +1781,16 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
        speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
        WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-       pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-       tmp16 &= ~0xf;
+       pcie_capability_read_word(adev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+       tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
+
        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
-               tmp16 |= 3;
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
-               tmp16 |= 2;
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
        else
-               tmp16 |= 1;
-       pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+       pcie_capability_write_word(adev->pdev, PCI_EXP_LNKCTL2, tmp16);
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
index f70658a..f8ab80c 100644 (file)
@@ -558,12 +558,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 {
        switch (soc15_asic_reset_method(adev)) {
                case AMD_RESET_METHOD_BACO:
-                       amdgpu_inc_vram_lost(adev);
+                       if (!adev->in_suspend)
+                               amdgpu_inc_vram_lost(adev);
                        return soc15_asic_baco_reset(adev);
                case AMD_RESET_METHOD_MODE2:
                        return soc15_mode2_reset(adev);
                default:
-                       amdgpu_inc_vram_lost(adev);
+                       if (!adev->in_suspend)
+                               amdgpu_inc_vram_lost(adev);
                        return soc15_asic_mode1_reset(adev);
        }
 }
@@ -771,8 +773,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_DC)
                 else if (amdgpu_device_has_dc_support(adev))
                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-#       warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
 #endif
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                break;
index 8cab6da..a52f0b1 100644 (file)
@@ -2385,8 +2385,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-       if (adev->asic_type == CHIP_RENOIR)
-               dm->dc->debug.disable_stutter = true;
 
        return 0;
 fail:
@@ -6019,7 +6017,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
+#ifdef CONFIG_DEBUG_FS
        enum amdgpu_dm_pipe_crc_source source;
+#endif
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
index 1787b9b..76d5488 100644 (file)
@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 318e9c2..89620ad 100644 (file)
@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 83e1878..21a657e 100644 (file)
@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 8b85e52..7c52f7f 100644 (file)
@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 4625df9..643ccb0 100644 (file)
@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 59305e4..1599bb9 100644 (file)
@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index b4e3ce2..5a2763d 100644 (file)
@@ -1077,6 +1077,7 @@ struct clock_source *dcn20_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
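The kfree(clk_src) additions in the dce*/dcn* clock_source_create() hunks above all plug the same leak: the object was allocated, construction failed, and the error path returned NULL without freeing it. A minimal sketch of the corrected pattern, with hypothetical names rather than the DC structures:

    #include <stdlib.h>

    struct clock_source { int dummy; };

    /* Stand-in for the construct step; returns nonzero on success, 0 on failure. */
    static int construct(struct clock_source *cs) { (void)cs; return 0; }

    static struct clock_source *clock_source_create(void)
    {
            struct clock_source *cs = calloc(1, sizeof(*cs));

            if (!cs)
                    return NULL;

            if (construct(cs))
                    return cs;

            free(cs);       /* the missing step: release the allocation on failure */
            return NULL;
    }

    int main(void)
    {
            struct clock_source *cs = clock_source_create();

            free(cs);       /* free(NULL) is a no-op */
            return 0;
    }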
index 8cd9de8..ef673bf 100644 (file)
@@ -3,7 +3,17 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+       cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+       cc_stack_align := -mstack-alignment=16
+endif
+
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+
+ifdef CONFIG_CC_IS_CLANG
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
+endif
 
 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
 
index 456cd0e..3b6ed60 100644 (file)
@@ -39,9 +39,6 @@
  * ways. Unless there is something clearly wrong with it the code should
  * remain as-is as it provides us with a guarantee from HW that it is correct.
  */
-
-typedef unsigned int uint;
-
 typedef struct {
        double DPPCLK;
        double DISPCLK;
@@ -4774,7 +4771,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
                                mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
                                for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-                                       uint m;
+                                       unsigned int m;
 
                                        locals->cursor_bw[k] = 0;
                                        locals->cursor_bw_pre[k] = 0;
@@ -5285,7 +5282,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
        double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
        double FullDETBufferingTimeYStutterCriticalPlane = 0;
        double TimeToFinishSwathTransferStutterCriticalPlane = 0;
-       uint k, j;
+       unsigned int k, j;
 
        mode_lib->vba.TotalActiveDPP = 0;
        mode_lib->vba.TotalDCCActiveDPP = 0;
@@ -5507,7 +5504,7 @@ static void CalculateDCFCLKDeepSleep(
                double DPPCLK[],
                double *DCFCLKDeepSleep)
 {
-       uint k;
+       unsigned int k;
        double DisplayPipeLineDeliveryTimeLuma;
        double DisplayPipeLineDeliveryTimeChroma;
        //double   DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
@@ -5727,7 +5724,7 @@ static void CalculatePixelDeliveryTimes(
                double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
 {
        double req_per_swath_ub;
-       uint k;
+       unsigned int k;
 
        for (k = 0; k < NumberOfActivePlanes; ++k) {
                if (VRatio[k] <= 1) {
@@ -5869,7 +5866,7 @@ static void CalculateMetaAndPTETimes(
        unsigned int dpte_groups_per_row_chroma_ub;
        unsigned int num_group_per_lower_vm_stage;
        unsigned int num_req_per_lower_vm_stage;
-       uint k;
+       unsigned int k;
 
        for (k = 0; k < NumberOfActivePlanes; ++k) {
                if (GPUVMEnable == true) {
index 33960fb..4acf139 100644 (file)
@@ -843,6 +843,8 @@ static int smu_sw_init(void *handle)
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;
 
+       mutex_init(&smu->sensor_lock);
+
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
index f1f0720..d493a3f 100644 (file)
@@ -1018,6 +1018,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
+       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1044,6 +1045,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
        default:
                ret = smu_smc_read_sensor(smu, sensor, data, size);
        }
+       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
index 6109815..23171a4 100644 (file)
@@ -344,6 +344,7 @@ struct smu_context
        const struct smu_funcs          *funcs;
        const struct pptable_funcs      *ppt_funcs;
        struct mutex                    mutex;
+       struct mutex                    sensor_lock;
        uint64_t pool_size;
 
        struct smu_table_context        smu_table;
index 12c0e46..0b46140 100644 (file)
@@ -547,7 +547,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table= &smu->smu_table;
        int ret = 0;
 
-       if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+       if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
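The switch from HZ / 1000 to msecs_to_jiffies(100) matters twice over: with integer division HZ / 1000 truncates to 0 jiffies on the common HZ=100/250/300 configurations, so the cached metrics table expired almost immediately, and the new code caches it for 100 ms independent of HZ. A small userspace illustration of the truncation (plain arithmetic, not the kernel helpers):

    #include <stdio.h>

    /* How many whole ticks various HZ settings give for HZ/1000 vs. for 100 ms. */
    int main(void)
    {
            const int hz_values[] = { 100, 250, 300, 1000 };

            for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
                    int hz = hz_values[i];

                    printf("HZ=%4d: HZ/1000 = %d ticks, 100 ms = %d ticks\n",
                           hz, hz / 1000, (100 * hz + 999) / 1000);
            }
            return 0;
    }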
@@ -1386,6 +1386,7 @@ static int navi10_read_sensor(struct smu_context *smu,
        if(!data || !size)
                return -EINVAL;
 
+       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1409,6 +1410,7 @@ static int navi10_read_sensor(struct smu_context *smu,
        default:
                ret = smu_smc_read_sensor(smu, sensor, data, size);
        }
+       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
index dc75444..23c1201 100644 (file)
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        count = SMU_MAX_SMIO_LEVELS;
                for (level = 0; level < count; level++) {
                        table->SmioTable2.Pattern[level].Voltage =
-                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
                        /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
                        table->SmioTable2.Pattern[level].Smio =
                                (uint8_t) level;
index 7c960b0..ae18fbc 100644 (file)
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        count = SMU_MAX_SMIO_LEVELS;
                for (level = 0; level < count; level++) {
                        table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
-                                       data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                                       data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
                        /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
                        table->SmioTable2.Pattern[level].Smio =
                                (uint8_t) level;
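Both the polaris10 and vegam hunks fix the same copy-paste bug: inside the per-level loop the voltage table was indexed with the loop bound count instead of the loop variable level, so every SMIO pattern picked up an out-of-range entry. A tiny standalone illustration with a hypothetical table, not the powerplay structures:

    #include <stdio.h>

    #define VOLTAGE_SCALE 4

    int main(void)
    {
            const unsigned int entries[4] = { 800, 850, 900, 950 };  /* mV, made up */
            unsigned int count = 4, level;
            unsigned int pattern[4];

            for (level = 0; level < count; level++)
                    pattern[level] = entries[level] * VOLTAGE_SCALE;  /* entries[count] would read past the table */

            for (level = 0; level < count; level++)
                    printf("level %u -> %u\n", level, pattern[level]);
            return 0;
    }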
index 64386ee..bbd8ebd 100644 (file)
@@ -3023,6 +3023,7 @@ static int vega20_read_sensor(struct smu_context *smu,
        if(!data || !size)
                return -EINVAL;
 
+       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3048,6 +3049,7 @@ static int vega20_read_sensor(struct smu_context *smu,
        default:
                ret = smu_smc_read_sensor(smu, sensor, data, size);
        }
+       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
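The arcturus, navi10 and vega20 hunks all wrap the read_sensor() switch in the new smu->sensor_lock (initialized in smu_sw_init()), serializing concurrent sensor reads. A minimal userspace analog of the pattern using pthreads, with a hypothetical sensor value standing in for the SMU-backed data:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t sensor_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int fan_rpm = 1500;     /* stand-in for SMU-backed data */

    /* All sensor reads go through one lock, as with the new smu->sensor_lock. */
    static int read_sensor(unsigned int *out)
    {
            int ret = 0;

            pthread_mutex_lock(&sensor_lock);
            *out = fan_rpm;                 /* would be a switch over sensor IDs */
            pthread_mutex_unlock(&sensor_lock);

            return ret;
    }

    int main(void)
    {
            unsigned int rpm;

            if (!read_sensor(&rpm))
                    printf("fan: %u rpm\n", rpm);
            return 0;
    }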
index 2851cac..b72840c 100644 (file)
@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
        struct komeda_data_flow_cfg dflow;
        int err;
 
-       if (!writeback_job || !writeback_job->fb) {
+       if (!writeback_job)
                return 0;
-       }
 
        if (!crtc_st->active) {
                DRM_DEBUG_ATOMIC("Cannot write the composition result out on an inactive CRTC.\n");
@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
                                           &komeda_wb_encoder_helper_funcs,
                                           formats, n_formats);
        komeda_put_fourcc_list(formats);
-       if (err)
+       if (err) {
+               kfree(kwb_conn);
                return err;
+       }
 
        drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
 
index 22c0847..875a3a9 100644 (file)
@@ -131,7 +131,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
        struct drm_framebuffer *fb;
        int i, n_planes;
 
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+       if (!conn_state->writeback_job)
                return 0;
 
        fb = conn_state->writeback_job->fb;
@@ -248,7 +248,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
 
        mw_state = to_mw_state(conn_state);
 
-       if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+       if (conn_state->writeback_job) {
                struct drm_framebuffer *fb = conn_state->writeback_job->fb;
 
                DRM_DEV_DEBUG_DRIVER(drm->dev,
index cebc8e6..8a8d605 100644 (file)
@@ -728,6 +728,8 @@ static int tc_set_video_mode(struct tc_data *tc,
        int lower_margin = mode->vsync_start - mode->vdisplay;
        int vsync_len = mode->vsync_end - mode->vsync_start;
        u32 dp0_syncval;
+       u32 bits_per_pixel = 24;
+       u32 in_bw, out_bw;
 
        /*
         * Recommended maximum number of symbols transferred in a transfer unit:
@@ -735,7 +737,10 @@ static int tc_set_video_mode(struct tc_data *tc,
         *              (output active video bandwidth in bytes))
         * Must be less than tu_size.
         */
-       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
+       in_bw = mode->clock * bits_per_pixel / 8;
+       out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+       max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
 
        dev_dbg(tc->dev, "set mode %dx%d\n",
                mode->hdisplay, mode->vdisplay);
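The new max_tu_symbol computation replaces the fixed TU_SIZE_RECOMMENDED - 1 with the ratio of input to output bandwidth scaled by the TU size. A worked example with hypothetical numbers (1080p60 at a 148.5 MHz pixel clock, 24 bpp, 2 DP lanes at HBR; the TU size is taken as 32 purely for illustration, the driver uses its own TU_SIZE_RECOMMENDED):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int mode_clock_khz   = 148500;   /* pixel clock */
            unsigned int bits_per_pixel   = 24;
            unsigned int num_lanes        = 2;
            unsigned int link_rate_khz    = 270000;   /* per-lane symbol rate, 1 byte/symbol */
            unsigned int tu_size          = 32;       /* stand-in for TU_SIZE_RECOMMENDED */

            unsigned int in_bw  = mode_clock_khz * bits_per_pixel / 8;  /* input video bandwidth */
            unsigned int out_bw = num_lanes * link_rate_khz;            /* output link bandwidth */
            unsigned int max_tu_symbol = DIV_ROUND_UP(in_bw * tu_size, out_bw);

            printf("in_bw=%u out_bw=%u max_tu_symbol=%u\n", in_bw, out_bw, max_tu_symbol);  /* 27 */
            return 0;
    }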
index 419381a..14aeaf7 100644 (file)
@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
                return -EINVAL;
        }
 
-       if (writeback_job->out_fence && !writeback_job->fb) {
-               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
-                                connector->base.id, connector->name);
-               return -EINVAL;
+       if (!writeback_job->fb) {
+               if (writeback_job->out_fence) {
+                       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+                                        connector->base.id, connector->name);
+                       return -EINVAL;
+               }
+
+               drm_writeback_cleanup_job(writeback_job);
+               state->writeback_job = NULL;
        }
 
        return 0;
index 82a4cee..6b01771 100644 (file)
@@ -159,6 +159,9 @@ static const struct edid_quirk {
        /* Medion MD 30217 PG */
        { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
+       /* Lenovo G50 */
+       { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
        /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
        { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
index ff138b6..43d9e3b 100644 (file)
@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
        if (job->fb)
                drm_framebuffer_put(job->fb);
 
+       if (job->out_fence)
+               dma_fence_put(job->out_fence);
+
        kfree(job);
 }
 EXPORT_SYMBOL(drm_writeback_cleanup_job);
@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
 {
        unsigned long flags;
        struct drm_writeback_job *job;
+       struct dma_fence *out_fence;
 
        spin_lock_irqsave(&wb_connector->job_lock, flags);
        job = list_first_entry_or_null(&wb_connector->job_queue,
                                       struct drm_writeback_job,
                                       list_entry);
-       if (job) {
+       if (job)
                list_del(&job->list_entry);
-               if (job->out_fence) {
-                       if (status)
-                               dma_fence_set_error(job->out_fence, status);
-                       dma_fence_signal(job->out_fence);
-                       dma_fence_put(job->out_fence);
-               }
-       }
+
        spin_unlock_irqrestore(&wb_connector->job_lock, flags);
 
        if (WARN_ON(!job))
                return;
 
+       out_fence = job->out_fence;
+       if (out_fence) {
+               if (status)
+                       dma_fence_set_error(out_fence, status);
+               dma_fence_signal(out_fence);
+               dma_fence_put(out_fence);
+               job->out_fence = NULL;
+       }
+
        INIT_WORK(&job->cleanup_work, cleanup_work);
        queue_work(system_long_wq, &job->cleanup_work);
 }
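The reworked drm_writeback_signal_completion() now only dequeues the job while holding the spinlock and signals and releases the out-fence after dropping it, keeping potentially heavy fence work out of the IRQ-disabled critical section. A minimal userspace sketch of the same shape, with a pthread mutex standing in for the spinlock and a callback standing in for dma_fence_signal():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct job {
            struct job *next;
            void (*complete)(int status);   /* stand-in for the out-fence */
    };

    static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct job *job_queue;

    static void fence_signal(int status)
    {
            printf("job completed, status=%d\n", status);
    }

    static void signal_completion(int status)
    {
            struct job *job;

            /* Only list manipulation happens under the lock... */
            pthread_mutex_lock(&job_lock);
            job = job_queue;
            if (job)
                    job_queue = job->next;
            pthread_mutex_unlock(&job_lock);

            if (!job)
                    return;

            /* ...the completion work runs after the lock is dropped. */
            job->complete(status);
            free(job);
    }

    int main(void)
    {
            struct job *job = calloc(1, sizeof(*job));

            job->complete = fence_signal;
            job_queue = job;
            signal_completion(0);
            return 0;
    }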
index efb39f3..3250c1b 100644 (file)
@@ -1270,7 +1270,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
                              "disabling port %c DVI/HDMI support\n",
                              port_name(port), info->alternate_ddc_pin,
-                             port_name(p), port_name(port));
+                             port_name(p), port_name(p));
 
                /*
                 * If we have multiple ports supposedly sharing the
@@ -1278,9 +1278,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
                 * port. Otherwise they share the same ddc bin and
                 * system couldn't communicate with them separately.
                 *
-                * Give child device order the priority, first come first
-                * served.
+                * Give inverse child device order the priority,
+                * last one wins. Yes, there are real machines
+                * (eg. Asrock B250M-HDV) where VBT has both
+                * port A and port E with the same AUX ch and
+                * we must pick port E :(
                 */
+               info = &dev_priv->vbt.ddi_port_info[p];
+
                info->supports_dvi = false;
                info->supports_hdmi = false;
                info->alternate_ddc_pin = 0;
@@ -1316,7 +1321,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
                              "disabling port %c DP support\n",
                              port_name(port), info->alternate_aux_channel,
-                             port_name(p), port_name(port));
+                             port_name(p), port_name(p));
 
                /*
                 * If we have multiple ports supposedly sharing the
@@ -1324,9 +1329,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
                 * port. Otherwise they share the same aux channel
                 * and system couldn't communicate with them separately.
                 *
-                * Give child device order the priority, first come first
-                * served.
+                * Give inverse child device order the priority,
+                * last one wins. Yes, there are real machines
+                * (eg. Asrock B250M-HDV) where VBT has both
+                * port A and port E with the same AUX ch and
+                * we must pick port E :(
                 */
+               info = &dev_priv->vbt.ddi_port_info[p];
+
                info->supports_dp = false;
                info->alternate_aux_channel = 0;
        }
index b51d1ce..aa54bb2 100644 (file)
@@ -3280,7 +3280,20 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
-               return 4096;
+               /*
+                * Validated limit is 4k, but 5k should
+                * work apart from the following features:
+                * - Ytile (already limited to 4k)
+                * - FP16 (already limited to 4k)
+                * - render compression (already limited to 4k)
+                * - KVMR sprite and cursor (don't care)
+                * - horizontal panning (TODO verify this)
+                * - pipe and plane scaling (TODO verify this)
+                */
+               if (cpp == 8)
+                       return 4096;
+               else
+                       return 5120;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                /* FIXME AUX plane? */
@@ -7261,7 +7274,7 @@ retry:
        pipe_config->fdi_lanes = lane;
 
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n, false);
+                              link_bw, &pipe_config->fdi_m_n, false, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EDEADLK)
@@ -7508,11 +7521,15 @@ void
 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
                       struct intel_link_m_n *m_n,
-                      bool constant_n)
+                      bool constant_n, bool fec_enable)
 {
-       m_n->tu = 64;
+       u32 data_clock = bits_per_pixel * pixel_clock;
 
-       compute_m_n(bits_per_pixel * pixel_clock,
+       if (fec_enable)
+               data_clock = intel_dp_mode_to_fec_clock(data_clock);
+
+       m_n->tu = 64;
+       compute_m_n(data_clock,
                    link_clock * nlanes * 8,
                    &m_n->gmch_m, &m_n->gmch_n,
                    constant_n);
index e57e696..01fa87a 100644 (file)
@@ -414,7 +414,7 @@ enum phy_fia {
 void intel_link_compute_m_n(u16 bpp, int nlanes,
                            int pixel_clock, int link_clock,
                            struct intel_link_m_n *m_n,
-                           bool constant_n);
+                           bool constant_n, bool fec_enable);
 bool is_ccs_modifier(u64 modifier);
 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
index 921ad0a..57e9f0b 100644 (file)
@@ -78,8 +78,8 @@
 #define DP_DSC_MAX_ENC_THROUGHPUT_0            340000
 #define DP_DSC_MAX_ENC_THROUGHPUT_1            400000
 
-/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
-#define DP_DSC_FEC_OVERHEAD_FACTOR             976
+/* DP DSC FEC Overhead factor = 1/(0.972261) */
+#define DP_DSC_FEC_OVERHEAD_FACTOR             972261
 
 /* Compliance test status bits  */
 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -494,6 +494,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
        return 0;
 }
 
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
+{
+       return div_u64(mul_u32_u32(mode_clock, 1000000U),
+                      DP_DSC_FEC_OVERHEAD_FACTOR);
+}
+
+static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+                                      u32 mode_clock, u32 mode_hdisplay)
+{
+       u32 bits_per_pixel, max_bpp_small_joiner_ram;
+       int i;
+
+       /*
+        * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+        * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
+        * for SST -> TimeSlotsPerMTP is 1,
+        * for MST -> TimeSlotsPerMTP has to be calculated
+        */
+       bits_per_pixel = (link_clock * lane_count * 8) /
+                        intel_dp_mode_to_fec_clock(mode_clock);
+       DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+
+       /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+       max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+       DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+
+       /*
+        * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+        * check, output bpp from small joiner RAM check)
+        */
+       bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+       /* Error out if the max bpp is less than smallest allowed valid bpp */
+       if (bits_per_pixel < valid_dsc_bpp[0]) {
+               DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
+                             bits_per_pixel, valid_dsc_bpp[0]);
+               return 0;
+       }
+
+       /* Find the nearest match in the array of known BPPs from VESA */
+       for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+               if (bits_per_pixel < valid_dsc_bpp[i + 1])
+                       break;
+       }
+       bits_per_pixel = valid_dsc_bpp[i];
+
+       /*
+        * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+        * fractional part is 0
+        */
+       return bits_per_pixel << 4;
+}
+
+static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+                                      int mode_clock, int mode_hdisplay)
+{
+       u8 min_slice_count, i;
+       int max_slice_width;
+
+       if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+               min_slice_count = DIV_ROUND_UP(mode_clock,
+                                              DP_DSC_MAX_ENC_THROUGHPUT_0);
+       else
+               min_slice_count = DIV_ROUND_UP(mode_clock,
+                                              DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+       max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+       if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+               DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+                             max_slice_width);
+               return 0;
+       }
+       /* Also take into account max slice width */
+       min_slice_count = min_t(u8, min_slice_count,
+                               DIV_ROUND_UP(mode_hdisplay,
+                                            max_slice_width));
+
+       /* Find the closest match to the valid slice count values */
+       for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+               if (valid_dsc_slicecount[i] >
+                   drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+                                                   false))
+                       break;
+               if (min_slice_count  <= valid_dsc_slicecount[i])
+                       return valid_dsc_slicecount[i];
+       }
+
+       DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+       return 0;
+}
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
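The rewritten DSC helpers above stop folding the FEC overhead into a 97.6% fudge factor and instead inflate the mode clock by 1/0.972261 via intel_dp_mode_to_fec_clock(), then derive the available link bpp from that. A worked example with hypothetical link parameters (4 lanes at the HBR2 symbol rate, a 533.25 MHz mode clock), reusing the constant and formula from the hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define DP_DSC_FEC_OVERHEAD_FACTOR  972261

    /* Mode clock inflated by the DP FEC overhead (1/0.972261), as in the new helper. */
    static uint32_t mode_to_fec_clock(uint32_t mode_clock)
    {
            return (uint64_t)mode_clock * 1000000u / DP_DSC_FEC_OVERHEAD_FACTOR;
    }

    int main(void)
    {
            uint32_t link_clock = 540000;   /* kHz, HBR2 symbol rate (hypothetical setup) */
            uint32_t lane_count = 4;
            uint32_t mode_clock = 533250;   /* kHz pixel clock, made up for illustration */

            /* Max bits per pixel the link can carry once FEC overhead is accounted for. */
            uint32_t bits_per_pixel = (link_clock * lane_count * 8) /
                                      mode_to_fec_clock(mode_clock);

            printf("fec clock %u kHz, max link bpp %u\n",
                   mode_to_fec_clock(mode_clock), bits_per_pixel);
            return 0;
    }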
@@ -2226,7 +2317,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n,
-                              constant_n);
+                              constant_n, pipe_config->fec_enable);
 
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2236,7 +2327,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                               intel_connector->panel.downclock_mode->clock,
                                               pipe_config->port_clock,
                                               &pipe_config->dp_m2_n2,
-                                              constant_n);
+                                              constant_n, pipe_config->fec_enable);
        }
 
        if (!HAS_DDI(dev_priv))
@@ -4323,91 +4414,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
                DP_DPRX_ESI_LEN;
 }
 
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
-                               int mode_clock, int mode_hdisplay)
-{
-       u16 bits_per_pixel, max_bpp_small_joiner_ram;
-       int i;
-
-       /*
-        * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
-        * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
-        * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
-        * for MST -> TimeSlotsPerMTP has to be calculated
-        */
-       bits_per_pixel = (link_clock * lane_count * 8 *
-                         DP_DSC_FEC_OVERHEAD_FACTOR) /
-               mode_clock;
-
-       /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
-       max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
-               mode_hdisplay;
-
-       /*
-        * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
-        * check, output bpp from small joiner RAM check)
-        */
-       bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
-
-       /* Error out if the max bpp is less than smallest allowed valid bpp */
-       if (bits_per_pixel < valid_dsc_bpp[0]) {
-               DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
-               return 0;
-       }
-
-       /* Find the nearest match in the array of known BPPs from VESA */
-       for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
-               if (bits_per_pixel < valid_dsc_bpp[i + 1])
-                       break;
-       }
-       bits_per_pixel = valid_dsc_bpp[i];
-
-       /*
-        * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
-        * fractional part is 0
-        */
-       return bits_per_pixel << 4;
-}
-
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
-                               int mode_clock,
-                               int mode_hdisplay)
-{
-       u8 min_slice_count, i;
-       int max_slice_width;
-
-       if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
-               min_slice_count = DIV_ROUND_UP(mode_clock,
-                                              DP_DSC_MAX_ENC_THROUGHPUT_0);
-       else
-               min_slice_count = DIV_ROUND_UP(mode_clock,
-                                              DP_DSC_MAX_ENC_THROUGHPUT_1);
-
-       max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
-       if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
-               DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
-                             max_slice_width);
-               return 0;
-       }
-       /* Also take into account max slice width */
-       min_slice_count = min_t(u8, min_slice_count,
-                               DIV_ROUND_UP(mode_hdisplay,
-                                            max_slice_width));
-
-       /* Find the closest match to the valid slice count values */
-       for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
-               if (valid_dsc_slicecount[i] >
-                   drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
-                                                   false))
-                       break;
-               if (min_slice_count  <= valid_dsc_slicecount[i])
-                       return valid_dsc_slicecount[i];
-       }
-
-       DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
-       return 0;
-}
-
 static void
 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state)
index 657bbb1..00981fb 100644 (file)
@@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
-                               int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
-                               int mode_hdisplay);
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
@@ -118,4 +114,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
        return ~((1 << lane_count) - 1) & 0xf;
 }
 
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+
 #endif /* __INTEL_DP_H__ */
index 6df240a..600873c 100644 (file)
@@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
                               adjusted_mode->crtc_clock,
                               crtc_state->port_clock,
                               &crtc_state->dp_m_n,
-                              constant_n);
+                              constant_n, crtc_state->fec_enable);
        crtc_state->dp_m_n.tu = slots;
 
        return 0;
@@ -615,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
        intel_encoder->type = INTEL_OUTPUT_DP_MST;
        intel_encoder->power_domain = intel_dig_port->base.power_domain;
        intel_encoder->port = intel_dig_port->base.port;
-       intel_encoder->crtc_mask = BIT(pipe);
+       intel_encoder->crtc_mask = 0x7;
        intel_encoder->cloneable = 0;
 
        intel_encoder->compute_config = intel_dp_mst_compute_config;
index dea63be..cae25e4 100644 (file)
@@ -1528,6 +1528,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
        int src_x, src_w, src_h, crtc_w, crtc_h;
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
+       unsigned int stride = plane_state->color_plane[0].stride;
        unsigned int cpp = fb->format->cpp[0];
        unsigned int width_bytes;
        int min_width, min_height;
@@ -1569,9 +1570,9 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
                return -EINVAL;
        }
 
-       if (width_bytes > 4096 || fb->pitches[0] > 4096) {
+       if (stride > 4096) {
                DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
-                             fb->pitches[0], 4096);
+                             stride, 4096);
                return -EINVAL;
        }
 
index 261c9bd..05289ed 100644 (file)
@@ -245,11 +245,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
        wakeref = intel_runtime_pm_get(rpm);
 
-       srcu = intel_gt_reset_trylock(ggtt->vm.gt);
-       if (srcu < 0) {
-               ret = srcu;
+       ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+       if (ret)
                goto err_rpm;
-       }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -318,7 +316,11 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
                intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
                                   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
-       i915_vma_set_ggtt_write(vma);
+       if (write) {
+               GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+               i915_vma_set_ggtt_write(vma);
+               obj->mm.dirty = true;
+       }
 
 err_fence:
        i915_vma_unpin_fence(vma);
@@ -362,6 +364,7 @@ err:
                return VM_FAULT_OOM;
        case -ENOSPC:
        case -EFAULT:
+       case -ENODEV: /* bad object, how did you get here! */
                return VM_FAULT_SIGBUS;
        default:
                WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
@@ -473,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
        if (!obj)
                return -ENOENT;
 
+       if (i915_gem_object_never_bind_ggtt(obj)) {
+               ret = -ENODEV;
+               goto out;
+       }
+
        ret = create_mmap_offset(obj);
        if (ret == 0)
                *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
+out:
        i915_gem_object_put(obj);
        return ret;
 }
index 5efb993..ddf3605 100644 (file)
@@ -153,6 +153,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
 }
 
 static inline bool
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+{
+       return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+}
+
+static inline bool
 i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 {
        return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
index ede0eb4..646859f 100644 (file)
@@ -32,7 +32,8 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE        BIT(0)
 #define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
 #define I915_GEM_OBJECT_IS_PROXY       BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT                BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(4)
 
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
index 92e53c2..ad2a63d 100644 (file)
@@ -241,9 +241,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
        mutex_lock(&i915->drm.struct_mutex);
        intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
-       i915_gem_restore_gtt_mappings(i915);
-       i915_gem_restore_fences(i915);
-
        if (i915_gem_init_hw(i915))
                goto err_wedged;
 
index 11b231c..6b3b50f 100644 (file)
@@ -702,6 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE |
+                I915_GEM_OBJECT_NO_GGTT |
                 I915_GEM_OBJECT_ASYNC_CANCEL,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
index d3c6993..22aab85 100644 (file)
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
        return READ_ONCE(*execlists->active);
 }
 
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+       local_bh_disable(); /* prevent local softirq and lock recursion */
+       tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+       tasklet_unlock(&execlists->tasklet);
+       local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 
index 82630db..4ce8626 100644 (file)
@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                         struct drm_printer *m)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       const struct intel_engine_execlists * const execlists =
-               &engine->execlists;
-       unsigned long flags;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        u64 addr;
 
        if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                   idx, hws[idx * 2], hws[idx * 2 + 1]);
                }
 
-               spin_lock_irqsave(&engine->active.lock, flags);
+               execlists_active_lock_bh(execlists);
                for (port = execlists->active; (rq = *port); port++) {
                        char hdr[80];
                        int len;
@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                 hwsp_seqno(rq));
                        print_request(m, rq, hdr);
                }
-               spin_unlock_irqrestore(&engine->active.lock, flags);
+               execlists_active_unlock_bh(execlists);
        } else if (INTEL_GEN(dev_priv) > 6) {
                drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
                           ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        if (!intel_engine_supports_stats(engine))
                return -ENODEV;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-       write_seqlock(&engine->stats.lock);
+       execlists_active_lock_bh(execlists);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
 
        if (unlikely(engine->stats.enabled == ~0)) {
                err = -EBUSY;
@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        }
 
 unlock:
-       write_sequnlock(&engine->stats.lock);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
+       execlists_active_unlock_bh(execlists);
 
        return err;
 }
index d425844..06a506c 100644 (file)
@@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring);
 
+static void mark_eio(struct i915_request *rq)
+{
+       if (!i915_request_signaled(rq))
+               dma_fence_set_error(&rq->fence, -EIO);
+       i915_request_mark_complete(rq);
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
        return (i915_ggtt_offset(engine->status_page.vma) +
@@ -631,7 +638,6 @@ execlists_schedule_out(struct i915_request *rq)
        struct intel_engine_cs *cur, *old;
 
        trace_i915_request_out(rq);
-       GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
 
        old = READ_ONCE(ce->inflight);
        do
@@ -797,6 +803,17 @@ static bool can_merge_rq(const struct i915_request *prev,
        GEM_BUG_ON(prev == next);
        GEM_BUG_ON(!assert_priority_queue(prev, next));
 
+       /*
+        * We do not submit known completed requests. Therefore if the next
+        * request is already completed, we can pretend to merge it in
+        * with the previous context (and we will skip updating the ELSP
+        * and tracking). Thus hopefully keeping the ELSP full with active
+        * contexts, despite the best efforts of preempt-to-busy to confuse
+        * us.
+        */
+       if (i915_request_completed(next))
+               return true;
+
        if (!can_merge_ctx(prev->hw_context, next->hw_context))
                return false;
 
@@ -893,7 +910,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 static struct i915_request *
 last_active(const struct intel_engine_execlists *execlists)
 {
-       struct i915_request * const *last = execlists->active;
+       struct i915_request * const *last = READ_ONCE(execlists->active);
 
        while (*last && i915_request_completed(*last))
                last++;
@@ -1172,21 +1189,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                continue;
                        }
 
-                       if (i915_request_completed(rq)) {
-                               ve->request = NULL;
-                               ve->base.execlists.queue_priority_hint = INT_MIN;
-                               rb_erase_cached(rb, &execlists->virtual);
-                               RB_CLEAR_NODE(rb);
-
-                               rq->engine = engine;
-                               __i915_request_submit(rq);
-
-                               spin_unlock(&ve->base.active.lock);
-
-                               rb = rb_first_cached(&execlists->virtual);
-                               continue;
-                       }
-
                        if (last && !can_merge_rq(last, rq)) {
                                spin_unlock(&ve->base.active.lock);
                                return; /* leave this for another */
@@ -1237,11 +1239,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                GEM_BUG_ON(ve->siblings[0] != engine);
                        }
 
-                       __i915_request_submit(rq);
-                       if (!i915_request_completed(rq)) {
+                       if (__i915_request_submit(rq)) {
                                submit = true;
                                last = rq;
                        }
+                       i915_request_put(rq);
+
+                       /*
+                        * Hmm, we have a bunch of virtual engine requests,
+                        * but the first one was already completed (thanks
+                        * preempt-to-busy!). Keep looking at the veng queue
+                        * until we have no more relevant requests (i.e.
+                        * the normal submit queue has higher priority).
+                        */
+                       if (!submit) {
+                               spin_unlock(&ve->base.active.lock);
+                               rb = rb_first_cached(&execlists->virtual);
+                               continue;
+                       }
                }
 
                spin_unlock(&ve->base.active.lock);
@@ -1254,8 +1269,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
-                       if (i915_request_completed(rq))
-                               goto skip;
+                       bool merge = true;
 
                        /*
                         * Can we combine this request with the current port?
@@ -1296,14 +1310,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                    ctx_single_port_submission(rq->hw_context))
                                        goto done;
 
-                               *port = execlists_schedule_in(last, port - execlists->pending);
-                               port++;
+                               merge = false;
                        }
 
-                       last = rq;
-                       submit = true;
-skip:
-                       __i915_request_submit(rq);
+                       if (__i915_request_submit(rq)) {
+                               if (!merge) {
+                                       *port = execlists_schedule_in(last, port - execlists->pending);
+                                       port++;
+                                       last = NULL;
+                               }
+
+                               GEM_BUG_ON(last &&
+                                          !can_merge_ctx(last->hw_context,
+                                                         rq->hw_context));
+
+                               submit = true;
+                               last = rq;
+                       }
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
@@ -1593,8 +1616,11 @@ static void process_csb(struct intel_engine_cs *engine)
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
        lockdep_assert_held(&engine->active.lock);
-       if (!engine->execlists.pending[0])
+       if (!engine->execlists.pending[0]) {
+               rcu_read_lock(); /* protect peeking at execlists->active */
                execlists_dequeue(engine);
+               rcu_read_unlock();
+       }
 }
 
 /*
@@ -2399,10 +2425,14 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 
 static struct i915_request *active_request(struct i915_request *rq)
 {
-       const struct list_head * const list = &rq->timeline->requests;
        const struct intel_context * const ce = rq->hw_context;
        struct i915_request *active = NULL;
+       struct list_head *list;
 
+       if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+               return rq;
+
+       list = &rq->timeline->requests;
        list_for_each_entry_from_reverse(rq, list, link) {
                if (i915_request_completed(rq))
                        break;
@@ -2552,12 +2582,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        __execlists_reset(engine, true);
 
        /* Mark all executing requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link) {
-               if (!i915_request_signaled(rq))
-                       dma_fence_set_error(&rq->fence, -EIO);
-
-               i915_request_mark_complete(rq);
-       }
+       list_for_each_entry(rq, &engine->active.requests, sched.link)
+               mark_eio(rq);
 
        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2565,10 +2591,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
-                       list_del_init(&rq->sched.link);
+                       mark_eio(rq);
                        __i915_request_submit(rq);
-                       dma_fence_set_error(&rq->fence, -EIO);
-                       i915_request_mark_complete(rq);
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
@@ -2584,13 +2608,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                RB_CLEAR_NODE(rb);
 
                spin_lock(&ve->base.active.lock);
-               if (ve->request) {
-                       ve->request->engine = engine;
-                       __i915_request_submit(ve->request);
-                       dma_fence_set_error(&ve->request->fence, -EIO);
-                       i915_request_mark_complete(ve->request);
+               rq = fetch_and_zero(&ve->request);
+               if (rq) {
+                       mark_eio(rq);
+
+                       rq->engine = engine;
+                       __i915_request_submit(rq);
+                       i915_request_put(rq);
+
                        ve->base.execlists.queue_priority_hint = INT_MIN;
-                       ve->request = NULL;
                }
                spin_unlock(&ve->base.active.lock);
        }
@@ -3594,6 +3620,8 @@ submit_engine:
 static void virtual_submit_request(struct i915_request *rq)
 {
        struct virtual_engine *ve = to_virtual_engine(rq->engine);
+       struct i915_request *old;
+       unsigned long flags;
 
        GEM_TRACE("%s: rq=%llx:%lld\n",
                  ve->base.name,
@@ -3602,15 +3630,31 @@ static void virtual_submit_request(struct i915_request *rq)
 
        GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-       GEM_BUG_ON(ve->request);
-       GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+       spin_lock_irqsave(&ve->base.active.lock, flags);
+
+       old = ve->request;
+       if (old) { /* background completion event from preempt-to-busy */
+               GEM_BUG_ON(!i915_request_completed(old));
+               __i915_request_submit(old);
+               i915_request_put(old);
+       }
+
+       if (i915_request_completed(rq)) {
+               __i915_request_submit(rq);
+
+               ve->base.execlists.queue_priority_hint = INT_MIN;
+               ve->request = NULL;
+       } else {
+               ve->base.execlists.queue_priority_hint = rq_prio(rq);
+               ve->request = i915_request_get(rq);
 
-       ve->base.execlists.queue_priority_hint = rq_prio(rq);
-       WRITE_ONCE(ve->request, rq);
+               GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+               list_move_tail(&rq->sched.link, virtual_queue(ve));
 
-       list_move_tail(&rq->sched.link, virtual_queue(ve));
+               tasklet_schedule(&ve->base.execlists.tasklet);
+       }
 
-       tasklet_schedule(&ve->base.execlists.tasklet);
+       spin_unlock_irqrestore(&ve->base.active.lock, flags);
 }
 
 static struct ve_bond *
@@ -3631,18 +3675,22 @@ static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
        struct virtual_engine *ve = to_virtual_engine(rq->engine);
+       intel_engine_mask_t allowed, exec;
        struct ve_bond *bond;
 
+       allowed = ~to_request(signal)->engine->mask;
+
        bond = virtual_find_bond(ve, to_request(signal)->engine);
-       if (bond) {
-               intel_engine_mask_t old, new, cmp;
+       if (bond)
+               allowed &= bond->sibling_mask;
 
-               cmp = READ_ONCE(rq->execution_mask);
-               do {
-                       old = cmp;
-                       new = cmp & bond->sibling_mask;
-               } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
-       }
+       /* Restrict the bonded request to run on only the available engines */
+       exec = READ_ONCE(rq->execution_mask);
+       while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+               ;
+
+       /* Prevent the master from being re-run on the bonded engines */
+       to_request(signal)->execution_mask &= ~allowed;
 }
 
 struct intel_context *
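
virtual_bond_execute() above replaces an open-coded cmpxchg() retry loop with try_cmpxchg(), which reloads the expected value for you whenever the compare fails. A hedged, stand-alone sketch of that idiom as used above; u32 stands in for intel_engine_mask_t and restrict_mask() is an invented name:

    #include <linux/atomic.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Atomically AND 'allowed' into *mask, tolerating concurrent updates. */
    static void restrict_mask(u32 *mask, u32 allowed)
    {
            u32 old = READ_ONCE(*mask);

            /* try_cmpxchg() refreshes 'old' with the current value on
             * failure, so the retry loop needs no body of its own. */
            while (!try_cmpxchg(mask, &old, old & allowed))
                    ;
    }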
index b9d84d5..8cea423 100644
@@ -42,11 +42,10 @@ static void engine_skip_context(struct i915_request *rq)
        struct intel_engine_cs *engine = rq->engine;
        struct i915_gem_context *hung_ctx = rq->gem_context;
 
-       lockdep_assert_held(&engine->active.lock);
-
        if (!i915_request_is_active(rq))
                return;
 
+       lockdep_assert_held(&engine->active.lock);
        list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
                if (rq->gem_context == hung_ctx)
                        i915_request_skip(rq, -EIO);
@@ -123,7 +122,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
                  rq->fence.seqno,
                  yesno(guilty));
 
-       lockdep_assert_held(&rq->engine->active.lock);
        GEM_BUG_ON(i915_request_completed(rq));
 
        if (guilty) {
@@ -1214,10 +1212,8 @@ out:
        intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
 }
 
-int intel_gt_reset_trylock(struct intel_gt *gt)
+int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
 {
-       int srcu;
-
        might_lock(&gt->reset.backoff_srcu);
        might_sleep();
 
@@ -1232,10 +1228,10 @@ int intel_gt_reset_trylock(struct intel_gt *gt)
 
                rcu_read_lock();
        }
-       srcu = srcu_read_lock(&gt->reset.backoff_srcu);
+       *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
        rcu_read_unlock();
 
-       return srcu;
+       return 0;
 }
 
 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
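
intel_gt_reset_trylock() now hands the SRCU read-side cookie back through an out-parameter and keeps the return value for errors (0 on success). A sketch of what a caller looks like under the new signature; the caller itself is invented for illustration:

    static int do_work_outside_reset(struct intel_gt *gt)
    {
            int srcu, err;

            err = intel_gt_reset_trylock(gt, &srcu);
            if (err)        /* e.g. interrupted while a reset was pending */
                    return err;

            /* ... safe to poke the GPU: a full reset cannot run here ... */

            intel_gt_reset_unlock(gt, srcu);
            return 0;
    }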
index 37a987b..52c0019 100644
@@ -38,7 +38,7 @@ int intel_engine_reset(struct intel_engine_cs *engine,
 
 void __i915_request_reset(struct i915_request *rq, bool guilty);
 
-int __must_check intel_gt_reset_trylock(struct intel_gt *gt);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
 void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
 
 void intel_gt_set_wedged(struct intel_gt *gt);
index 601c162..bacaa7b 100644
@@ -1573,7 +1573,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        struct intel_engine_cs *engine = rq->engine;
        enum intel_engine_id id;
        const int num_engines =
-               IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+               IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
        bool force_restore = false;
        int len;
        u32 *cs;
index 45481eb..5f6ec2f 100644
@@ -1063,6 +1063,9 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
 
        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
        whitelist_reg(w, GEN8_HDC_CHICKEN1);
+
+       /* WaSendPushConstantsFromMMIO:skl,bxt */
+       whitelist_reg(w, COMMON_SLICE_CHICKEN2);
 }
 
 static void skl_whitelist_build(struct intel_engine_cs *engine)
index 0206967..bb6f86c 100644
@@ -1924,6 +1924,11 @@ static int i915_drm_resume(struct drm_device *dev)
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");
 
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i915_gem_restore_gtt_mappings(dev_priv);
+       i915_gem_restore_fences(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
        intel_csr_ucode_resume(dev_priv);
 
        i915_restore_state(dev_priv);
index 95e7c52..d0f94f2 100644
@@ -969,6 +969,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+       if (i915_gem_object_never_bind_ggtt(obj))
+               return ERR_PTR(-ENODEV);
+
        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
                /* If the required space is larger than the available
index 167a7b5..6795f1d 100644
@@ -77,6 +77,12 @@ struct drm_i915_private;
 
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+       while (!tasklet_trylock(t))
+               cpu_relax();
+}
+
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
        if (!atomic_fetch_inc(&t->count))
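
The new tasklet_lock() helper above spins on tasklet_trylock(), which only succeeds while the tasklet is not running on another CPU, so the loop effectively waits for a concurrent run to finish. A small sketch of how it might pair with tasklet_unlock(); the wrapper function is invented:

    /* Serialise against a tasklet without disabling future scheduling. */
    static void touch_tasklet_state(struct tasklet_struct *t)
    {
            tasklet_lock(t);        /* spins via tasklet_trylock()/cpu_relax() */
            /* ... inspect or fix up state the tasklet also touches ... */
            tasklet_unlock(t);
    }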
index a53777d..1c55068 100644
@@ -194,6 +194,27 @@ static void free_capture_list(struct i915_request *request)
        }
 }
 
+static void remove_from_engine(struct i915_request *rq)
+{
+       struct intel_engine_cs *engine, *locked;
+
+       /*
+        * Virtual engines complicate acquiring the engine timeline lock,
+        * as their rq->engine pointer is not stable until under that
+        * engine lock. The simple ploy we use is to take the lock then
+        * check that the rq still belongs to the newly locked engine.
+        */
+       locked = READ_ONCE(rq->engine);
+       spin_lock(&locked->active.lock);
+       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+               spin_unlock(&locked->active.lock);
+               spin_lock(&engine->active.lock);
+               locked = engine;
+       }
+       list_del(&rq->sched.link);
+       spin_unlock(&locked->active.lock);
+}
+
 static bool i915_request_retire(struct i915_request *rq)
 {
        struct i915_active_request *active, *next;
@@ -259,9 +280,7 @@ static bool i915_request_retire(struct i915_request *rq)
         * request that we have removed from the HW and put back on a run
         * queue.
         */
-       spin_lock(&rq->engine->active.lock);
-       list_del(&rq->sched.link);
-       spin_unlock(&rq->engine->active.lock);
+       remove_from_engine(rq);
 
        spin_lock(&rq->lock);
        i915_request_mark_complete(rq);
@@ -358,9 +377,10 @@ __i915_request_await_execution(struct i915_request *rq,
        return 0;
 }
 
-void __i915_request_submit(struct i915_request *request)
+bool __i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
+       bool result = false;
 
        GEM_TRACE("%s fence %llx:%lld, current %d\n",
                  engine->name,
@@ -370,6 +390,25 @@ void __i915_request_submit(struct i915_request *request)
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->active.lock);
 
+       /*
+        * With the advent of preempt-to-busy, we frequently encounter
+        * requests that we have unsubmitted from HW, but left running
+        * until the next ack and so have completed in the meantime. On
+        * resubmission of that completed request, we can skip
+        * updating the payload, and execlists can even skip submitting
+        * the request.
+        *
+        * We must remove the request from the caller's priority queue,
+        * and the caller must only call us when the request is in their
+        * priority queue, under the active.lock. This ensures that the
+        * request has *not* yet been retired and we can safely move
+        * the request into the engine->active.list where it will be
+        * dropped upon retiring. (Otherwise if resubmit a *retired*
+        * request, this would be a horrible use-after-free.)
+        */
+       if (i915_request_completed(request))
+               goto xfer;
+
        if (i915_gem_context_is_banned(request->gem_context))
                i915_request_skip(request, -EIO);
 
@@ -393,13 +432,18 @@ void __i915_request_submit(struct i915_request *request)
            i915_sw_fence_signaled(&request->semaphore))
                engine->saturated |= request->sched.semaphores;
 
-       /* We may be recursing from the signal callback of another i915 fence */
-       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+       engine->emit_fini_breadcrumb(request,
+                                    request->ring->vaddr + request->postfix);
 
-       list_move_tail(&request->sched.link, &engine->active.requests);
+       trace_i915_request_execute(request);
+       engine->serial++;
+       result = true;
+
+xfer:  /* We may be recursing from the signal callback of another i915 fence */
+       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-       GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-       set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+       if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+               list_move_tail(&request->sched.link, &engine->active.requests);
 
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -410,12 +454,7 @@ void __i915_request_submit(struct i915_request *request)
 
        spin_unlock(&request->lock);
 
-       engine->emit_fini_breadcrumb(request,
-                                    request->ring->vaddr + request->postfix);
-
-       engine->serial++;
-
-       trace_i915_request_execute(request);
+       return result;
 }
 
 void i915_request_submit(struct i915_request *request)
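
remove_from_engine() above copes with rq->engine being retargeted underneath it by virtual engines: lock the engine you last saw, then recheck and chase the pointer if it moved. A generic sketch of that lock-then-recheck idiom; struct owner, struct item and lock_owner() are invented names:

    #include <linux/compiler.h>
    #include <linux/spinlock.h>

    struct owner { spinlock_t lock; };
    struct item  { struct owner *owner; };  /* may be retargeted concurrently */

    /* Returns with the item's *current* owner locked. */
    static struct owner *lock_owner(struct item *it)
    {
            struct owner *locked = READ_ONCE(it->owner);
            struct owner *now;

            spin_lock(&locked->lock);
            while (unlikely(locked != (now = READ_ONCE(it->owner)))) {
                    spin_unlock(&locked->lock);
                    spin_lock(&now->lock);
                    locked = now;
            }

            return locked;  /* caller works, then spin_unlock(&locked->lock) */
    }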
index 8ac6e12..e4dd013 100644
@@ -292,7 +292,7 @@ int i915_request_await_execution(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq);
 
-void __i915_request_submit(struct i915_request *request);
+bool __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
 void i915_request_skip(struct i915_request *request, int error);
index fa864d8..15f8bff 100644
@@ -69,6 +69,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
                return PCH_CNP;
        case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+       case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
                WARN_ON(!IS_COFFEELAKE(dev_priv));
                /* CometPoint is CNP Compatible */
index e6a2d65..c29c81e 100644
@@ -41,6 +41,7 @@ enum intel_pch {
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE           0xA300
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE                0x9D80
 #define INTEL_PCH_CMP_DEVICE_ID_TYPE           0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE          0x0680
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE           0x3480
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE           0x4B00
 #define INTEL_PCH_MCC2_DEVICE_ID_TYPE          0x3880
index bb6dd54..3759383 100644
@@ -118,6 +118,12 @@ static void pm_resume(struct drm_i915_private *i915)
        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                intel_gt_sanitize(&i915->gt, false);
                i915_gem_sanitize(i915);
+
+               mutex_lock(&i915->drm.struct_mutex);
+               i915_gem_restore_gtt_mappings(i915);
+               i915_gem_restore_fences(i915);
+               mutex_unlock(&i915->drm.struct_mutex);
+
                i915_gem_resume(i915);
        }
 }
index 663ff9f..1e7b1be 100644
@@ -26,6 +26,8 @@
 #include "dsi_cfg.h"
 #include "msm_kms.h"
 
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
 {
        u32 ver;
@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
        wmb(); /* clocks need to be enabled before reset */
 
        dsi_write(msm_host, REG_DSI_RESET, 1);
-       wmb(); /* make sure reset happen */
+       msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
        dsi_write(msm_host, REG_DSI_RESET, 0);
 }
 
@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
 
        /* dsi controller can only be reset while clocks are running */
        dsi_write(msm_host, REG_DSI_RESET, 1);
-       wmb();  /* make sure reset happen */
+       msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happen */
        dsi_write(msm_host, REG_DSI_RESET, 0);
        wmb();  /* controller out of reset */
        dsi_write(msm_host, REG_DSI_CTRL, data0);
index e226324..4bdd63b 100644
@@ -1083,7 +1083,7 @@ static const struct dss_features omap34xx_dss_feats = {
 
 static const struct dss_features omap3630_dss_feats = {
        .model                  =       DSS_MODEL_OMAP3,
-       .fck_div_max            =       32,
+       .fck_div_max            =       31,
        .fck_freq_max           =       173000000,
        .dss_fck_multiplier     =       1,
        .parent_clk_name        =       "dpll4_ck",
index fc82a52..ee43797 100644
@@ -220,9 +220,17 @@ static const struct of_device_id lb035q02_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, lb035q02_of_match);
 
+static const struct spi_device_id lb035q02_ids[] = {
+       { "lb035q02", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, lb035q02_ids);
+
 static struct spi_driver lb035q02_driver = {
        .probe          = lb035q02_probe,
        .remove         = lb035q02_remove,
+       .id_table       = lb035q02_ids,
        .driver         = {
                .name   = "panel-lg-lb035q02",
                .of_match_table = lb035q02_of_match,
@@ -231,7 +239,6 @@ static struct spi_driver lb035q02_driver = {
 
 module_spi_driver(lb035q02_driver);
 
-MODULE_ALIAS("spi:lgphilips,lb035q02");
 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
 MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
 MODULE_LICENSE("GPL");
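
The panel patch above (and the similar ones that follow) adds a spi_device_id table and drops the MODULE_ALIAS("spi:vendor,part") line: the SPI core builds the modalias from the bare part name without the vendor prefix, so an id_table entry on that bare name is what actually triggers module autoload. A skeleton of the pattern, with every name invented:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>
    #include <linux/spi/spi.h>

    static int example_probe(struct spi_device *spi)  { return 0; }
    static int example_remove(struct spi_device *spi) { return 0; }

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,examplepanel" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, example_of_match);

    /* Matches the modalias the SPI core generates ("spi:examplepanel"). */
    static const struct spi_device_id example_ids[] = {
            { "examplepanel", 0 },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(spi, example_ids);

    static struct spi_driver example_driver = {
            .probe          = example_probe,
            .remove         = example_remove,
            .id_table       = example_ids,
            .driver         = {
                    .name           = "panel-example",
                    .of_match_table = example_of_match,
            },
    };
    module_spi_driver(example_driver);

    MODULE_LICENSE("GPL");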
index 299b217..20f17e4 100644
@@ -230,9 +230,17 @@ static const struct of_device_id nl8048_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, nl8048_of_match);
 
+static const struct spi_device_id nl8048_ids[] = {
+       { "nl8048hl11", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, nl8048_ids);
+
 static struct spi_driver nl8048_driver = {
        .probe          = nl8048_probe,
        .remove         = nl8048_remove,
+       .id_table       = nl8048_ids,
        .driver         = {
                .name   = "panel-nec-nl8048hl11",
                .pm     = &nl8048_pm_ops,
@@ -242,7 +250,6 @@ static struct spi_driver nl8048_driver = {
 
 module_spi_driver(nl8048_driver);
 
-MODULE_ALIAS("spi:nec,nl8048hl11");
 MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
 MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
 MODULE_LICENSE("GPL");
index 305259b..3d5b9c4 100644
@@ -684,9 +684,17 @@ static const struct of_device_id acx565akm_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
+static const struct spi_device_id acx565akm_ids[] = {
+       { "acx565akm", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, acx565akm_ids);
+
 static struct spi_driver acx565akm_driver = {
        .probe          = acx565akm_probe,
        .remove         = acx565akm_remove,
+       .id_table       = acx565akm_ids,
        .driver         = {
                .name   = "panel-sony-acx565akm",
                .of_match_table = acx565akm_of_match,
@@ -695,7 +703,6 @@ static struct spi_driver acx565akm_driver = {
 
 module_spi_driver(acx565akm_driver);
 
-MODULE_ALIAS("spi:sony,acx565akm");
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
 MODULE_LICENSE("GPL");
index d7b2e34..f2baff8 100644
@@ -375,8 +375,7 @@ static const struct of_device_id td028ttec1_of_match[] = {
 MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
 
 static const struct spi_device_id td028ttec1_ids[] = {
-       { "tpo,td028ttec1", 0},
-       { "toppoly,td028ttec1", 0 },
+       { "td028ttec1", 0 },
        { /* sentinel */ }
 };
 
index 8437056..ba163c7 100644
@@ -491,9 +491,17 @@ static const struct of_device_id td043mtea1_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
 
+static const struct spi_device_id td043mtea1_ids[] = {
+       { "td043mtea1", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
+
 static struct spi_driver td043mtea1_driver = {
        .probe          = td043mtea1_probe,
        .remove         = td043mtea1_remove,
+       .id_table       = td043mtea1_ids,
        .driver         = {
                .name   = "panel-tpo-td043mtea1",
                .pm     = &td043mtea1_pm_ops,
@@ -503,7 +511,6 @@ static struct spi_driver td043mtea1_driver = {
 
 module_spi_driver(td043mtea1_driver);
 
-MODULE_ALIAS("spi:tpo,td043mtea1");
 MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
 MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
 MODULE_LICENSE("GPL");
index f67ed92..8822ec1 100644
@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
        pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
        pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
        pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+       pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+       pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+       pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
        pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
        for (i = 0; i < 4; i++)
                pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
index a585516..21f34d4 100644
@@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);
 
-       mutex_lock(&pfdev->reset_lock);
+       if (!mutex_trylock(&pfdev->reset_lock))
+               return;
 
-       for (i = 0; i < NUM_JOB_SLOTS; i++)
-               drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+       for (i = 0; i < NUM_JOB_SLOTS; i++) {
+               struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+               drm_sched_stop(sched, sched_job);
+               if (js != i)
+                       /* Ensure any timeouts on other slots have finished */
+                       cancel_delayed_work_sync(&sched->work_tdr);
+       }
 
-       if (sched_job)
-               drm_sched_increase_karma(sched_job);
+       drm_sched_increase_karma(sched_job);
 
        spin_lock_irqsave(&pfdev->js->job_lock, flags);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
index 62eab82..09a4709 100644
@@ -9504,7 +9504,6 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
 {
        struct pci_dev *root = rdev->pdev->bus->self;
        enum pci_bus_speed speed_cap;
-       int bridge_pos, gpu_pos;
        u32 speed_cntl, current_data_rate;
        int i;
        u16 tmp16;
@@ -9546,12 +9545,7 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
                DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
        }
 
-       bridge_pos = pci_pcie_cap(root);
-       if (!bridge_pos)
-               return;
-
-       gpu_pos = pci_pcie_cap(rdev->pdev);
-       if (!gpu_pos)
+       if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
                return;
 
        if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -9561,14 +9555,17 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
                        u16 bridge_cfg2, gpu_cfg2;
                        u32 max_lw, current_lw, tmp;
 
-                       pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                       pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                       pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                 &bridge_cfg);
+                       pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+                                                 &gpu_cfg);
 
                        tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
 
                        tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+                                                  tmp16);
 
                        tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
                        max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -9586,15 +9583,23 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
 
                        for (i = 0; i < 10; i++) {
                                /* check status */
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_DEVSTA,
+                                                         &tmp16);
                                if (tmp16 & PCI_EXP_DEVSTA_TRPND)
                                        break;
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &bridge_cfg);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &gpu_cfg);
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &bridge_cfg2);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &gpu_cfg2);
 
                                tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
                                tmp |= LC_SET_QUIESCE;
@@ -9607,26 +9612,45 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
                                msleep(100);
 
                                /* linkctl */
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(root, PCI_EXP_LNKCTL,
+                                                          tmp16);
 
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(rdev->pdev,
+                                                          PCI_EXP_LNKCTL,
+                                                          tmp16);
 
                                /* linkctl2 */
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (bridge_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(root,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
+
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (gpu_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(rdev->pdev,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
 
                                tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
                                tmp &= ~LC_SET_QUIESCE;
@@ -9640,15 +9664,15 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)
        speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
        WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-       pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-       tmp16 &= ~0xf;
+       pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+       tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
        if (speed_cap == PCIE_SPEED_8_0GT)
-               tmp16 |= 3; /* gen3 */
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
        else if (speed_cap == PCIE_SPEED_5_0GT)
-               tmp16 |= 2; /* gen2 */
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
        else
-               tmp16 |= 1; /* gen1 */
-       pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+       pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
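
The radeon hunks above (and the matching si.c changes below) convert raw config-space accesses at pci_pcie_cap()-derived offsets into pcie_capability_read_word()/pcie_capability_write_word(), which locate the PCIe capability themselves, and swap magic LNKCTL2 bit masks for the symbolic PCI_EXP_LNKCTL2_* names. A small hedged sketch of the converted pattern; set_link_hawd() is an invented helper:

    #include <linux/pci.h>

    /* Set the Hardware Autonomous Width Disable bit in Link Control. */
    static int set_link_hawd(struct pci_dev *pdev)
    {
            u16 lnkctl;
            int ret;

            if (!pci_is_pcie(pdev))      /* replaces the pci_pcie_cap() check */
                    return -ENODEV;

            /* No "pos + PCI_EXP_LNKCTL" arithmetic: the accessor finds the
             * PCIe capability internally. */
            ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
            if (ret)
                    return ret;

            return pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
                                              lnkctl | PCI_EXP_LNKCTL_HAWD);
    }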
index d0bc91e..9e55076 100644
@@ -379,19 +379,11 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
-       struct drm_device *ddev = pci_get_drvdata(pdev);
-
        /* if we are running in a VM, make sure the device
         * torn down properly on reboot/shutdown
         */
        if (radeon_device_is_virtual())
                radeon_pci_remove(pdev);
-
-       /* Some adapters need to be suspended before a
-       * shutdown occurs in order to prevent an error
-       * during kexec.
-       */
-       radeon_suspend_kms(ddev, true, true, false);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index 05894d1..67a98b3 100644
@@ -3257,7 +3257,7 @@ static void si_gpu_init(struct radeon_device *rdev)
                /* XXX what about 12? */
                rdev->config.si.tile_config |= (3 << 0);
                break;
-       }       
+       }
        switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
        case 0: /* four banks */
                rdev->config.si.tile_config |= 0 << 4;
@@ -7087,7 +7087,6 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
 {
        struct pci_dev *root = rdev->pdev->bus->self;
        enum pci_bus_speed speed_cap;
-       int bridge_pos, gpu_pos;
        u32 speed_cntl, current_data_rate;
        int i;
        u16 tmp16;
@@ -7129,12 +7128,7 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
                DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
        }
 
-       bridge_pos = pci_pcie_cap(root);
-       if (!bridge_pos)
-               return;
-
-       gpu_pos = pci_pcie_cap(rdev->pdev);
-       if (!gpu_pos)
+       if (!pci_is_pcie(root) || !pci_is_pcie(rdev->pdev))
                return;
 
        if (speed_cap == PCIE_SPEED_8_0GT) {
@@ -7144,14 +7138,17 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
                        u16 bridge_cfg2, gpu_cfg2;
                        u32 max_lw, current_lw, tmp;
 
-                       pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                       pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                       pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                 &bridge_cfg);
+                       pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL,
+                                                 &gpu_cfg);
 
                        tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(root, PCI_EXP_LNKCTL, tmp16);
 
                        tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
-                       pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                       pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL,
+                                                  tmp16);
 
                        tmp = RREG32_PCIE(PCIE_LC_STATUS1);
                        max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
@@ -7169,15 +7166,23 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
 
                        for (i = 0; i < 10; i++) {
                                /* check status */
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_DEVSTA,
+                                                         &tmp16);
                                if (tmp16 & PCI_EXP_DEVSTA_TRPND)
                                        break;
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &bridge_cfg);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &gpu_cfg);
 
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &bridge_cfg2);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &gpu_cfg2);
 
                                tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
                                tmp |= LC_SET_QUIESCE;
@@ -7190,26 +7195,46 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
                                msleep(100);
 
                                /* linkctl */
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(root,
+                                                          PCI_EXP_LNKCTL,
+                                                          tmp16);
 
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL,
+                                                         &tmp16);
                                tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
                                tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
-                               pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+                               pcie_capability_write_word(rdev->pdev,
+                                                          PCI_EXP_LNKCTL,
+                                                          tmp16);
 
                                /* linkctl2 */
-                               pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
-                               pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-                               tmp16 &= ~((1 << 4) | (7 << 9));
-                               tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
-                               pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+                               pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (bridge_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(root,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
+
+                               pcie_capability_read_word(rdev->pdev,
+                                                         PCI_EXP_LNKCTL2,
+                                                         &tmp16);
+                               tmp16 &= ~(PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN);
+                               tmp16 |= (gpu_cfg2 &
+                                         (PCI_EXP_LNKCTL2_ENTER_COMP |
+                                          PCI_EXP_LNKCTL2_TX_MARGIN));
+                               pcie_capability_write_word(rdev->pdev,
+                                                          PCI_EXP_LNKCTL2,
+                                                          tmp16);
 
                                tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
                                tmp &= ~LC_SET_QUIESCE;
@@ -7223,15 +7248,15 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)
        speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
        WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
 
-       pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
-       tmp16 &= ~0xf;
+       pcie_capability_read_word(rdev->pdev, PCI_EXP_LNKCTL2, &tmp16);
+       tmp16 &= ~PCI_EXP_LNKCTL2_TLS;
        if (speed_cap == PCIE_SPEED_8_0GT)
-               tmp16 |= 3; /* gen3 */
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
        else if (speed_cap == PCIE_SPEED_5_0GT)
-               tmp16 |= 2; /* gen2 */
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
        else
-               tmp16 |= 1; /* gen1 */
-       pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+               tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+       pcie_capability_write_word(rdev->pdev, PCI_EXP_LNKCTL2, tmp16);
 
        speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
        speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
index ae07290..04efa78 100644
@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct drm_framebuffer *fb;
 
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+       if (!conn_state->writeback_job)
                return 0;
 
        fb = conn_state->writeback_job->fb;
@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
        unsigned int i;
 
        state = rcrtc->writeback.base.state;
-       if (!state || !state->writeback_job || !state->writeback_job->fb)
+       if (!state || !state->writeback_job)
                return;
 
        fb = state->writeback_job->fb;
index 525dc1c..530edb3 100644
@@ -7,6 +7,7 @@
 #include <linux/gpio.h>
 #include <linux/mod_devicetable.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 
 #include <drm/drm_atomic_helper.h>
index 5047634..a46ac28 100644
@@ -63,7 +63,6 @@ config TINYDRM_REPAPER
        depends on DRM && SPI
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
-       depends on THERMAL || !THERMAL
        help
          DRM driver for the following Pervasive Displays panels:
          1.44" TFT EPD Panel (E1144CS021)
index 20ff56f..9881946 100644
@@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
        list_add_tail(&bo->lru, &man->lru[bo->priority]);
        kref_get(&bo->list_kref);
 
-       if (bo->ttm && !(bo->ttm->page_flags &
-                        (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+       if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+           !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+                                    TTM_PAGE_FLAG_SWAPPED))) {
                list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
                kref_get(&bo->list_kref);
        }
@@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
        if (!bo) {
                if (busy_bo)
-                       ttm_bo_get(busy_bo);
+                       kref_get(&busy_bo->list_kref);
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
-                       ttm_bo_put(busy_bo);
+                       kref_put(&busy_bo->list_kref, ttm_bo_release_list);
                return ret;
        }
 
index 76eedb9..46dc3de 100644
@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);
 
-               /*
-                * Somebody beat us to this PTE or prefaulting to
-                * an already populated PTE, or prefaulting error.
-                */
-
-               if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-                       break;
-               else if (unlikely(ret & VM_FAULT_ERROR))
-                       goto out_io_unlock;
+               /* Never error on prefaulted PTEs */
+               if (unlikely((ret & VM_FAULT_ERROR))) {
+                       if (i == 0)
+                               goto out_io_unlock;
+                       else
+                               break;
+               }
 
                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
index 1ce4d71..bf72020 100644
@@ -231,7 +231,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
        int i;
 
        conn_state = drm_atomic_get_new_connector_state(state, conn);
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+       if (!conn_state->writeback_job)
                return 0;
 
        crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -271,8 +271,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
        u32 ctrl;
        int i;
 
-       if (WARN_ON(!conn_state->writeback_job ||
-                   !conn_state->writeback_job->fb))
+       if (WARN_ON(!conn_state->writeback_job))
                return;
 
        mode = &conn_state->crtc->state->adjusted_mode;
index ba1828a..4be49c1 100644
@@ -718,17 +718,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
        struct device *dev = &xb_dev->dev;
        int ret;
 
-       /*
-        * The device is not spawn from a device tree, so arch_setup_dma_ops
-        * is not called, thus leaving the device with dummy DMA ops.
-        * This makes the device return error on PRIME buffer import, which
-        * is not correct: to fix this call of_dma_configure() with a NULL
-        * node to set default DMA ops.
-        */
-       dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       ret = of_dma_configure(dev, NULL, true);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret < 0) {
-               DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+               DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
                return ret;
        }
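
The xen_drv_probe() change above swaps the of_dma_configure() workaround for dma_coerce_mask_and_coherent(), which forcefully sets both the streaming and coherent DMA masks and installs a dma_mask pointer if the device never received one. A minimal hedged sketch; the probe function here is invented:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static int example_probe(struct device *dev)
    {
            int ret;

            ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
            if (ret) {
                    dev_err(dev, "cannot set DMA mask, ret %d\n", ret);
                    return ret;
            }

            /* ... rest of probe ... */
            return 0;
    }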
 
index cc5b09b..79a28fc 100644
@@ -314,60 +314,24 @@ static void mousevsc_on_receive(struct hv_device *device,
 
 static void mousevsc_on_channel_callback(void *context)
 {
-       const int packet_size = 0x100;
-       int ret;
        struct hv_device *device = context;
-       u32 bytes_recvd;
-       u64 req_id;
        struct vmpacket_descriptor *desc;
-       unsigned char   *buffer;
-       int     bufferlen = packet_size;
-
-       buffer = kmalloc(bufferlen, GFP_ATOMIC);
-       if (!buffer)
-               return;
-
-       do {
-               ret = vmbus_recvpacket_raw(device->channel, buffer,
-                                       bufferlen, &bytes_recvd, &req_id);
-
-               switch (ret) {
-               case 0:
-                       if (bytes_recvd <= 0) {
-                               kfree(buffer);
-                               return;
-                       }
-                       desc = (struct vmpacket_descriptor *)buffer;
-
-                       switch (desc->type) {
-                       case VM_PKT_COMP:
-                               break;
-
-                       case VM_PKT_DATA_INBAND:
-                               mousevsc_on_receive(device, desc);
-                               break;
-
-                       default:
-                               pr_err("unhandled packet type %d, tid %llx len %d\n",
-                                       desc->type, req_id, bytes_recvd);
-                               break;
-                       }
 
+       foreach_vmbus_pkt(desc, device->channel) {
+               switch (desc->type) {
+               case VM_PKT_COMP:
                        break;
 
-               case -ENOBUFS:
-                       kfree(buffer);
-                       /* Handle large packet */
-                       bufferlen = bytes_recvd;
-                       buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-
-                       if (!buffer)
-                               return;
+               case VM_PKT_DATA_INBAND:
+                       mousevsc_on_receive(device, desc);
+                       break;
 
+               default:
+                       pr_err("Unhandled packet type %d, tid %llx len %d\n",
+                              desc->type, desc->trans_id, desc->len8 * 8);
                        break;
                }
-       } while (1);
-
+       }
 }
 
 static int mousevsc_connect_to_vsp(struct hv_device *device)
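
mousevsc_on_channel_callback() above is rewritten around foreach_vmbus_pkt(), which walks the packets in the ring buffer in place and so removes the bounce buffer, the kmalloc(), and the -ENOBUFS resize path. A hedged skeleton of a callback using the iterator; the function name is invented:

    #include <linux/hyperv.h>

    static void example_channel_callback(struct vmbus_channel *chan)
    {
            struct vmpacket_descriptor *desc;

            foreach_vmbus_pkt(desc, chan) {
                    switch (desc->type) {
                    case VM_PKT_COMP:
                            break;
                    case VM_PKT_DATA_INBAND:
                            /* payload follows the descriptor;
                             * total length is desc->len8 * 8 bytes */
                            break;
                    default:
                            break;
                    }
            }
    }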
index 391f0b2..53a60c8 100644
@@ -912,6 +912,7 @@ static void vmbus_shutdown(struct device *child_device)
                drv->shutdown(dev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 /*
  * vmbus_suspend - Suspend a vmbus device
  */
@@ -949,6 +950,7 @@ static int vmbus_resume(struct device *child_device)
 
        return drv->resume(dev);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 /*
  * vmbus_device_release - Final callback release of the vmbus child device
@@ -1070,6 +1072,7 @@ msg_handled:
        vmbus_signal_eom(msg, message_type);
 }
 
+#ifdef CONFIG_PM_SLEEP
 /*
  * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
  * hibernation, because hv_sock connections can not persist across hibernation.
@@ -1105,6 +1108,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
                      vmbus_connection.work_queue,
                      &ctx->work);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 /*
  * Direct callback for channels using other deferred processing
@@ -2125,6 +2129,7 @@ acpi_walk_err:
        return ret_val;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int vmbus_bus_suspend(struct device *dev)
 {
        struct vmbus_channel *channel, *sc;
@@ -2247,6 +2252,7 @@ static int vmbus_bus_resume(struct device *dev)
 
        return 0;
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
        {"VMBUS", 0},
index 95b447c..b26419d 100644
@@ -99,6 +99,8 @@ struct nct7904_data {
        u8 enable_dts;
        u8 has_dts;
        u8 temp_mode; /* 0: TR mode, 1: TD mode */
+       u8 fan_alarm[2];
+       u8 vsen_alarm[3];
 };
 
 /* Access functions */
@@ -214,7 +216,15 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
                                       SMI_STS5_REG + (channel >> 3));
                if (ret < 0)
                        return ret;
-               *val = (ret >> (channel & 0x07)) & 1;
+               if (!data->fan_alarm[channel >> 3])
+                       data->fan_alarm[channel >> 3] = ret & 0xff;
+               else
+                       /* If there is new alarm showing up */
+                       data->fan_alarm[channel >> 3] |= (ret & 0xff);
+               *val = (data->fan_alarm[channel >> 3] >> (channel & 0x07)) & 1;
+               /* Needs to clean the alarm if alarm existing */
+               if (*val)
+                       data->fan_alarm[channel >> 3] ^= 1 << (channel & 0x07);
                return 0;
        default:
                return -EOPNOTSUPP;
@@ -298,7 +308,15 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
                                       SMI_STS1_REG + (index >> 3));
                if (ret < 0)
                        return ret;
-               *val = (ret >> (index & 0x07)) & 1;
+               if (!data->vsen_alarm[index >> 3])
+                       data->vsen_alarm[index >> 3] = ret & 0xff;
+               else
+                       /* Merge any newly raised alarm bits */
+                       data->vsen_alarm[index >> 3] |= (ret & 0xff);
+               *val = (data->vsen_alarm[index >> 3] >> (index & 0x07)) & 1;
+               /* Clear the latched alarm once it has been reported */
+               if (*val)
+                       data->vsen_alarm[index >> 3] ^= 1 << (index & 0x07);
                return 0;
        default:
                return -EOPNOTSUPP;
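The nct7904 hunks above latch alarm bits in software so that an alarm raised between reads is not lost, and clear a bit once it has been reported. A small userspace model of that latch-report-clear flow; the register values are made up and read_status_reg() stands in for the SMBus access:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t latched_alarm;

    static uint8_t read_status_reg(void)
    {
            static const uint8_t samples[] = { 0x05, 0x00, 0x00 };
            static unsigned int i;

            return samples[i < 2 ? i++ : 2];
    }

    static int read_fan_alarm(unsigned int channel)
    {
            int bit;

            latched_alarm |= read_status_reg();      /* accumulate new alarms */
            bit = (latched_alarm >> (channel & 0x07)) & 1;
            if (bit)                                 /* clear once reported */
                    latched_alarm ^= 1u << (channel & 0x07);
            return bit;
    }

    int main(void)
    {
            printf("ch0: %d\n", read_fan_alarm(0));  /* 1: alarm latched   */
            printf("ch0: %d\n", read_fan_alarm(0));  /* 0: already cleared */
            printf("ch2: %d\n", read_fan_alarm(2));  /* 1: still latched   */
            return 0;
    }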
@@ -915,12 +933,15 @@ static int nct7904_probe(struct i2c_client *client,
 
        data->temp_mode = 0;
        for (i = 0; i < 4; i++) {
-               val = (ret & (0x03 << i)) >> (i * 2);
+               val = (ret >> (i * 2)) & 0x03;
                bit = (1 << i);
-               if (val == 0)
+               if (val == 0) {
                        data->tcpu_mask &= ~bit;
-               else if (val == 0x1 || val == 0x2)
-                       data->temp_mode |= bit;
+               } else {
+                       if (val == 0x1 || val == 0x2)
+                               data->temp_mode |= bit;
+                       data->vsen_mask &= ~(0x06 << (i * 2));
+               }
        }
 
        /* PECI */
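The probe fix above corrects how a 2-bit mode field is extracted from the configuration register: the old expression shifted the mask by i but the value by 2*i, so it only worked for the first sensor. A short worked example comparing the two expressions; the register value is arbitrary:

    #include <stdio.h>

    int main(void)
    {
            unsigned int reg = 0xB4;    /* fields: i0=0, i1=1, i2=3, i3=2 */
            int i;

            for (i = 0; i < 4; i++) {
                    unsigned int ok  = (reg >> (i * 2)) & 0x03;        /* fixed    */
                    unsigned int bad = (reg & (0x03 << i)) >> (i * 2); /* original */

                    printf("i=%d fixed=%u original=%u\n", i, ok, bad);
            }
            return 0;
    }

The two expressions agree for i = 0 and diverge from i = 2 onward, which is exactly the failure mode being fixed.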
index 055227c..67b8817 100644 (file)
@@ -474,12 +474,17 @@ static int adxl372_configure_fifo(struct adxl372_state *st)
        if (ret < 0)
                return ret;
 
-       fifo_samples = st->watermark & 0xFF;
+       /*
+        * watermark stores the number of sets; we need to write the FIFO
+        * registers with the number of samples
+        */
+       fifo_samples = (st->watermark * st->fifo_set_size);
        fifo_ctl = ADXL372_FIFO_CTL_FORMAT_MODE(st->fifo_format) |
                   ADXL372_FIFO_CTL_MODE_MODE(st->fifo_mode) |
-                  ADXL372_FIFO_CTL_SAMPLES_MODE(st->watermark);
+                  ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples);
 
-       ret = regmap_write(st->regmap, ADXL372_FIFO_SAMPLES, fifo_samples);
+       ret = regmap_write(st->regmap,
+                          ADXL372_FIFO_SAMPLES, fifo_samples & 0xFF);
        if (ret < 0)
                return ret;
 
@@ -548,8 +553,7 @@ static irqreturn_t adxl372_trigger_handler(int irq, void  *p)
                        goto err;
 
                /* Each sample is 2 bytes */
-               for (i = 0; i < fifo_entries * sizeof(u16);
-                    i += st->fifo_set_size * sizeof(u16))
+               for (i = 0; i < fifo_entries; i += st->fifo_set_size)
                        iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
        }
 err:
@@ -571,6 +575,14 @@ static int adxl372_setup(struct adxl372_state *st)
                return -ENODEV;
        }
 
+       /*
+        * Perform a software reset to make sure the device is in a consistent
+        * state after start up.
+        */
+       ret = regmap_write(st->regmap, ADXL372_RESET, ADXL372_RESET_CODE);
+       if (ret < 0)
+               return ret;
+
        ret = adxl372_set_op_mode(st, ADXL372_STANDBY);
        if (ret < 0)
                return ret;
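The adxl372 changes above hinge on one distinction: the user-visible watermark counts sample sets, while the FIFO registers and the returned data count individual samples, so both the watermark programming and the buffer-push loop must scale by the set size. A sketch of the arithmetic with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int watermark_sets = 30;   /* hypothetical watermark        */
            unsigned int fifo_set_size  = 3;    /* e.g. X, Y, Z in one set       */
            unsigned int fifo_samples   = watermark_sets * fifo_set_size;
            unsigned int fifo_entries   = 90;   /* samples returned by the part  */
            unsigned int i, sets = 0;

            printf("FIFO_SAMPLES register value: 0x%02X\n", fifo_samples & 0xFF);

            /* Push one whole set per iteration, as the trigger handler now does. */
            for (i = 0; i < fifo_entries; i += fifo_set_size)
                    sets++;

            printf("pushed %u sets of %u samples\n", sets, fifo_set_size);
            return 0;
    }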
index cf6c0e3..121b4e8 100644 (file)
 #define BMC150_ACCEL_SLEEP_1_SEC               0x0F
 
 #define BMC150_ACCEL_REG_TEMP                  0x08
-#define BMC150_ACCEL_TEMP_CENTER_VAL           24
+#define BMC150_ACCEL_TEMP_CENTER_VAL           23
 
 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
 #define BMC150_AUTO_SUSPEND_DELAY_MS           2000
index 5a3ca59..f658012 100644 (file)
@@ -810,10 +810,10 @@ static int ad799x_probe(struct i2c_client *client,
 
        ret = ad799x_write_config(st, st->chip_config->default_config);
        if (ret < 0)
-               goto error_disable_reg;
+               goto error_disable_vref;
        ret = ad799x_read_config(st);
        if (ret < 0)
-               goto error_disable_reg;
+               goto error_disable_vref;
        st->config = ret;
 
        ret = iio_triggered_buffer_setup(indio_dev, NULL,
index adc9cf7..8ea2aed 100644 (file)
@@ -7,6 +7,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #define AXP288_ADC_EN_MASK                             0xF0
 #define AXP288_ADC_TS_ENABLE                           0x01
 
+#define AXP288_ADC_TS_BIAS_MASK                                GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA                                (0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA                                (1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA                                (2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA                                (3 << 4)
 #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK              GENMASK(1, 0)
 #define AXP288_ADC_TS_CURRENT_OFF                      (0 << 0)
 #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING         (1 << 0)
@@ -177,10 +183,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        return ret;
 }
 
+/*
+ * We rely on the machine's firmware to correctly set up the TS pin bias current
+ * at boot. This lists systems with broken fw where we need to set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+       {
+               /* Lenovo Ideapad 100S (11 inch) */
+               .matches = {
+                 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+               },
+               .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+       },
+       {}
+};
+
 static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
+       const struct dmi_system_id *bias_override;
        int ret, adc_enable_val;
 
+       bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+       if (bias_override) {
+               ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+                                        AXP288_ADC_TS_BIAS_MASK,
+                                        (uintptr_t)bias_override->driver_data);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * Determine if the TS pin is enabled and set the TS current-source
         * accordingly.
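The axp288 fix above uses a DMI quirk table to override the TS bias current on machines whose firmware programs it incorrectly. A simplified userspace analogue of that table-driven override; the kernel does this with dmi_first_match() and regmap_update_bits(), and the strings below only mirror the single entry added in the patch:

    #include <stdio.h>
    #include <string.h>

    struct quirk {
            const char *vendor;
            const char *product;
            unsigned int ts_bias;       /* value for the TS bias field */
    };

    static const struct quirk quirks[] = {
            { "LENOVO", "Lenovo ideapad 100S-11IBY", 3 << 4 /* 80 uA */ },
            { NULL, NULL, 0 }
    };

    static const struct quirk *find_quirk(const char *vendor, const char *product)
    {
            const struct quirk *q;

            for (q = quirks; q->vendor; q++)
                    if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                            return q;
            return NULL;
    }

    int main(void)
    {
            const struct quirk *q = find_quirk("LENOVO", "Lenovo ideapad 100S-11IBY");

            if (q)
                    printf("override TS bias field to 0x%02X\n", q->ts_bias);
            else
                    printf("keep firmware-programmed bias\n");
            return 0;
    }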
index 88c7fe1..62e6c8b 100644 (file)
@@ -100,14 +100,14 @@ struct hx711_data {
 
 static int hx711_cycle(struct hx711_data *hx711_data)
 {
-       int val;
+       unsigned long flags;
 
        /*
         * if preempted for more than 60us while PD_SCK is high:
         * the hx711 goes into reset
         * ==> the measurement is invalid
         */
-       preempt_disable();
+       local_irq_save(flags);
        gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
 
        /*
@@ -117,7 +117,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         */
        ndelay(hx711_data->data_ready_delay_ns);
 
-       val = gpiod_get_value(hx711_data->gpiod_dout);
        /*
         * here we are not waiting for 0.2 us as suggested by the datasheet,
         * because the oscilloscope showed in a test scenario
@@ -125,7 +124,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
         */
        gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
-       preempt_enable();
+       local_irq_restore(flags);
 
        /*
         * make it a square wave for addressing cases with capacitance on
@@ -133,7 +132,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         */
        ndelay(hx711_data->data_ready_delay_ns);
 
-       return val;
+       /* sample as late as possible */
+       return gpiod_get_value(hx711_data->gpiod_dout);
 }
 
 static int hx711_read(struct hx711_data *hx711_data)
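The hx711 change above switches from preempt_disable() to local_irq_save() so nothing can stretch the PD_SCK-high window past the chip's reset threshold, and it samples DOUT as late as possible, after PD_SCK has gone low again. A structural sketch of one clock cycle with the kernel primitives stubbed out; delays are omitted and this is not driver code:

    #include <stdio.h>

    /* Stubs standing in for gpiod_set_value() / gpiod_get_value(). */
    static void set_sck(int value)  { (void)value; }
    static int  get_dout(void)      { return 1; }

    /* Stubs standing in for local_irq_save() / local_irq_restore(). */
    static void no_interrupts_begin(void) { }
    static void no_interrupts_end(void)   { }

    static int cycle(void)
    {
            no_interrupts_begin();
            set_sck(1);              /* chip starts shifting out the next bit */
            /* ndelay(data_ready_delay_ns) would go here */
            set_sck(0);
            no_interrupts_end();     /* PD_SCK-high window over, interrupts back on */

            /* ndelay(data_ready_delay_ns) would go here */
            return get_dout();       /* sample DOUT as late as possible */
    }

    int main(void)
    {
            printf("bit = %d\n", cycle());
            return 0;
    }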
index 7b28d04..7b27306 100644 (file)
@@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
        if (IS_ERR(base))
                return PTR_ERR(base);
 
+       priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                                            priv->param->regmap_config);
+       if (IS_ERR(priv->regmap))
+               return PTR_ERR(priv->regmap);
+
        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (!irq)
                return -EINVAL;
@@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
-                                            priv->param->regmap_config);
-       if (IS_ERR(priv->regmap))
-               return PTR_ERR(priv->regmap);
-
        priv->clkin = devm_clk_get(&pdev->dev, "clkin");
        if (IS_ERR(priv->clkin)) {
                dev_err(&pdev->dev, "failed to get clkin\n");
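The meson ADC hunks above simply reorder probe so the regmap exists before the interrupt that may use it can be requested. A toy illustration of why that ordering matters: set up the resource first, only then expose the handler that dereferences it.

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
            int *regmap;                          /* resource used by the handler */
            void (*handler)(struct dev *);
    };

    static void irq_handler(struct dev *d)
    {
            printf("read %d\n", *d->regmap);      /* would crash if regmap == NULL */
    }

    int main(void)
    {
            struct dev d = { 0 };

            d.regmap = malloc(sizeof(*d.regmap)); /* 1) set up the resource ...      */
            if (!d.regmap)
                    return 1;
            *d.regmap = 7;
            d.handler = irq_handler;              /* 2) ... then expose the handler  */

            d.handler(&d);                        /* simulated interrupt */
            free(d.regmap);
            return 0;
    }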
index 9b85fef..93a096a 100644 (file)
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3                   BIT(17)
-#define STM32F4_EOC2                   BIT(9)
-#define STM32F4_EOC1                   BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT       16
-#define STM32F4_ADC_ADCPRE_MASK                GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV                        BIT(18)
-#define STM32H7_EOC_MST                        BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT            18
-#define STM32H7_PRESC_MASK             GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT           16
-#define STM32H7_CKMODE_MASK            GENMASK(17, 16)
-
 #define STM32_ADC_CORE_SLEEP_DELAY_MS  2000
 
 /* SYSCFG registers */
@@ -71,6 +44,8 @@
  * @eoc1:      adc1 end of conversion flag in @csr
  * @eoc2:      adc2 end of conversion flag in @csr
  * @eoc3:      adc3 end of conversion flag in @csr
+ * @ier:       interrupt enable register offset for each adc
+ * @eocie_msk: end of conversion interrupt enable mask in @ier
  */
 struct stm32_adc_common_regs {
        u32 csr;
@@ -78,6 +53,8 @@ struct stm32_adc_common_regs {
        u32 eoc1_msk;
        u32 eoc2_msk;
        u32 eoc3_msk;
+       u32 ier;
+       u32 eocie_msk;
 };
 
 struct stm32_adc_priv;
@@ -303,6 +280,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
        .eoc1_msk = STM32F4_EOC1,
        .eoc2_msk = STM32F4_EOC2,
        .eoc3_msk = STM32F4_EOC3,
+       .ier = STM32F4_ADC_CR1,
+       .eocie_msk = STM32F4_EOCIE,
 };
 
 /* STM32H7 common registers definitions */
@@ -311,8 +290,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
        .ccr = STM32H7_ADC_CCR,
        .eoc1_msk = STM32H7_EOC_MST,
        .eoc2_msk = STM32H7_EOC_SLV,
+       .ier = STM32H7_ADC_IER,
+       .eocie_msk = STM32H7_EOCIE,
+};
+
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+       0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
 };
 
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+                                         unsigned int adc)
+{
+       u32 ier, offset = stm32_adc_offset[adc];
+
+       ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+       return ier & priv->cfg->regs->eocie_msk;
+}
+
 /* ADC common interrupt for all instances */
 static void stm32_adc_irq_handler(struct irq_desc *desc)
 {
@@ -323,13 +318,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
        chained_irq_enter(chip, desc);
        status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
 
-       if (status & priv->cfg->regs->eoc1_msk)
+       /*
+        * End of conversion may be handled by using IRQ or DMA. There may be a
+        * race here when two conversions complete at the same time on several
+        * ADCs. EOC may be read 'set' for several ADCs, with:
+        * - an ADC configured to use DMA (EOC triggers the DMA request, and
+        *   is then automatically cleared by DR read in hardware)
+        * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+        *   be called in this case)
+        * So both EOC status bit in CSR and EOCIE control bit must be checked
+        * before invoking the interrupt handler (e.g. call ISR only for
+        * IRQ-enabled ADCs).
+        */
+       if (status & priv->cfg->regs->eoc1_msk &&
+           stm32_adc_eoc_enabled(priv, 0))
                generic_handle_irq(irq_find_mapping(priv->domain, 0));
 
-       if (status & priv->cfg->regs->eoc2_msk)
+       if (status & priv->cfg->regs->eoc2_msk &&
+           stm32_adc_eoc_enabled(priv, 1))
                generic_handle_irq(irq_find_mapping(priv->domain, 1));
 
-       if (status & priv->cfg->regs->eoc3_msk)
+       if (status & priv->cfg->regs->eoc3_msk &&
+           stm32_adc_eoc_enabled(priv, 2))
                generic_handle_irq(irq_find_mapping(priv->domain, 2));
 
        chained_irq_exit(chip, desc);
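The stm32 handler above now requires both the shared EOC status flag and the per-instance EOCIE enable bit before dispatching, so instances driven by DMA are left alone. A standalone model of that dispatch logic with the bit layout simplified:

    #include <stdio.h>

    #define NUM_ADC 3

    static const unsigned int eoc_msk[NUM_ADC] = { 1u << 1, 1u << 9, 1u << 17 };
    static unsigned int ier[NUM_ADC];           /* per-instance enable registers */

    static void handle_eoc(int adc)
    {
            printf("ADC%d: run IRQ handler\n", adc);
    }

    static void common_irq(unsigned int csr)
    {
            int i;

            for (i = 0; i < NUM_ADC; i++)
                    if ((csr & eoc_msk[i]) && (ier[i] & 1u))  /* EOC *and* EOCIE */
                            handle_eoc(i);
    }

    int main(void)
    {
            ier[0] = 1;                 /* ADC0 uses interrupts       */
            ier[1] = 0;                 /* ADC1 uses DMA: EOCIE clear */

            common_irq(eoc_msk[0] | eoc_msk[1]);  /* both flags set in CSR */
            return 0;
    }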
index 8af507b..2579d51 100644 (file)
  * --------------------------------------------------------
  */
 #define STM32_ADC_MAX_ADCS             3
+#define STM32_ADC_OFFSET               0x100
 #define STM32_ADCX_COMN_OFFSET         0x300
 
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR                 0x00
+#define STM32F4_ADC_CR1                        0x04
+#define STM32F4_ADC_CR2                        0x08
+#define STM32F4_ADC_SMPR1              0x0C
+#define STM32F4_ADC_SMPR2              0x10
+#define STM32F4_ADC_HTR                        0x24
+#define STM32F4_ADC_LTR                        0x28
+#define STM32F4_ADC_SQR1               0x2C
+#define STM32F4_ADC_SQR2               0x30
+#define STM32F4_ADC_SQR3               0x34
+#define STM32F4_ADC_JSQR               0x38
+#define STM32F4_ADC_JDR1               0x3C
+#define STM32F4_ADC_JDR2               0x40
+#define STM32F4_ADC_JDR3               0x44
+#define STM32F4_ADC_JDR4               0x48
+#define STM32F4_ADC_DR                 0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT                   BIT(4)
+#define STM32F4_EOC                    BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT              24
+#define STM32F4_RES_MASK               GENMASK(25, 24)
+#define STM32F4_SCAN                   BIT(8)
+#define STM32F4_EOCIE                  BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART                        BIT(30)
+#define STM32F4_EXTEN_SHIFT            28
+#define STM32F4_EXTEN_MASK             GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT           24
+#define STM32F4_EXTSEL_MASK            GENMASK(27, 24)
+#define STM32F4_EOCS                   BIT(10)
+#define STM32F4_DDS                    BIT(9)
+#define STM32F4_DMA                    BIT(8)
+#define STM32F4_ADON                   BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3                   BIT(17)
+#define STM32F4_EOC2                   BIT(9)
+#define STM32F4_EOC1                   BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT       16
+#define STM32F4_ADC_ADCPRE_MASK                GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR                        0x00
+#define STM32H7_ADC_IER                        0x04
+#define STM32H7_ADC_CR                 0x08
+#define STM32H7_ADC_CFGR               0x0C
+#define STM32H7_ADC_SMPR1              0x14
+#define STM32H7_ADC_SMPR2              0x18
+#define STM32H7_ADC_PCSEL              0x1C
+#define STM32H7_ADC_SQR1               0x30
+#define STM32H7_ADC_SQR2               0x34
+#define STM32H7_ADC_SQR3               0x38
+#define STM32H7_ADC_SQR4               0x3C
+#define STM32H7_ADC_DR                 0x40
+#define STM32H7_ADC_DIFSEL             0xC0
+#define STM32H7_ADC_CALFACT            0xC4
+#define STM32H7_ADC_CALFACT2           0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY             BIT(12)
+#define STM32H7_EOC                    BIT(2)
+#define STM32H7_ADRDY                  BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE                  STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL                  BIT(31)
+#define STM32H7_ADCALDIF               BIT(30)
+#define STM32H7_DEEPPWD                        BIT(29)
+#define STM32H7_ADVREGEN               BIT(28)
+#define STM32H7_LINCALRDYW6            BIT(27)
+#define STM32H7_LINCALRDYW5            BIT(26)
+#define STM32H7_LINCALRDYW4            BIT(25)
+#define STM32H7_LINCALRDYW3            BIT(24)
+#define STM32H7_LINCALRDYW2            BIT(23)
+#define STM32H7_LINCALRDYW1            BIT(22)
+#define STM32H7_ADCALLIN               BIT(16)
+#define STM32H7_BOOST                  BIT(8)
+#define STM32H7_ADSTP                  BIT(4)
+#define STM32H7_ADSTART                        BIT(2)
+#define STM32H7_ADDIS                  BIT(1)
+#define STM32H7_ADEN                   BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT            10
+#define STM32H7_EXTEN_MASK             GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT           5
+#define STM32H7_EXTSEL_MASK            GENMASK(9, 5)
+#define STM32H7_RES_SHIFT              2
+#define STM32H7_RES_MASK               GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT            0
+#define STM32H7_DMNGT_MASK             GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+       STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
+       STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
+       STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
+       STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT                16
+#define STM32H7_CALFACT_D_MASK         GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT                0
+#define STM32H7_CALFACT_S_MASK         GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT       0
+#define STM32H7_LINCALFACT_MASK                GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV                        BIT(18)
+#define STM32H7_EOC_MST                        BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT            18
+#define STM32H7_PRESC_MASK             GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT           16
+#define STM32H7_CKMODE_MASK            GENMASK(17, 16)
+
 /**
  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
  * @base:              control registers base cpu addr
index 6a7dd08..663f8a5 100644 (file)
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR                 0x00
-#define STM32F4_ADC_CR1                        0x04
-#define STM32F4_ADC_CR2                        0x08
-#define STM32F4_ADC_SMPR1              0x0C
-#define STM32F4_ADC_SMPR2              0x10
-#define STM32F4_ADC_HTR                        0x24
-#define STM32F4_ADC_LTR                        0x28
-#define STM32F4_ADC_SQR1               0x2C
-#define STM32F4_ADC_SQR2               0x30
-#define STM32F4_ADC_SQR3               0x34
-#define STM32F4_ADC_JSQR               0x38
-#define STM32F4_ADC_JDR1               0x3C
-#define STM32F4_ADC_JDR2               0x40
-#define STM32F4_ADC_JDR3               0x44
-#define STM32F4_ADC_JDR4               0x48
-#define STM32F4_ADC_DR                 0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT                   BIT(4)
-#define STM32F4_EOC                    BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT              24
-#define STM32F4_RES_MASK               GENMASK(25, 24)
-#define STM32F4_SCAN                   BIT(8)
-#define STM32F4_EOCIE                  BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART                        BIT(30)
-#define STM32F4_EXTEN_SHIFT            28
-#define STM32F4_EXTEN_MASK             GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT           24
-#define STM32F4_EXTSEL_MASK            GENMASK(27, 24)
-#define STM32F4_EOCS                   BIT(10)
-#define STM32F4_DDS                    BIT(9)
-#define STM32F4_DMA                    BIT(8)
-#define STM32F4_ADON                   BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR                        0x00
-#define STM32H7_ADC_IER                        0x04
-#define STM32H7_ADC_CR                 0x08
-#define STM32H7_ADC_CFGR               0x0C
-#define STM32H7_ADC_SMPR1              0x14
-#define STM32H7_ADC_SMPR2              0x18
-#define STM32H7_ADC_PCSEL              0x1C
-#define STM32H7_ADC_SQR1               0x30
-#define STM32H7_ADC_SQR2               0x34
-#define STM32H7_ADC_SQR3               0x38
-#define STM32H7_ADC_SQR4               0x3C
-#define STM32H7_ADC_DR                 0x40
-#define STM32H7_ADC_DIFSEL             0xC0
-#define STM32H7_ADC_CALFACT            0xC4
-#define STM32H7_ADC_CALFACT2           0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY             BIT(12)
-#define STM32H7_EOC                    BIT(2)
-#define STM32H7_ADRDY                  BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE                  STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL                  BIT(31)
-#define STM32H7_ADCALDIF               BIT(30)
-#define STM32H7_DEEPPWD                        BIT(29)
-#define STM32H7_ADVREGEN               BIT(28)
-#define STM32H7_LINCALRDYW6            BIT(27)
-#define STM32H7_LINCALRDYW5            BIT(26)
-#define STM32H7_LINCALRDYW4            BIT(25)
-#define STM32H7_LINCALRDYW3            BIT(24)
-#define STM32H7_LINCALRDYW2            BIT(23)
-#define STM32H7_LINCALRDYW1            BIT(22)
-#define STM32H7_ADCALLIN               BIT(16)
-#define STM32H7_BOOST                  BIT(8)
-#define STM32H7_ADSTP                  BIT(4)
-#define STM32H7_ADSTART                        BIT(2)
-#define STM32H7_ADDIS                  BIT(1)
-#define STM32H7_ADEN                   BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT            10
-#define STM32H7_EXTEN_MASK             GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT           5
-#define STM32H7_EXTSEL_MASK            GENMASK(9, 5)
-#define STM32H7_RES_SHIFT              2
-#define STM32H7_RES_MASK               GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT            0
-#define STM32H7_DMNGT_MASK             GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
-       STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
-       STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
-       STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
-       STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT                16
-#define STM32H7_CALFACT_D_MASK         GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT                0
-#define STM32H7_CALFACT_S_MASK         GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT       0
-#define STM32H7_LINCALFACT_MASK                GENMASK(29, 0)
-
 /* Number of linear calibration shadow registers / LINCALRDYW control bits */
 #define STM32H7_LINCALFACT_NUM         6
 
index 9ac8356..4998a89 100644 (file)
@@ -35,8 +35,11 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
                return -ENOMEM;
 
        adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
-       if (!adis->buffer)
+       if (!adis->buffer) {
+               kfree(adis->xfer);
+               adis->xfer = NULL;
                return -ENOMEM;
+       }
 
        tx = adis->buffer + burst_length;
        tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
@@ -78,8 +81,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
                return -ENOMEM;
 
        adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL);
-       if (!adis->buffer)
+       if (!adis->buffer) {
+               kfree(adis->xfer);
+               adis->xfer = NULL;
                return -ENOMEM;
+       }
 
        rx = adis->buffer;
        tx = rx + scan_count;
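The adis hunks above fix a leak: when the second allocation fails, the first one must be released and its pointer cleared before returning, otherwise every retry leaks a buffer. The same pattern in plain C with calloc/free:

    #include <stdio.h>
    #include <stdlib.h>

    struct state {
            void *xfer;
            void *buffer;
    };

    static int setup_buffers(struct state *st, size_t xfer_len, size_t buf_len)
    {
            st->xfer = calloc(1, xfer_len);
            if (!st->xfer)
                    return -1;

            st->buffer = calloc(1, buf_len);
            if (!st->buffer) {
                    free(st->xfer);          /* undo the first allocation ...        */
                    st->xfer = NULL;         /* ... and don't leave a stale pointer  */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct state st = { 0 };

            if (setup_buffers(&st, 64, 256) == 0)
                    printf("buffers ready\n");

            free(st.buffer);
            free(st.xfer);
            return 0;
    }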
index 80e42c7..0fe6999 100644 (file)
@@ -99,7 +99,9 @@ struct st_lsm6dsx_fs {
 #define ST_LSM6DSX_FS_LIST_SIZE                4
 struct st_lsm6dsx_fs_table_entry {
        struct st_lsm6dsx_reg reg;
+
        struct st_lsm6dsx_fs fs_avl[ST_LSM6DSX_FS_LIST_SIZE];
+       int fs_len;
 };
 
 /**
index 2d34955..fd5ebe1 100644 (file)
@@ -145,6 +145,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -154,6 +155,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[0] = {  IIO_DEGREE_TO_RAD(245), 0x0 },
                                .fs_avl[1] = {  IIO_DEGREE_TO_RAD(500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+                               .fs_len = 3,
                        },
                },
        },
@@ -215,6 +217,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -225,6 +228,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .decimator = {
@@ -327,6 +331,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -337,6 +342,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .decimator = {
@@ -448,6 +454,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -458,6 +465,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .decimator = {
@@ -563,6 +571,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -573,6 +582,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .batch = {
@@ -693,6 +703,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -703,6 +714,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .batch = {
@@ -800,6 +812,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -810,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .batch = {
@@ -933,11 +947,12 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
        int i, err;
 
        fs_table = &sensor->hw->settings->fs_table[sensor->id];
-       for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
+       for (i = 0; i < fs_table->fs_len; i++) {
                if (fs_table->fs_avl[i].gain == gain)
                        break;
+       }
 
-       if (i == ST_LSM6DSX_FS_LIST_SIZE)
+       if (i == fs_table->fs_len)
                return -EINVAL;
 
        data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val,
@@ -1196,18 +1211,13 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
 {
        struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
        const struct st_lsm6dsx_fs_table_entry *fs_table;
-       enum st_lsm6dsx_sensor_id id = sensor->id;
        struct st_lsm6dsx_hw *hw = sensor->hw;
        int i, len = 0;
 
-       fs_table = &hw->settings->fs_table[id];
-       for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
-               if (!fs_table->fs_avl[i].gain)
-                       break;
-
+       fs_table = &hw->settings->fs_table[sensor->id];
+       for (i = 0; i < fs_table->fs_len; i++)
                len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
                                 fs_table->fs_avl[i].gain);
-       }
        buf[len - 1] = '\n';
 
        return len;
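The st_lsm6dsx changes above add an explicit fs_len so lookups iterate over exactly the valid full-scale entries instead of scanning a fixed-size array for a zero-gain sentinel. A simplified lookup using the same table shape; the gains are copied from one of the accelerometer entries above:

    #include <stdio.h>

    #define FS_LIST_SIZE 4

    struct fs_entry { unsigned int gain; unsigned int val; };
    struct fs_table { struct fs_entry fs_avl[FS_LIST_SIZE]; int fs_len; };

    static const struct fs_table acc_fs = {
            .fs_avl = { { 61, 0x0 }, { 122, 0x2 }, { 244, 0x3 }, { 488, 0x1 } },
            .fs_len = 4,
    };

    static int find_fs_val(const struct fs_table *t, unsigned int gain)
    {
            int i;

            for (i = 0; i < t->fs_len; i++)
                    if (t->fs_avl[i].gain == gain)
                            return (int)t->fs_avl[i].val;
            return -1;                       /* -EINVAL in the driver */
    }

    int main(void)
    {
            printf("gain 244 -> reg val %d\n", find_fs_val(&acc_fs, 244));
            printf("gain 999 -> reg val %d\n", find_fs_val(&acc_fs, 999));
            return 0;
    }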
index 66fbcd9..ea472cf 100644 (file)
@@ -61,6 +61,7 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
                                .gain = 1500,
                                .val = 0x0,
                        }, /* 1500 uG/LSB */
+                       .fs_len = 1,
                },
                .temp_comp = {
                        .addr = 0x60,
@@ -92,9 +93,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
 static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
 {
        struct st_lsm6dsx_sensor *sensor;
+       u16 odr;
 
        sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
-       msleep((2000U / sensor->odr) + 1);
+       odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
+       msleep((2000U / odr) + 1);
 }
 
 /**
@@ -555,13 +558,9 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev,
        int i, len = 0;
 
        settings = sensor->ext_info.settings;
-       for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
-               u16 val = settings->fs_table.fs_avl[i].gain;
-
-               if (val > 0)
-                       len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
-                                        val);
-       }
+       for (i = 0; i < settings->fs_table.fs_len; i++)
+               len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+                                settings->fs_table.fs_avl[i].gain);
        buf[len - 1] = '\n';
 
        return len;
index 08d7e1e..4a1a883 100644 (file)
@@ -314,6 +314,7 @@ config MAX44009
 config NOA1305
        tristate "ON Semiconductor NOA1305 ambient light sensor"
        depends on I2C
+       select REGMAP_I2C
        help
         Say Y here if you want to build support for the ON Semiconductor
         NOA1305 ambient light sensor.
index e666879..92004a2 100644 (file)
@@ -686,6 +686,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
        struct iio_dev *iio = _iio;
        struct opt3001 *opt = iio_priv(iio);
        int ret;
+       bool wake_result_ready_queue = false;
 
        if (!opt->ok_to_ignore_lock)
                mutex_lock(&opt->lock);
@@ -720,13 +721,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
                }
                opt->result = ret;
                opt->result_ready = true;
-               wake_up(&opt->result_ready_queue);
+               wake_result_ready_queue = true;
        }
 
 out:
        if (!opt->ok_to_ignore_lock)
                mutex_unlock(&opt->lock);
 
+       if (wake_result_ready_queue)
+               wake_up(&opt->result_ready_queue);
+
        return IRQ_HANDLED;
 }
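The opt3001 fix above defers wake_up() until after the mutex has been released: the decision is recorded under the lock, the wake-up happens afterwards. A compilable pthread sketch of the same defer-the-wake pattern; signalling after unlock is safe because the waiter re-checks the predicate under the lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t result_ready = PTHREAD_COND_INITIALIZER;
    static bool ready;
    static int result;

    static void publish_result(int value)
    {
            bool wake = false;

            pthread_mutex_lock(&lock);
            if (value >= 0) {               /* decide under the lock ...        */
                    result = value;
                    ready = true;
                    wake = true;
            }
            pthread_mutex_unlock(&lock);

            if (wake)                       /* ... but wake up after dropping it */
                    pthread_cond_signal(&result_ready);
    }

    static void *waiter(void *arg)
    {
            (void)arg;
            pthread_mutex_lock(&lock);
            while (!ready)
                    pthread_cond_wait(&result_ready, &lock);
            printf("result = %d\n", result);
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, waiter, NULL);
            publish_result(42);
            pthread_join(t, NULL);
            return 0;
    }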
 
index 51421ac..16dacea 100644 (file)
@@ -398,19 +398,23 @@ static int vcnl4000_probe(struct i2c_client *client,
 static const struct of_device_id vcnl_4000_of_match[] = {
        {
                .compatible = "vishay,vcnl4000",
-               .data = "VCNL4000",
+               .data = (void *)VCNL4000,
        },
        {
                .compatible = "vishay,vcnl4010",
-               .data = "VCNL4010",
+               .data = (void *)VCNL4010,
        },
        {
-               .compatible = "vishay,vcnl4010",
-               .data = "VCNL4020",
+               .compatible = "vishay,vcnl4020",
+               .data = (void *)VCNL4010,
+       },
+       {
+               .compatible = "vishay,vcnl4040",
+               .data = (void *)VCNL4040,
        },
        {
                .compatible = "vishay,vcnl4200",
-               .data = "VCNL4200",
+               .data = (void *)VCNL4200,
        },
        {},
 };
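The vcnl4000 table above also switches .data from strings to enum values carried through the void pointer via a uintptr_t cast, and fixes the duplicated vcnl4010 compatible. A standalone illustration of round-tripping an enum through a const void * match table; the table below is an analogue, not the driver's:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    enum chip_id { CHIP4000, CHIP4010, CHIP4040, CHIP4200 };

    struct of_match { const char *compatible; const void *data; };

    static const struct of_match match_table[] = {
            { "vishay,vcnl4000", (void *)(uintptr_t)CHIP4000 },
            { "vishay,vcnl4010", (void *)(uintptr_t)CHIP4010 },
            { "vishay,vcnl4040", (void *)(uintptr_t)CHIP4040 },
            { "vishay,vcnl4200", (void *)(uintptr_t)CHIP4200 },
            { NULL, NULL }
    };

    static int lookup_chip(const char *compatible)
    {
            const struct of_match *m;

            for (m = match_table; m->compatible; m++)
                    if (!strcmp(m->compatible, compatible))
                            return (int)(uintptr_t)m->data;   /* cast back to the enum */
            return -1;
    }

    int main(void)
    {
            printf("vcnl4040 id = %d\n", lookup_chip("vishay,vcnl4040"));
            return 0;
    }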
index da10e6c..5920c00 100644 (file)
@@ -4399,6 +4399,7 @@ error2:
 error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
+       kfree(port);
        while (--i) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;
@@ -4407,6 +4408,7 @@ error1:
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
+               kfree(port);
        }
 free:
        kfree(cm_dev);
@@ -4460,6 +4462,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
                spin_unlock_irq(&cm.state_lock);
                ib_unregister_mad_agent(cur_mad_agent);
                cm_remove_port_fs(port);
+               kfree(port);
        }
 
        kfree(cm_dev);
index 0e3cf34..d78f676 100644 (file)
@@ -2396,9 +2396,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, RDMA_CM_DESTROYING);
                mutex_unlock(&conn_id->handler_mutex);
+               mutex_unlock(&listen_id->handler_mutex);
                cma_deref_id(conn_id);
                rdma_destroy_id(&conn_id->id);
-               goto out;
+               return ret;
        }
 
        mutex_unlock(&conn_id->handler_mutex);
index 99c4a55..2dd2cfe 100644 (file)
@@ -1987,8 +1987,6 @@ static int iw_query_port(struct ib_device *device,
        if (!netdev)
                return -ENODEV;
 
-       dev_put(netdev);
-
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
@@ -1996,19 +1994,22 @@ static int iw_query_port(struct ib_device *device,
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        } else {
-               inetdev = in_dev_get(netdev);
+               rcu_read_lock();
+               inetdev = __in_dev_get_rcu(netdev);
 
                if (inetdev && inetdev->ifa_list) {
                        port_attr->state = IB_PORT_ACTIVE;
                        port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
-                       in_dev_put(inetdev);
                } else {
                        port_attr->state = IB_PORT_INIT;
                        port_attr->phys_state =
                                IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
                }
+
+               rcu_read_unlock();
        }
 
+       dev_put(netdev);
        err = device->ops.query_port(device, port_num, port_attr);
        if (err)
                return err;
index 7a74740..65b3654 100644 (file)
@@ -1230,7 +1230,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
-               goto err;
+               goto err_get;
        }
 
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
@@ -1787,10 +1787,6 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
        qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
-       ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
-       if (ret)
-               goto err_unbind;
-
        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
@@ -1799,13 +1795,15 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto err_fill;
        }
 
+       ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
+       if (ret)
+               goto err_fill;
+
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_fill:
-       rdma_counter_bind_qpn(device, port, qpn, cntn);
-err_unbind:
        nlmsg_free(msg);
 err:
        ib_device_put(device);
index 1ab423b..6eb6d27 100644 (file)
@@ -426,7 +426,7 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
        int ret;
 
        rdma_for_each_port (dev, i) {
-               is_ib = rdma_protocol_ib(dev, i++);
+               is_ib = rdma_protocol_ib(dev, i);
                if (is_ib)
                        break;
        }
index f67a30f..163ff7b 100644 (file)
@@ -451,8 +451,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
         * that the hardware will not attempt to access the MR any more.
         */
        if (!umem_odp->is_implicit_odp) {
+               mutex_lock(&umem_odp->umem_mutex);
                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
                                            ib_umem_end(umem_odp));
+               mutex_unlock(&umem_odp->umem_mutex);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->page_list);
        }
@@ -719,6 +721,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
        u64 addr;
        struct ib_device *dev = umem_odp->umem.ibdev;
 
+       lockdep_assert_held(&umem_odp->umem_mutex);
+
        virt = max_t(u64, virt, ib_umem_start(umem_odp));
        bound = min_t(u64, bound, ib_umem_end(umem_odp));
        /* Note that during the run of this function, the
@@ -726,7 +730,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
         * faults from completion. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
-       mutex_lock(&umem_odp->umem_mutex);
        for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                if (umem_odp->page_list[idx]) {
@@ -757,7 +760,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                        umem_odp->npages--;
                }
        }
-       mutex_unlock(&umem_odp->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
 
index a8b9548..599340c 100644 (file)
@@ -242,10 +242,13 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
        }
 }
 
-static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+                  struct c4iw_debugfs_data *qpd)
 {
        int space;
        int cc;
+       if (id != qp->wq.sq.qid)
+               return 0;
 
        space = qpd->bufsize - qpd->pos - 1;
        if (space == 0)
@@ -350,7 +353,7 @@ static int qp_open(struct inode *inode, struct file *file)
 
        xa_lock_irq(&qpd->devp->qps);
        xa_for_each(&qpd->devp->qps, index, qp)
-               dump_qp(qp, qpd);
+               dump_qp(index, qp, qpd);
        xa_unlock_irq(&qpd->devp->qps);
 
        qpd->buf[qpd->pos++] = 0;
index aa772ee..35c284a 100644 (file)
@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                           struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
 {
        int err;
-       struct fw_ri_tpte tpt;
+       struct fw_ri_tpte *tpt;
        u32 stag_idx;
        static atomic_t key;
 
        if (c4iw_fatal_error(rdev))
                return -EIO;
 
+       tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+       if (!tpt)
+               return -ENOMEM;
+
        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;
 
@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.stag.fail++;
                        mutex_unlock(&rdev->stats.lock);
+                       kfree(tpt);
                        return -ENOMEM;
                }
                mutex_lock(&rdev->stats.lock);
@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 
        /* write TPT entry */
        if (reset_tpt_entry)
-               memset(&tpt, 0, sizeof(tpt));
+               memset(tpt, 0, sizeof(*tpt));
        else {
-               tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+               tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
                        FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
                        FW_RI_TPTE_STAGSTATE_V(stag_state) |
                        FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
-               tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+               tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
                        (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
                        FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO))|
                        FW_RI_TPTE_PS_V(page_size));
-               tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+               tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                        FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
-               tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
-               tpt.va_hi = cpu_to_be32((u32)(to >> 32));
-               tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
-               tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
-               tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+               tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+               tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+               tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+               tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+               tpt->len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
-                               sizeof(tpt), &tpt, skb, wr_waitp);
+                               sizeof(*tpt), tpt, skb, wr_waitp);
 
        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
+       kfree(tpt);
        return err;
 }
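The write_tpt_entry() change above moves a sizeable structure off the kernel stack onto the heap, which means every exit path now has to free it. A generic sketch of that allocate-once, free-on-all-paths shape; the size and the error condition are invented:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct tpt_entry { unsigned char payload[128]; };

    static int write_entry(int fail_early)
    {
            struct tpt_entry *tpt;
            int err = 0;

            tpt = malloc(sizeof(*tpt));
            if (!tpt)
                    return -1;

            if (fail_early) {            /* e.g. no free stag index */
                    err = -1;
                    goto out;            /* still frees the entry */
            }

            memset(tpt, 0, sizeof(*tpt));
            /* ... fill in and write the entry here ... */

    out:
            free(tpt);
            return err;
    }

    int main(void)
    {
            printf("write: %d\n", write_entry(0));
            printf("fail : %d\n", write_entry(1));
            return 0;
    }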
 
index eb9368b..bbcac53 100644 (file)
@@ -2737,15 +2737,11 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
        if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
                srq->flags = T4_SRQ_LIMIT_SUPPORT;
 
-       ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
-       if (ret)
-               goto err_free_queue;
-
        if (udata) {
                srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
                if (!srq_key_mm) {
                        ret = -ENOMEM;
-                       goto err_remove_handle;
+                       goto err_free_queue;
                }
                srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
                if (!srq_db_key_mm) {
@@ -2789,8 +2785,6 @@ err_free_srq_db_key_mm:
        kfree(srq_db_key_mm);
 err_free_srq_key_mm:
        kfree(srq_key_mm);
-err_remove_handle:
-       xa_erase_irq(&rhp->qps, srq->wq.qid);
 err_free_queue:
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                       srq->wr_waitp);
@@ -2813,8 +2807,6 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
        rhp = srq->rhp;
 
        pr_debug("%s id %d\n", __func__, srq->wq.qid);
-
-       xa_erase_irq(&rhp->qps, srq->wq.qid);
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
index 2395fd4..2ed7bfd 100644 (file)
@@ -1526,8 +1526,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
        }
 
        ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(tmp_sdma_rht);
                goto bail;
+       }
+
        dd->sdma_rht = tmp_sdma_rht;
 
        dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
index 8056930..cd9ee16 100644 (file)
@@ -2773,6 +2773,10 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
                return -ENOMEM;
        iwibdev = iwdev->iwibdev;
        rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
+       ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
+       if (ret)
+               goto error;
+
        ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
        if (ret)
                goto error;
index 59022b7..d609f46 100644 (file)
@@ -1298,29 +1298,6 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
        return 0;
 }
 
-static void devx_free_indirect_mkey(struct rcu_head *rcu)
-{
-       kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
-}
-
-/* This function to delete from the radix tree needs to be called before
- * destroying the underlying mkey. Otherwise a race might occur in case that
- * other thread will get the same mkey before this one will be deleted,
- * in that case it will fail via inserting to the tree its own data.
- *
- * Note:
- * An error in the destroy is not expected unless there is some other indirect
- * mkey which points to this one. In a kernel cleanup flow it will be just
- * destroyed in the iterative destruction call. In a user flow, in case
- * the application didn't close in the expected order it's its own problem,
- * the mkey won't be part of the tree, in both cases the kernel is safe.
- */
-static void devx_cleanup_mkey(struct devx_obj *obj)
-{
-       xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
-                mlx5_base_mkey(obj->devx_mr.mmkey.key));
-}
-
 static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
                                      struct devx_event_subscription *sub)
 {
@@ -1362,8 +1339,16 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
        int ret;
 
        dev = mlx5_udata_to_mdev(&attrs->driver_udata);
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
-               devx_cleanup_mkey(obj);
+       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+               /*
+                * pagefault_single_data_segment() issues commands against
+                * the mmkey; we must wait for those to stop before freeing
+                * the mkey, as another allocation could get the same mkey #.
+                */
+               xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+                        mlx5_base_mkey(obj->devx_mr.mmkey.key));
+               synchronize_srcu(&dev->mr_srcu);
+       }
 
        if (obj->flags & DEVX_OBJ_FLAGS_DCT)
                ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
@@ -1382,12 +1367,6 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
                devx_cleanup_subscription(dev, sub_entry);
        mutex_unlock(&devx_event_table->event_xa_lock);
 
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
-               call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
-                         devx_free_indirect_mkey);
-               return ret;
-       }
-
        kfree(obj);
        return ret;
 }
@@ -1491,26 +1470,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                                   &obj_id);
        WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
 
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
-               err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
-               if (err)
-                       goto obj_destroy;
-       }
-
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
-               goto err_copy;
+               goto obj_destroy;
 
        if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
                obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
-
        obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
 
+       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+               err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+               if (err)
+                       goto obj_destroy;
+       }
        return 0;
 
-err_copy:
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
-               devx_cleanup_mkey(obj);
 obj_destroy:
        if (obj->flags & DEVX_OBJ_FLAGS_DCT)
                mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
index 2ceaef3..1a98ee2 100644 (file)
@@ -606,7 +606,7 @@ struct mlx5_ib_mr {
        struct mlx5_ib_dev     *dev;
        u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
        struct mlx5_core_sig_ctx    *sig;
-       int                     live;
+       unsigned int            live;
        void                    *descs_alloc;
        int                     access_flags; /* Needed for rereg MR */
 
@@ -639,7 +639,6 @@ struct mlx5_ib_mw {
 struct mlx5_ib_devx_mr {
        struct mlx5_core_mkey   mmkey;
        int                     ndescs;
-       struct rcu_head         rcu;
 };
 
 struct mlx5_ib_umr_context {
index 1eff031..6305993 100644 (file)
@@ -84,32 +84,6 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
                length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
 }
 
-static void update_odp_mr(struct mlx5_ib_mr *mr)
-{
-       if (is_odp_mr(mr)) {
-               /*
-                * This barrier prevents the compiler from moving the
-                * setting of umem->odp_data->private to point to our
-                * MR, before reg_umr finished, to ensure that the MR
-                * initialization have finished before starting to
-                * handle invalidations.
-                */
-               smp_wmb();
-               to_ib_umem_odp(mr->umem)->private = mr;
-               /*
-                * Make sure we will see the new
-                * umem->odp_data->private value in the invalidation
-                * routines, before we can get page faults on the
-                * MR. Page faults can happen once we put the MR in
-                * the tree, below this line. Without the barrier,
-                * there can be a fault handling and an invalidation
-                * before umem->odp_data->private == mr is visible to
-                * the invalidation handler.
-                */
-               smp_wmb();
-       }
-}
-
 static void reg_mr_callback(int status, struct mlx5_async_work *context)
 {
        struct mlx5_ib_mr *mr =
@@ -1346,8 +1320,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->umem = umem;
        set_mr_fields(dev, mr, npages, length, access_flags);
 
-       update_odp_mr(mr);
-
        if (use_umr) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
@@ -1363,10 +1335,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                }
        }
 
-       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               mr->live = 1;
+       if (is_odp_mr(mr)) {
+               to_ib_umem_odp(mr->umem)->private = mr;
                atomic_set(&mr->num_pending_prefetch, 0);
        }
+       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+               smp_store_release(&mr->live, 1);
 
        return &mr->ibmr;
 error:
@@ -1441,6 +1415,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
        if (!mr->umem)
                return -EINVAL;
 
+       if (is_odp_mr(mr))
+               return -EOPNOTSUPP;
+
        if (flags & IB_MR_REREG_TRANS) {
                addr = virt_addr;
                len = length;
@@ -1486,8 +1463,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                }
 
                mr->allocated_from_cache = 0;
-               if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
-                       mr->live = 1;
        } else {
                /*
                 * Send a UMR WQE
@@ -1516,7 +1491,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 
        set_mr_fields(dev, mr, npages, len, access_flags);
 
-       update_odp_mr(mr);
        return 0;
 
 err:
@@ -1607,15 +1581,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                /* Prevent new page faults and
                 * prefetch requests from succeeding
                 */
-               mr->live = 0;
+               WRITE_ONCE(mr->live, 0);
+
+               /* Wait for all running page-fault handlers to finish. */
+               synchronize_srcu(&dev->mr_srcu);
 
                /* dequeue pending prefetch requests for the mr */
                if (atomic_read(&mr->num_pending_prefetch))
                        flush_workqueue(system_unbound_wq);
                WARN_ON(atomic_read(&mr->num_pending_prefetch));
 
-               /* Wait for all running page-fault handlers to finish. */
-               synchronize_srcu(&dev->mr_srcu);
                /* Destroy all page mappings */
                if (!umem_odp->is_implicit_odp)
                        mlx5_ib_invalidate_range(umem_odp,
@@ -1987,14 +1962,25 @@ free:
 
 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 {
+       struct mlx5_ib_dev *dev = to_mdev(mw->device);
        struct mlx5_ib_mw *mmw = to_mmw(mw);
        int err;
 
-       err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
-                                     &mmw->mmkey);
-       if (!err)
-               kfree(mmw);
-       return err;
+       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+               xa_erase(&dev->mdev->priv.mkey_table,
+                        mlx5_base_mkey(mmw->mmkey.key));
+               /*
+                * pagefault_single_data_segment() may be accessing mmw under
+                * SRCU if the user bound an ODP MR to this MW.
+                */
+               synchronize_srcu(&dev->mr_srcu);
+       }
+
+       err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+       if (err)
+               return err;
+       kfree(mmw);
+       return 0;
 }
 
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
index 2e9b430..3f9478d 100644 (file)
@@ -178,6 +178,29 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
                return;
        }
 
+       /*
+        * The locking here is pretty subtle. Ideally the implicit children
+        * list would be protected by the umem_mutex; however, that is not
+        * possible. Instead this uses a weaker update-then-lock pattern:
+        *
+        *  srcu_read_lock()
+        *    <change children list>
+        *    mutex_lock(umem_mutex)
+        *     mlx5_ib_update_xlt()
+        *    mutex_unlock(umem_mutex)
+        *    destroy lkey
+        *
+        * i.e. any change to the children list must be followed by the locked
+        * update_xlt before destroying.
+        *
+        * The umem_mutex provides the acquire/release semantics needed to make
+        * the children list visible to a racing thread. While SRCU is not
+        * technically required, holding it here keeps the SRCU locking around
+        * the children list consistent.
+        */
+       lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
+       lockdep_assert_held(&mr->dev->mr_srcu);
+
        odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
                         nentries * MLX5_IMR_MTT_SIZE, mr);
 
@@ -202,15 +225,22 @@ static void mr_leaf_free_action(struct work_struct *work)
        struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
        int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
        struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+       struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+       int srcu_key;
 
        mr->parent = NULL;
        synchronize_srcu(&mr->dev->mr_srcu);
 
-       ib_umem_odp_release(odp);
-       if (imr->live)
+       if (smp_load_acquire(&imr->live)) {
+               srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+               mutex_lock(&odp_imr->umem_mutex);
                mlx5_ib_update_xlt(imr, idx, 1, 0,
                                   MLX5_IB_UPD_XLT_INDIRECT |
                                   MLX5_IB_UPD_XLT_ATOMIC);
+               mutex_unlock(&odp_imr->umem_mutex);
+               srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+       }
+       ib_umem_odp_release(odp);
        mlx5_mr_cache_free(mr->dev, mr);
 
        if (atomic_dec_and_test(&imr->num_leaf_free))
@@ -278,7 +308,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
-       mutex_unlock(&umem_odp->umem_mutex);
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
@@ -289,10 +318,12 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
 
        if (unlikely(!umem_odp->npages && mr->parent &&
                     !umem_odp->dying)) {
-               WRITE_ONCE(umem_odp->dying, 1);
+               WRITE_ONCE(mr->live, 0);
+               umem_odp->dying = 1;
                atomic_inc(&mr->parent->num_leaf_free);
                schedule_work(&umem_odp->work);
        }
+       mutex_unlock(&umem_odp->umem_mutex);
 }
 
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
@@ -429,8 +460,6 @@ static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
 
-       mr->live = 1;
-
        mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
                    mr->mmkey.key, dev->mdev, mr);
 
@@ -484,6 +513,8 @@ next_mr:
                mtt->parent = mr;
                INIT_WORK(&odp->work, mr_leaf_free_action);
 
+               smp_store_release(&mtt->live, 1);
+
                if (!nentries)
                        start_idx = addr >> MLX5_IMR_MTT_SHIFT;
                nentries++;
@@ -536,6 +567,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
        init_waitqueue_head(&imr->q_leaf_free);
        atomic_set(&imr->num_leaf_free, 0);
        atomic_set(&imr->num_pending_prefetch, 0);
+       smp_store_release(&imr->live, 1);
 
        return imr;
 }
@@ -555,15 +587,19 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
                if (mr->parent != imr)
                        continue;
 
+               mutex_lock(&umem_odp->umem_mutex);
                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
                                            ib_umem_end(umem_odp));
 
-               if (umem_odp->dying)
+               if (umem_odp->dying) {
+                       mutex_unlock(&umem_odp->umem_mutex);
                        continue;
+               }
 
-               WRITE_ONCE(umem_odp->dying, 1);
+               umem_odp->dying = 1;
                atomic_inc(&imr->num_leaf_free);
                schedule_work(&umem_odp->work);
+               mutex_unlock(&umem_odp->umem_mutex);
        }
        up_read(&per_mm->umem_rwsem);
 
@@ -773,7 +809,7 @@ next_mr:
        switch (mmkey->type) {
        case MLX5_MKEY_MR:
                mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
-               if (!mr->live || !mr->ibmr.pd) {
+               if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
                        mlx5_ib_dbg(dev, "got dead MR\n");
                        ret = -EFAULT;
                        goto srcu_unlock;
@@ -1641,12 +1677,12 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
 
                mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 
-               if (mr->ibmr.pd != pd) {
+               if (!smp_load_acquire(&mr->live)) {
                        ret = false;
                        break;
                }
 
-               if (!mr->live) {
+               if (mr->ibmr.pd != pd) {
                        ret = false;
                        break;
                }
index 6cac0c8..36cdfbd 100644 (file)
@@ -230,8 +230,6 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 
        pvrdma_page_dir_cleanup(dev, &srq->pdir);
 
-       kfree(srq);
-
        atomic_dec(&dev->num_srqs);
 }
 
index 430314c..52d402f 100644 (file)
@@ -182,12 +182,19 @@ void siw_qp_llp_close(struct siw_qp *qp)
  */
 void siw_qp_llp_write_space(struct sock *sk)
 {
-       struct siw_cep *cep = sk_to_cep(sk);
+       struct siw_cep *cep;
 
-       cep->sk_write_space(sk);
+       read_lock(&sk->sk_callback_lock);
+
+       cep  = sk_to_cep(sk);
+       if (cep) {
+               cep->sk_write_space(sk);
 
-       if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
-               (void)siw_sq_start(cep->qp);
+               if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+                       (void)siw_sq_start(cep->qp);
+       }
+
+       read_unlock(&sk->sk_callback_lock);
 }
 
 static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
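
The siw hunk above takes sk->sk_callback_lock and re-checks the CEP pointer before using it, so a write-space callback racing with connection teardown sees NULL rather than a stale pointer. A minimal user-space sketch of that guard, with a pthread rwlock standing in for sk_callback_lock; all names here are hypothetical.

/* Sketch only: guard a callback's private pointer against concurrent teardown. */
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
        pthread_rwlock_t callback_lock;
        void *user_data;                /* stands in for the socket's CEP pointer */
};

static void write_space(struct fake_sock *sk)
{
        pthread_rwlock_rdlock(&sk->callback_lock);
        if (sk->user_data)              /* may have been detached concurrently */
                printf("notify %p\n", sk->user_data);
        pthread_rwlock_unlock(&sk->callback_lock);
}

static void teardown(struct fake_sock *sk)
{
        pthread_rwlock_wrlock(&sk->callback_lock);
        sk->user_data = NULL;           /* detach under the write lock */
        pthread_rwlock_unlock(&sk->callback_lock);
}

int main(void)
{
        struct fake_sock sk = {
                .callback_lock = PTHREAD_RWLOCK_INITIALIZER,
                .user_data = &sk,
        };

        write_space(&sk);
        teardown(&sk);
        write_space(&sk);               /* now a no-op instead of a crash */
        return 0;
}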
index dace857..7985192 100644 (file)
@@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
        onkey->input->phys = onkey->phys;
        onkey->input->dev.parent = &pdev->dev;
 
-       if (onkey->key_power)
-               input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
-       input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+       input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
        INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
 
index 97e3639..08520b3 100644 (file)
@@ -92,11 +92,18 @@ soc_button_device_create(struct platform_device *pdev,
                        continue;
 
                gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
-               if (gpio < 0 && gpio != -ENOENT) {
-                       error = gpio;
-                       goto err_free_mem;
-               } else if (!gpio_is_valid(gpio)) {
-                       /* Skip GPIO if not present */
+               if (!gpio_is_valid(gpio)) {
+                       /*
+                        * Skip GPIO if not present. Note we deliberately
+                        * ignore -EPROBE_DEFER errors here. On some devices
+                        * Intel uses so-called virtual GPIOs, which are not
+                        * GPIOs at all but a way for AML code to check some
+                        * random status bits without needing a custom opregion.
+                        * In some cases the resource table we parse points to
+                        * such a virtual GPIO. Since these are not real GPIOs,
+                        * we have no driver for them and they will never show
+                        * up; therefore we ignore -EPROBE_DEFER.
+                        */
                        continue;
                }
 
index 04fe434..2d8434b 100644 (file)
@@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
                                  leave_breadcrumbs);
 }
 
-static bool elantech_use_host_notify(struct psmouse *psmouse,
-                                    struct elantech_device_info *info)
-{
-       if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
-               return true;
-
-       switch (info->bus) {
-       case ETP_BUS_PS2_ONLY:
-               /* expected case */
-               break;
-       case ETP_BUS_SMB_HST_NTFY_ONLY:
-       case ETP_BUS_PS2_SMB_HST_NTFY:
-               /* SMbus implementation is stable since 2018 */
-               if (dmi_get_bios_year() >= 2018)
-                       return true;
-               /* fall through */
-       default:
-               psmouse_dbg(psmouse,
-                           "Ignoring SMBus bus provider %d\n", info->bus);
-               break;
-       }
-
-       return false;
-}
-
 /**
  * elantech_setup_smbus - called once the PS/2 devices are enumerated
  * and decides to instantiate a SMBus InterTouch device.
@@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
                 * i2c_blacklist_pnp_ids.
                 * Old ICs are up to the user to decide.
                 */
-               if (!elantech_use_host_notify(psmouse, info) ||
+               if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
                    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
                        return -ENXIO;
        }
@@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
        return 0;
 }
 
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+                                    struct elantech_device_info *info)
+{
+       if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+               return true;
+
+       switch (info->bus) {
+       case ETP_BUS_PS2_ONLY:
+               /* expected case */
+               break;
+       case ETP_BUS_SMB_ALERT_ONLY:
+               /* fall-through  */
+       case ETP_BUS_PS2_SMB_ALERT:
+               psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+               break;
+       case ETP_BUS_SMB_HST_NTFY_ONLY:
+               /* fall-through  */
+       case ETP_BUS_PS2_SMB_HST_NTFY:
+               return true;
+       default:
+               psmouse_dbg(psmouse,
+                           "Ignoring SMBus bus provider %d.\n",
+                           info->bus);
+       }
+
+       return false;
+}
+
 int elantech_init_smbus(struct psmouse *psmouse)
 {
        struct elantech_device_info info;
index 772493b..190b997 100644 (file)
@@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
        }
 
        mutex_lock(&data->irq_mutex);
-       bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+       bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
               data->irq_count);
        /*
         * At this point, irq_status has all bits that are set in the
@@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);
 
+       bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
 error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
@@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
        struct device *dev = &rmi_dev->dev;
 
        mutex_lock(&data->irq_mutex);
+       bitmap_andnot(data->fn_irq_bits,
+                     data->fn_irq_bits, mask, data->irq_count);
        bitmap_andnot(data->new_irq_mask,
                  data->current_irq_mask, mask, data->irq_count);
 
index 5178ea8..fb43aa7 100644 (file)
@@ -53,6 +53,7 @@ struct goodix_ts_data {
        const char *cfg_name;
        struct completion firmware_loading_complete;
        unsigned long irq_flags;
+       unsigned int contact_size;
 };
 
 #define GOODIX_GPIO_INT_NAME           "irq"
@@ -62,6 +63,7 @@ struct goodix_ts_data {
 #define GOODIX_MAX_WIDTH               4096
 #define GOODIX_INT_TRIGGER             1
 #define GOODIX_CONTACT_SIZE            8
+#define GOODIX_MAX_CONTACT_SIZE                9
 #define GOODIX_MAX_CONTACTS            10
 
 #define GOODIX_CONFIG_MAX_LENGTH       240
@@ -144,6 +146,19 @@ static const struct dmi_system_id rotated_screen[] = {
        {}
 };
 
+static const struct dmi_system_id nine_bytes_report[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               .ident = "Lenovo YogaBook",
+               /* YB1-X91L/F and YB1-X90L/F */
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+               }
+       },
+#endif
+       {}
+};
+
 /**
  * goodix_i2c_read - read data from a register of the i2c slave device.
  *
@@ -249,7 +264,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
        do {
                error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
-                                       data, GOODIX_CONTACT_SIZE + 1);
+                                       data, ts->contact_size + 1);
                if (error) {
                        dev_err(&ts->client->dev, "I2C transfer error: %d\n",
                                        error);
@@ -262,12 +277,12 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
                                return -EPROTO;
 
                        if (touch_num > 1) {
-                               data += 1 + GOODIX_CONTACT_SIZE;
+                               data += 1 + ts->contact_size;
                                error = goodix_i2c_read(ts->client,
                                                GOODIX_READ_COOR_ADDR +
-                                                       1 + GOODIX_CONTACT_SIZE,
+                                                       1 + ts->contact_size,
                                                data,
-                                               GOODIX_CONTACT_SIZE *
+                                               ts->contact_size *
                                                        (touch_num - 1));
                                if (error)
                                        return error;
@@ -286,7 +301,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        return 0;
 }
 
-static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
 {
        int id = coor_data[0] & 0x0F;
        int input_x = get_unaligned_le16(&coor_data[1]);
@@ -301,6 +316,21 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
        input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
 }
 
+static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
+{
+       int id = coor_data[1] & 0x0F;
+       int input_x = get_unaligned_le16(&coor_data[3]);
+       int input_y = get_unaligned_le16(&coor_data[5]);
+       int input_w = get_unaligned_le16(&coor_data[7]);
+
+       input_mt_slot(ts->input_dev, id);
+       input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+       touchscreen_report_pos(ts->input_dev, &ts->prop,
+                              input_x, input_y, true);
+       input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+       input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
 /**
  * goodix_process_events - Process incoming events
  *
@@ -311,7 +341,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
  */
 static void goodix_process_events(struct goodix_ts_data *ts)
 {
-       u8  point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+       u8  point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
        int touch_num;
        int i;
 
@@ -326,8 +356,12 @@ static void goodix_process_events(struct goodix_ts_data *ts)
        input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
 
        for (i = 0; i < touch_num; i++)
-               goodix_ts_report_touch(ts,
-                               &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+               if (ts->contact_size == 9)
+                       goodix_ts_report_touch_9b(ts,
+                               &point_data[1 + ts->contact_size * i]);
+               else
+                       goodix_ts_report_touch_8b(ts,
+                               &point_data[1 + ts->contact_size * i]);
 
        input_mt_sync_frame(ts->input_dev);
        input_sync(ts->input_dev);
@@ -730,6 +764,13 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
                        "Applying '180 degrees rotated screen' quirk\n");
        }
 
+       if (dmi_check_system(nine_bytes_report)) {
+               ts->contact_size = 9;
+
+               dev_dbg(&ts->client->dev,
+                       "Non-standard 9-byte report format quirk\n");
+       }
+
        error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
                                    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
        if (error) {
@@ -810,6 +851,7 @@ static int goodix_ts_probe(struct i2c_client *client,
        ts->client = client;
        i2c_set_clientdata(client, ts);
        init_completion(&ts->firmware_loading_complete);
+       ts->contact_size = GOODIX_CONTACT_SIZE;
 
        error = goodix_get_gpio_config(ts);
        if (error)
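
The goodix.c hunks above add a per-device contact_size and a 9-byte report parser for the Lenovo YogaBook, where the touch ID and the little-endian x/y/width fields sit one byte later than in the standard 8-byte format. A stand-alone sketch of decoding one such record, using a shift-based LE16 read in place of the kernel's get_unaligned_le16(); the sample bytes are made up.

/* Sketch only: decoding a hypothetical 9-byte Goodix contact record. */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));  /* unaligned-safe LE16 read */
}

int main(void)
{
        /* made-up record: id=2, x=0x0123, y=0x0456, width=0x0010 */
        const uint8_t coor[9] = {
                0x00, 0x02, 0x00, 0x23, 0x01, 0x56, 0x04, 0x10, 0x00
        };

        printf("id=%d x=%u y=%u w=%u\n",
               coor[1] & 0x0F,
               get_le16(&coor[3]), get_le16(&coor[5]), get_le16(&coor[7]));
        return 0;
}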
index e3842ea..b183c9f 100644 (file)
@@ -207,6 +207,7 @@ config INTEL_IOMMU_SVM
        bool "Support for Shared Virtual Memory with Intel IOMMU"
        depends on INTEL_IOMMU && X86
        select PCI_PASID
+       select PCI_PRI
        select MMU_NOTIFIER
        help
          Shared Virtual Memory (SVM) provides a facility for devices
index 2369b8a..dd55507 100644 (file)
@@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 retry:
        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-       pasid   = PPR_PASID(*(u64 *)&event[0]);
+       pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
+                 (event[1] & EVENT_DOMID_MASK_LO);
        flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        address = (u64)(((u64)event[3]) << 32) | event[2];
 
@@ -616,7 +617,7 @@ retry:
                        address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
-               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
@@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain)
  * to 64 bits.
  */
 static bool increase_address_space(struct protection_domain *domain,
+                                  unsigned long address,
                                   gfp_t gfp)
 {
        unsigned long flags;
@@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain,
 
        spin_lock_irqsave(&domain->lock, flags);
 
-       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
-               /* address space already 64 bit large */
+       if (address <= PM_LEVEL_SIZE(domain->mode) ||
+           WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                goto out;
 
        pte = (void *)get_zeroed_page(gfp);
@@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
        BUG_ON(!is_power_of_2(page_size));
 
        while (address > PM_LEVEL_SIZE(domain->mode))
-               *updated = increase_address_space(domain, gfp) || *updated;
+               *updated = increase_address_space(domain, address, gfp) || *updated;
 
        level   = domain->mode - 1;
        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
index c9c1612..17bd5a3 100644 (file)
 #define EVENT_TYPE_INV_PPR_REQ 0x9
 #define EVENT_DEVID_MASK       0xffff
 #define EVENT_DEVID_SHIFT      0
-#define EVENT_DOMID_MASK       0xffff
-#define EVENT_DOMID_SHIFT      0
+#define EVENT_DOMID_MASK_LO    0xffff
+#define EVENT_DOMID_MASK_HI    0xf0000
 #define EVENT_FLAGS_MASK       0xfff
 #define EVENT_FLAGS_SHIFT      0x10
 
index b18aac4..7c503a6 100644 (file)
@@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        return 0;
 
 out_clear_smmu:
+       __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
        smmu_domain->smmu = NULL;
 out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
index 4c91359..ca51036 100644 (file)
 #define ARM_MALI_LPAE_TTBR_READ_INNER  BIT(2)
 #define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
 
+#define ARM_MALI_LPAE_MEMATTR_IMP_DEF  0x88ULL
+#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
+
 /* IOPTE accessors */
 #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 
@@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 static struct io_pgtable *
 arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       struct io_pgtable *iop;
+       struct arm_lpae_io_pgtable *data;
 
-       if (cfg->ias != 48 || cfg->oas > 40)
+       /* No quirks for Mali (hopefully) */
+       if (cfg->quirks)
+               return NULL;
+
+       if (cfg->ias > 48 || cfg->oas > 40)
                return NULL;
 
        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-       iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
-       if (iop) {
-               u64 mair, ttbr;
 
-               /* Copy values as union fields overlap */
-               mair = cfg->arm_lpae_s1_cfg.mair[0];
-               ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+       data = arm_lpae_alloc_pgtable(cfg);
+       if (!data)
+               return NULL;
 
-               cfg->arm_mali_lpae_cfg.memattr = mair;
-               cfg->arm_mali_lpae_cfg.transtab = ttbr |
-                       ARM_MALI_LPAE_TTBR_READ_INNER |
-                       ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+       /* Mali seems to need a full 4-level table regardless of IAS */
+       if (data->levels < ARM_LPAE_MAX_LEVELS) {
+               data->levels = ARM_LPAE_MAX_LEVELS;
+               data->pgd_size = sizeof(arm_lpae_iopte);
        }
+       /*
+        * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
+        * best we can do is mimic the out-of-tree driver and hope that the
+        * "implementation-defined caching policy" is good enough. Similarly,
+        * we'll use it for the sake of a valid attribute for our 'device'
+        * index, although callers should never request that in practice.
+        */
+       cfg->arm_mali_lpae_cfg.memattr =
+               (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+                << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+               (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
+                << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+               (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+                << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
 
-       return iop;
+       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+       if (!data->pgd)
+               goto out_free_data;
+
+       /* Ensure the empty pgd is visible before TRANSTAB can be written */
+       wmb();
+
+       cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
+                                         ARM_MALI_LPAE_TTBR_READ_INNER |
+                                         ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+       return &data->iop;
+
+out_free_data:
+       kfree(data);
+       return NULL;
 }
 
 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
index 9da8309..2371034 100644 (file)
@@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev)
 
        mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
 
-       irq = platform_get_irq(pdev, 0);
-
        /*
         * Determine if this IPMMU instance is a root device by checking for
         * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
@@ -1106,6 +1104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
 
        /* Root devices have mandatory IRQs */
        if (ipmmu_is_root(mmu)) {
+               irq = platform_get_irq(pdev, 0);
                if (irq < 0) {
                        dev_err(&pdev->dev, "no IRQ found\n");
                        return irq;
index 614a93a..026ad2b 100644 (file)
@@ -8,6 +8,8 @@
 #include <linux/export.h>
 #include <linux/iommu.h>
 #include <linux/limits.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
 #include <linux/of_pci.h>
index 26290f3..4dcbf68 100644 (file)
@@ -100,6 +100,7 @@ struct rk_iommu {
        struct device *dev;
        void __iomem **bases;
        int num_mmu;
+       int num_irq;
        struct clk_bulk_data *clocks;
        int num_clocks;
        bool reset_disabled;
@@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
-       int err, i, irq;
+       int err, i;
 
        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
@@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);
 
+       iommu->num_irq = platform_irq_count(pdev);
+       if (iommu->num_irq < 0)
+               return iommu->num_irq;
+
        iommu->reset_disabled = device_property_read_bool(dev,
                                        "rockchip,disable-mmu-reset");
 
@@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
 
-       i = 0;
-       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+       for (i = 0; i < iommu->num_irq; i++) {
+               int irq = platform_get_irq(pdev, i);
+
                if (irq < 0)
                        return irq;
 
@@ -1245,10 +1251,13 @@ err_unprepare_clocks:
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
        struct rk_iommu *iommu = platform_get_drvdata(pdev);
-       int i = 0, irq;
+       int i;
+
+       for (i = 0; i < iommu->num_irq; i++) {
+               int irq = platform_get_irq(pdev, i);
 
-       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
                devm_free_irq(iommu->dev, irq, iommu);
+       }
 
        pm_runtime_force_suspend(&pdev->dev);
 }
index 1a57cee..0b0a737 100644 (file)
@@ -15,6 +15,7 @@
 
 /* FIC Registers */
 #define AL_FIC_CAUSE           0x00
+#define AL_FIC_SET_CAUSE       0x08
 #define AL_FIC_MASK            0x10
 #define AL_FIC_CONTROL         0x28
 
@@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc)
        chained_irq_exit(irqchip, desc);
 }
 
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+       struct al_fic *fic = gc->private;
+
+       writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+       return 1;
+}
+
 static int al_fic_register(struct device_node *node,
                           struct al_fic *fic)
 {
@@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node,
        gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
        gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
        gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+       gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
        gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
        gc->private = fic;
 
index 6acad2e..2933349 100644 (file)
@@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void)
 static const struct of_device_id aic5_irq_fixups[] __initconst = {
        { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
        { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+       { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
        { /* sentinel */ },
 };
 
@@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node,
        return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
 }
 IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS                50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+                                      struct device_node *parent)
+{
+       return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
index e88e75c..fbec07d 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
+#include <linux/pci.h>
 #include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
index 229d586..87711e0 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/acpi_iort.h>
+#include <linux/pci.h>
 #include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
index 422664a..1edc993 100644 (file)
@@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
 
 #define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR    max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR    min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
 #define GIC_ESPI_NR    GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
 
 /*
index c72c036..daefc52 100644 (file)
@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
        }
 }
 
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
 {
        unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
                                           cpu_online_mask);
@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
        plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
 {
        plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       if (!irqd_irq_disabled(d)) {
-               plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-               plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
-       }
+       plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+       plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
 }
 #endif
 
+static void plic_irq_eoi(struct irq_data *d)
+{
+       struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+       writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
 static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
-       /*
-        * There is no need to mask/unmask PLIC interrupts.  They are "masked"
-        * by reading claim and "unmasked" when writing it back.
-        */
-       .irq_enable     = plic_irq_enable,
-       .irq_disable    = plic_irq_disable,
+       .irq_mask       = plic_irq_mask,
+       .irq_unmask     = plic_irq_unmask,
+       .irq_eoi        = plic_irq_eoi,
 #ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
 #endif
@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
 {
-       irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+       irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
        irq_set_chip_data(irq, NULL);
        irq_set_noprobe(irq);
        return 0;
@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
                                        hwirq);
                else
                        generic_handle_irq(irq);
-               writel(hwirq, claim);
        }
        csr_set(sie, SIE_SEIE);
 }
index d249cf8..8346e6d 100644 (file)
@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
 
 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 {
-       return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+       return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 }
 
 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
        struct dm_cache_migration *mg;
 
-       mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
-       if (!mg)
-               return NULL;
+       mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
 
        memset(mg, 0, sizeof(*mg));
 
@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
        struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
        cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-       if (!cell_prealloc) {
-               defer_bio(cache, bio);
-               return false;
-       }
 
        build_key(oblock, end, &key);
        r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-               mg_complete(mg, false);
-               return -ENOMEM;
-       }
 
        /*
         * Prevent writes to the block, but allow reads to continue.
@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
        }
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               policy_complete_background_work(cache->policy, op, false);
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->op = op;
        mg->overwrite_bio = bio;
@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               invalidate_complete(mg, false);
-               return -ENOMEM;
-       }
 
        build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
        r = dm_cell_lock_v2(cache->prison, &key,
@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
                return -EPERM;
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->overwrite_bio = bio;
        mg->invalidate_cblock = cblock;
index cd6f9e9..4ca8f19 100644 (file)
@@ -591,8 +591,8 @@ static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
  *
  * NOTE: Must be called with the bucket lock held
  */
-struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
-                                             unsigned long region_nr)
+static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
+                                                    unsigned long region_nr)
 {
        struct dm_clone_region_hydration *hd;
 
index f150f5c..4fb1a40 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
 
 #include "dm.h"
 
@@ -107,8 +106,8 @@ struct dm_snapshot {
        /* The on disk metadata handler */
        struct dm_exception_store *store;
 
-       /* Maximum number of in-flight COW jobs. */
-       struct semaphore cow_count;
+       unsigned in_progress;
+       struct wait_queue_head in_progress_wait;
 
        struct dm_kcopyd_client *kcopyd_client;
 
@@ -162,8 +161,8 @@ struct dm_snapshot {
  */
 #define DEFAULT_COW_THRESHOLD 2048
 
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
 MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_hash_tables;
        }
 
-       sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+       init_waitqueue_head(&s->in_progress_wait);
 
        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
@@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti)
 
        dm_put_device(ti, s->origin);
 
+       WARN_ON(s->in_progress);
+
        kfree(s);
 }
 
+static void account_start_copy(struct dm_snapshot *s)
+{
+       spin_lock(&s->in_progress_wait.lock);
+       s->in_progress++;
+       spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+       spin_lock(&s->in_progress_wait.lock);
+       BUG_ON(!s->in_progress);
+       s->in_progress--;
+       if (likely(s->in_progress <= cow_threshold) &&
+           unlikely(waitqueue_active(&s->in_progress_wait)))
+               wake_up_locked(&s->in_progress_wait);
+       spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+       if (unlikely(s->in_progress > cow_threshold)) {
+               spin_lock(&s->in_progress_wait.lock);
+               if (likely(s->in_progress > cow_threshold)) {
+                       /*
+                        * NOTE: this throttle doesn't account for whether
+                        * the caller is servicing an IO that will trigger a COW,
+                        * so excess throttling may result for chunks not required
+                        * to be COW'd.  But if cow_threshold was reached, extra
+                        * throttling is unlikely to negatively impact performance.
+                        */
+                       DECLARE_WAITQUEUE(wait, current);
+                       __add_wait_queue(&s->in_progress_wait, &wait);
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&s->in_progress_wait.lock);
+                       if (unlock_origins)
+                               up_read(&_origins_lock);
+                       io_schedule();
+                       remove_wait_queue(&s->in_progress_wait, &wait);
+                       return false;
+               }
+               spin_unlock(&s->in_progress_wait.lock);
+       }
+       return true;
+}
+
 /*
  * Flush a list of buffers.
  */
@@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio)
        }
 }
 
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
 
 /*
  * Flush a list of buffers.
@@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
-               r = do_origin(s->origin, bio);
+               r = do_origin(s->origin, bio, false);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
@@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
                rb_link_node(&pe->out_of_order_node, parent, p);
                rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
        }
-       up(&s->cow_count);
+       account_end_copy(s);
 }
 
 /*
@@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
        dest.count = src.count;
 
        /* Hand over to kcopyd */
-       down(&s->cow_count);
+       account_start_copy(s);
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
 
-       down(&s->cow_count);
+       account_start_copy(s);
        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);
 
@@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context)
        struct bio *bio = context;
        struct dm_snapshot *s = bio->bi_private;
 
-       up(&s->cow_count);
+       account_end_copy(s);
        bio->bi_status = write_err ? BLK_STS_IOERR : 0;
        bio_endio(bio);
 }
@@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
        dest.sector = bio->bi_iter.bi_sector;
        dest.count = s->store->chunk_size;
 
-       down(&s->cow_count);
+       account_start_copy(s);
        WARN_ON_ONCE(bio->bi_private);
        bio->bi_private = s;
        dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
@@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
        if (!s->valid)
                return DM_MAPIO_KILL;
 
+       if (bio_data_dir(bio) == WRITE) {
+               while (unlikely(!wait_for_in_progress(s, false)))
+                       ; /* wait_for_in_progress() has slept */
+       }
+
        down_read(&s->lock);
        dm_exception_table_lock(&lock);
 
@@ -2112,7 +2163,7 @@ redirect_to_origin:
 
        if (bio_data_dir(bio) == WRITE) {
                up_write(&s->lock);
-               return do_origin(s->origin, bio);
+               return do_origin(s->origin, bio, false);
        }
 
 out_unlock:
@@ -2487,15 +2538,24 @@ next_snapshot:
 /*
  * Called on a write from the origin driver.
  */
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
 {
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;
 
+again:
        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
-       if (o)
+       if (o) {
+               if (limit) {
+                       struct dm_snapshot *s;
+                       list_for_each_entry(s, &o->snapshots, list)
+                               if (unlikely(!wait_for_in_progress(s, true)))
+                                       goto again;
+               }
+
                r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+       }
        up_read(&_origins_lock);
 
        return r;
@@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
                dm_accept_partial_bio(bio, available_sectors);
 
        /* Only tell snapshots if this is a write */
-       return do_origin(o->dev, bio);
+       return do_origin(o->dev, bio, true);
 }
 
 /*
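
The dm-snap.c changes above replace the cow_count semaphore with an in_progress counter plus a wait queue, so writers sleep only while more than cow_threshold chunks are being copied. A simplified user-space sketch of that throttle, using pthreads instead of the kernel wait-queue API and omitting the drop-and-retry of _origins_lock; all names are hypothetical.

/*
 * Sketch only: a threshold throttle in the spirit of account_start_copy(),
 * account_end_copy() and wait_for_in_progress(), built on pthreads.
 */
#include <pthread.h>
#include <stdio.h>

#define COW_THRESHOLD 2048

struct throttle {
        pthread_mutex_t lock;
        pthread_cond_t wait;
        unsigned int in_progress;
};

static void account_start(struct throttle *t)
{
        pthread_mutex_lock(&t->lock);
        t->in_progress++;
        pthread_mutex_unlock(&t->lock);
}

static void account_end(struct throttle *t)
{
        pthread_mutex_lock(&t->lock);
        t->in_progress--;
        if (t->in_progress <= COW_THRESHOLD)
                pthread_cond_broadcast(&t->wait);       /* wake throttled writers */
        pthread_mutex_unlock(&t->lock);
}

static void wait_below_threshold(struct throttle *t)
{
        pthread_mutex_lock(&t->lock);
        while (t->in_progress > COW_THRESHOLD)
                pthread_cond_wait(&t->wait, &t->lock);
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct throttle t = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .wait = PTHREAD_COND_INITIALIZER,
                .in_progress = 0,
        };

        account_start(&t);
        account_end(&t);
        wait_below_threshold(&t);       /* returns immediately: 0 <= threshold */
        printf("in_progress=%u\n", t.in_progress);
        return 0;
}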
index f61693e..1e77228 100644 (file)
@@ -154,7 +154,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        } else {
                pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
                       mdname(mddev));
-               pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+               pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
                err = -ENOTSUPP;
                goto abort;
        }
index cfca3c7..21f90a8 100644 (file)
@@ -643,8 +643,7 @@ static int v4l_stk_release(struct file *fp)
                dev->owner = NULL;
        }
 
-       if (is_present(dev))
-               usb_autopm_put_interface(dev->interface);
+       usb_autopm_put_interface(dev->interface);
        mutex_unlock(&dev->lock);
        return v4l2_fh_release(fp);
 }
index 3274742..719f54c 100644 (file)
@@ -848,7 +848,7 @@ static int jmb38x_ms_count_slots(struct pci_dev *pdev)
 {
        int cnt, rc = 0;
 
-       for (cnt = 0; cnt < PCI_ROM_RESOURCE; ++cnt) {
+       for (cnt = 0; cnt < PCI_STD_NUM_BARS; ++cnt) {
                if (!(IORESOURCE_MEM & pci_resource_flags(pdev, cnt)))
                        break;
 
@@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
        if (!cnt) {
                rc = -ENODEV;
                pci_dev_busy = 1;
-               goto err_out;
+               goto err_out_int;
        }
 
        jm = kzalloc(sizeof(struct jmb38x_ms)
index 47ae84a..1b1a794 100644 (file)
@@ -527,6 +527,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
                              FASTRPC_PHYS(buffer->phys), buffer->size);
        if (ret < 0) {
                dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+               kfree(a);
                return -EINVAL;
        }
 
index 32e9b1a..0a2b99e 100644 (file)
@@ -218,13 +218,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
 {
        int ret;
 
+       /* No need to enable the client if nothing is needed from it */
+       if (!cldev->bus->fw_f_fw_ver_supported &&
+           !cldev->bus->hbm_f_os_supported)
+               return;
+
        ret = mei_cldev_enable(cldev);
        if (ret)
                return;
 
-       ret = mei_fwver(cldev);
-       if (ret < 0)
-               dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+       if (cldev->bus->fw_f_fw_ver_supported) {
+               ret = mei_fwver(cldev);
+               if (ret < 0)
+                       dev_err(&cldev->dev, "FW version command failed %d\n",
+                               ret);
+       }
 
        if (cldev->bus->hbm_f_os_supported) {
                ret = mei_osver(cldev);
index 77f7dff..c09f8bb 100644 (file)
@@ -79,6 +79,9 @@
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
 #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
index abe1b1f..c4f6991 100644 (file)
@@ -1355,6 +1355,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 #define MEI_CFG_FW_SPS                           \
        .quirk_probe = mei_me_fw_type_sps
 
+#define MEI_CFG_FW_VER_SUPP                     \
+       .fw_ver_supported = 1
 
 #define MEI_CFG_ICH_HFS                      \
        .fw_status.count = 0
@@ -1392,31 +1394,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
        MEI_CFG_ICH10_HFS,
 };
 
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
        MEI_CFG_PCH_HFS,
 };
 
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+       MEI_CFG_PCH_HFS,
+       MEI_CFG_FW_VER_SUPP,
+};
+
 /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
 static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_NM,
 };
 
 /* PCH8 Lynx Point and newer devices */
 static const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
 static const struct mei_cfg mei_me_pch8_sps_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS,
 };
 
 /* Cannon Lake and newer devices */
 static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
 };
 
@@ -1428,7 +1440,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_UNDEF_CFG] = NULL,
        [MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
        [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
-       [MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+       [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+       [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
        [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
        [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1473,6 +1486,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
        mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
        hw->cfg = cfg;
 
+       dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
        return dev;
 }
 
index 08c84a0..1d87948 100644 (file)
  * @fw_status: FW status
  * @quirk_probe: device exclusion quirk
  * @dma_size: device DMA buffers size
+ * @fw_ver_supported: is fw version retrievable from FW
  */
 struct mei_cfg {
        const struct mei_fw_status fw_status;
        bool (*quirk_probe)(struct pci_dev *pdev);
        size_t dma_size[DMA_DSCR_NUM];
+       u32 fw_ver_supported:1;
 };
 
 
@@ -62,7 +64,8 @@ struct mei_me_hw {
  * @MEI_ME_UNDEF_CFG:      Lower sentinel.
  * @MEI_ME_ICH_CFG:        I/O Controller Hub legacy devices.
  * @MEI_ME_ICH10_CFG:      I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG:        Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG:       Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG:       Platform Controller Hub platforms (Gen7).
  * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
@@ -77,7 +80,8 @@ enum mei_cfg_idx {
        MEI_ME_UNDEF_CFG,
        MEI_ME_ICH_CFG,
        MEI_ME_ICH10_CFG,
-       MEI_ME_PCH_CFG,
+       MEI_ME_PCH6_CFG,
+       MEI_ME_PCH7_CFG,
        MEI_ME_PCH_CPT_PBG_CFG,
        MEI_ME_PCH8_CFG,
        MEI_ME_PCH8_SPS_CFG,
index f71a023..0f21411 100644 (file)
@@ -426,6 +426,8 @@ struct mei_fw_version {
  *
  * @fw_ver : FW versions
  *
+ * @fw_f_fw_ver_supported : fw feature: fw version supported
+ *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients_map : FW clients bit map
@@ -506,6 +508,8 @@ struct mei_device {
 
        struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
 
+       unsigned int fw_f_fw_ver_supported:1;
+
        struct rw_semaphore me_clients_rwsem;
        struct list_head me_clients;
        DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
index d5a92c6..3dca63e 100644 (file)
@@ -61,13 +61,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
 
-       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
index 6e208a0..a5e3170 100644 (file)
@@ -94,7 +94,7 @@ enum pci_barno {
 struct pci_endpoint_test {
        struct pci_dev  *pdev;
        void __iomem    *base;
-       void __iomem    *bar[6];
+       void __iomem    *bar[PCI_STD_NUM_BARS];
        struct completion irq_raised;
        int             last_irq;
        int             num_irqs;
@@ -687,7 +687,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
        if (!pci_endpoint_test_request_irq(test))
                goto err_disable_irq;
 
-       for (bar = BAR_0; bar <= BAR_5; bar++) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                        base = pci_ioremap_bar(pdev, bar);
                        if (!base) {
@@ -740,7 +740,7 @@ err_ida_remove:
        ida_simple_remove(&pci_endpoint_test_ida, id);
 
 err_iounmap:
-       for (bar = BAR_0; bar <= BAR_5; bar++) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }
@@ -771,7 +771,7 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev)
        misc_deregister(&test->miscdev);
        kfree(misc_device->name);
        ida_simple_remove(&pci_endpoint_test_ida, id);
-       for (bar = BAR_0; bar <= BAR_5; bar++) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }
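
The pci_endpoint_test hunks above replace the open-coded BAR_0..BAR_5 bounds with an index loop over PCI_STD_NUM_BARS. A minimal sketch of that idiom, not taken from the driver (the helper name demo_map_std_bars and its error handling are illustrative only; the PCI calls are the same ones used in the hunks):

#include <linux/pci.h>

/* Map every memory BAR of a device; leave I/O-port and unused BARs NULL. */
static int demo_map_std_bars(struct pci_dev *pdev,
                             void __iomem *bar[PCI_STD_NUM_BARS])
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                bar[i] = NULL;
                if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
                        continue;
                bar[i] = pci_ioremap_bar(pdev, i);
                if (!bar[i])
                        goto err_unmap;
        }
        return 0;

err_unmap:
        while (i--)
                if (bar[i])
                        pci_iounmap(pdev, bar[i]);
        return -ENOMEM;
}
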
index d4ada5c..234551a 100644 (file)
@@ -646,8 +646,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        struct tmio_mmc_dma *dma_priv;
        struct tmio_mmc_host *host;
        struct renesas_sdhi *priv;
+       int num_irqs, irq, ret, i;
        struct resource *res;
-       int irq, ret, i;
        u16 ver;
 
        of_data = of_device_get_match_data(&pdev->dev);
@@ -825,24 +825,31 @@ int renesas_sdhi_probe(struct platform_device *pdev,
                host->hs400_complete = renesas_sdhi_hs400_complete;
        }
 
-       i = 0;
-       while (1) {
+       num_irqs = platform_irq_count(pdev);
+       if (num_irqs < 0) {
+               ret = num_irqs;
+               goto eirq;
+       }
+
+       /* There must be at least one IRQ source */
+       if (!num_irqs) {
+               ret = -ENXIO;
+               goto eirq;
+       }
+
+       for (i = 0; i < num_irqs; i++) {
                irq = platform_get_irq(pdev, i);
-               if (irq < 0)
-                       break;
-               i++;
+               if (irq < 0) {
+                       ret = irq;
+                       goto eirq;
+               }
+
                ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
                                       dev_name(&pdev->dev), host);
                if (ret)
                        goto eirq;
        }
 
-       /* There must be at least one IRQ source */
-       if (!i) {
-               ret = irq;
-               goto eirq;
-       }
-
        dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
                 mmc_hostname(host->mmc), (unsigned long)
                 (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
index 2b9cdcd..f4f5f0a 100644 (file)
@@ -262,6 +262,7 @@ static const struct sdhci_iproc_data bcm2835_data = {
 };
 
 static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .ops = &sdhci_iproc_32only_ops,
 };
 
index 81bd9af..98c575d 100644 (file)
@@ -1393,11 +1393,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        const char *name;
 
        irq[0] = platform_get_irq(pdev, 0);
-       irq[1] = platform_get_irq(pdev, 1);
-       if (irq[0] < 0) {
-               dev_err(dev, "Get irq error\n");
+       irq[1] = platform_get_irq_optional(pdev, 1);
+       if (irq[0] < 0)
                return -ENXIO;
-       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(dev, res);
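
The renesas_sdhi and sh_mmcif hunks above switch to counting interrupts with platform_irq_count() before requesting each one by index. A condensed sketch of that loop under the same assumptions (demo_request_all_irqs and demo_irq_handler are hypothetical stand-ins, not driver symbols):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

/* Request every IRQ the platform device describes, or fail cleanly. */
static int demo_request_all_irqs(struct platform_device *pdev,
                                 irq_handler_t demo_irq_handler, void *data)
{
        int num_irqs, irq, ret, i;

        num_irqs = platform_irq_count(pdev);
        if (num_irqs < 0)
                return num_irqs;        /* e.g. -EPROBE_DEFER */
        if (!num_irqs)
                return -ENXIO;          /* at least one IRQ is required */

        for (i = 0; i < num_irqs; i++) {
                irq = platform_get_irq(pdev, i);
                if (irq < 0)
                        return irq;

                ret = devm_request_irq(&pdev->dev, irq, demo_irq_handler, 0,
                                       dev_name(&pdev->dev), data);
                if (ret)
                        return ret;
        }

        return 0;
}

For an interrupt that may legitimately be absent, the sh_mmcif hunk uses platform_get_irq_optional(), which behaves like platform_get_irq() but does not complain when the IRQ is missing.
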
index 97a97a9..e10b760 100644 (file)
@@ -134,16 +134,15 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
 
 /**
  * au_read_buf16 -  read chip data into buffer
- * @mtd:       MTD device structure
+ * @this:      NAND chip object
 * @buf:       buffer to store data
  * @len:       number of bytes to read
  *
  * read function for 16bit buswidth
  */
-static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
 {
        int i;
-       struct nand_chip *this = mtd_to_nand(mtd);
        u16 *p = (u16 *) buf;
        len >>= 1;
 
index 1d8621d..7acf4a9 100644 (file)
@@ -487,7 +487,7 @@ static int write_sr(struct spi_nor *nor, u8 val)
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
-                                  SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+                                  SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 
                return spi_mem_exec_op(nor->spimem, &op);
        }
index 931d9d9..21d8fcc 100644 (file)
@@ -4039,7 +4039,7 @@ out:
                 * this to-be-skipped slave to send a packet out.
                 */
                old_arr = rtnl_dereference(bond->slave_arr);
-               for (idx = 0; idx < old_arr->count; idx++) {
+               for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
                        if (skipslave == old_arr->arr[idx]) {
                                old_arr->arr[idx] =
                                    old_arr->arr[old_arr->count-1];
index 526ba2a..cc35363 100644 (file)
@@ -1845,7 +1845,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
                loc = B53_EG_MIR_CTL;
 
        b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
-       reg &= ~MIRROR_MASK;
        reg |= BIT(port);
        b53_write16(dev, B53_MGMT_PAGE, loc, reg);
 
index a23d3ff..24a5e99 100644 (file)
@@ -1224,10 +1224,6 @@ static int ksz8795_switch_init(struct ksz_device *dev)
 {
        int i;
 
-       mutex_init(&dev->stats_mutex);
-       mutex_init(&dev->alu_mutex);
-       mutex_init(&dev->vlan_mutex);
-
        dev->ds->ops = &ksz8795_switch_ops;
 
        for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
index d0f8153..8b00f8e 100644 (file)
@@ -25,6 +25,7 @@ KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
 
 static int ksz8795_spi_probe(struct spi_device *spi)
 {
+       struct regmap_config rc;
        struct ksz_device *dev;
        int i, ret;
 
@@ -33,9 +34,9 @@ static int ksz8795_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
-               dev->regmap[i] = devm_regmap_init_spi(spi,
-                                                     &ksz8795_regmap_config
-                                                     [i]);
+               rc = ksz8795_regmap_config[i];
+               rc.lock_arg = &dev->regmap_mutex;
+               dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
                if (IS_ERR(dev->regmap[i])) {
                        ret = PTR_ERR(dev->regmap[i]);
                        dev_err(&spi->dev,
index 0b1e01f..fdffd9e 100644 (file)
@@ -17,6 +17,7 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
 static int ksz9477_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *i2c_id)
 {
+       struct regmap_config rc;
        struct ksz_device *dev;
        int i, ret;
 
@@ -25,8 +26,9 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
-               dev->regmap[i] = devm_regmap_init_i2c(i2c,
-                                       &ksz9477_regmap_config[i]);
+               rc = ksz9477_regmap_config[i];
+               rc.lock_arg = &dev->regmap_mutex;
+               dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
                if (IS_ERR(dev->regmap[i])) {
                        ret = PTR_ERR(dev->regmap[i]);
                        dev_err(&i2c->dev,
index 2938e89..16939f2 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
  * Microchip KSZ9477 register definitions
  *
  * Copyright (C) 2017-2018 Microchip Technology Inc.
index f4198d6..c5f6495 100644 (file)
@@ -24,6 +24,7 @@ KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
 
 static int ksz9477_spi_probe(struct spi_device *spi)
 {
+       struct regmap_config rc;
        struct ksz_device *dev;
        int i, ret;
 
@@ -32,8 +33,9 @@ static int ksz9477_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
-               dev->regmap[i] = devm_regmap_init_spi(spi,
-                                       &ksz9477_regmap_config[i]);
+               rc = ksz9477_regmap_config[i];
+               rc.lock_arg = &dev->regmap_mutex;
+               dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
                if (IS_ERR(dev->regmap[i])) {
                        ret = PTR_ERR(dev->regmap[i]);
                        dev_err(&spi->dev,
index b0b870f..fe47180 100644 (file)
@@ -436,7 +436,7 @@ int ksz_switch_register(struct ksz_device *dev,
        }
 
        mutex_init(&dev->dev_mutex);
-       mutex_init(&dev->stats_mutex);
+       mutex_init(&dev->regmap_mutex);
        mutex_init(&dev->alu_mutex);
        mutex_init(&dev->vlan_mutex);
 
index dd60d08..a20ebb7 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Microchip switch driver common header
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch driver common header
  *
  * Copyright (C) 2017-2019 Microchip Technology Inc.
  */
@@ -47,7 +47,7 @@ struct ksz_device {
        const char *name;
 
        struct mutex dev_mutex;         /* device access */
-       struct mutex stats_mutex;       /* status access */
+       struct mutex regmap_mutex;      /* regmap access */
        struct mutex alu_mutex;         /* ALU access */
        struct mutex vlan_mutex;        /* vlan access */
        const struct ksz_dev_ops *dev_ops;
@@ -290,6 +290,18 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
        ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
 }
 
+static inline void ksz_regmap_lock(void *__mtx)
+{
+       struct mutex *mtx = __mtx;
+       mutex_lock(mtx);
+}
+
+static inline void ksz_regmap_unlock(void *__mtx)
+{
+       struct mutex *mtx = __mtx;
+       mutex_unlock(mtx);
+}
+
 /* Regmap tables generation */
 #define KSZ_SPI_OP_RD          3
 #define KSZ_SPI_OP_WR          2
@@ -314,6 +326,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
                .write_flag_mask =                                      \
                        KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp,        \
                                             regbits, regpad),          \
+               .lock = ksz_regmap_lock,                                \
+               .unlock = ksz_regmap_unlock,                            \
                .reg_format_endian = REGMAP_ENDIAN_BIG,                 \
                .val_format_endian = REGMAP_ENDIAN_BIG                  \
        }
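
The ksz changes above route all regmap I/O through one driver-owned mutex via the regmap_config .lock/.unlock/.lock_arg hooks, with each bus probe copying the shared config template before pointing lock_arg at its own mutex. A minimal sketch of that wiring, with illustrative demo_* names rather than the driver's own:

#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

static void demo_regmap_lock(void *ctx)
{
        mutex_lock(ctx);
}

static void demo_regmap_unlock(void *ctx)
{
        mutex_unlock(ctx);
}

/* Copy a shared, read-only config template, attach the device's own
 * mutex, and create the regmap from the customized copy.
 */
static struct regmap *demo_init_locked_regmap(struct spi_device *spi,
                                              const struct regmap_config *tmpl,
                                              struct mutex *shared_lock)
{
        struct regmap_config rc = *tmpl;

        rc.lock = demo_regmap_lock;
        rc.unlock = demo_regmap_unlock;
        rc.lock_arg = shared_lock;      /* handed back to lock/unlock */

        return devm_regmap_init_spi(spi, &rc);
}
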
index 684aa51..b00274c 100644 (file)
@@ -705,7 +705,7 @@ qca8k_setup(struct dsa_switch *ds)
                    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
 
        /* Setup connection between CPU port & user ports */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
                /* CPU port gets connected to all user ports of the switch */
                if (dsa_is_cpu_port(ds, i)) {
                        qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@@ -1077,7 +1077,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
        if (id != QCA8K_ID_QCA8337)
                return -ENODEV;
 
-       priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+       priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
        if (!priv->ds)
                return -ENOMEM;
 
index ca3d17e..ac88cac 100644 (file)
@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
                         const struct switchdev_obj_port_vlan *vlan)
 {
        struct realtek_smi *smi = ds->priv;
+       u16 vid;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
-               return -EINVAL;
+       for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+               if (!smi->ops->is_vlan_valid(smi, vid))
+                       return -EINVAL;
 
        dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
                 vlan->vid_begin, vlan->vid_end);
@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
        u16 vid;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
-               return;
+       for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+               if (!smi->ops->is_vlan_valid(smi, vid))
+                       return;
 
        dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
                 port,
index a268085..f5cc8b0 100644 (file)
@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                dev_err(smi->dev, "failed to get parent IRQ\n");
-               return irq ? irq : -EINVAL;
+               ret = irq ? irq : -EINVAL;
+               goto out_put_node;
        }
 
        /* This clears the IRQ status register */
@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                          &val);
        if (ret) {
                dev_err(smi->dev, "can't read interrupt status\n");
-               return ret;
+               goto out_put_node;
        }
 
        /* Fetch IRQ edge information from the descriptor */
@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                 val);
        if (ret) {
                dev_err(smi->dev, "could not configure IRQ polarity\n");
-               return ret;
+               goto out_put_node;
        }
 
        ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                        "RTL8366RB", smi);
        if (ret) {
                dev_err(smi->dev, "unable to request irq: %d\n", ret);
-               return ret;
+               goto out_put_node;
        }
        smi->irqdomain = irq_domain_add_linear(intc,
                                               RTL8366RB_NUM_INTERRUPT,
@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                               smi);
        if (!smi->irqdomain) {
                dev_err(smi->dev, "failed to create IRQ domain\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_put_node;
        }
        for (i = 0; i < smi->num_ports; i++)
                irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
 
-       return 0;
+out_put_node:
+       of_node_put(intc);
+       return ret;
 }
 
 static int rtl8366rb_set_addr(struct realtek_smi *smi)
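
The hunk above converts every early return taken after the interrupt-controller node lookup into a jump to a single out_put_node label, so the reference from of_get_child_by_name() is dropped exactly once on all paths. The shape of that pattern, reduced to its essentials (the function name and child-node name are stand-ins):

#include <linux/of.h>
#include <linux/of_irq.h>

static int demo_setup_from_node(struct device_node *parent)
{
        struct device_node *intc;
        int irq, ret = 0;

        intc = of_get_child_by_name(parent, "interrupt-controller");
        if (!intc)
                return -ENODEV;

        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                ret = irq ? irq : -EINVAL;
                goto out_put_node;
        }

        /* ... further setup that can fail also jumps to out_put_node ... */

out_put_node:
        of_node_put(intc);
        return ret;
}
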
index e53e494..fbb564c 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_H
index 740dadf..1fc0d13 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_DYNAMIC_CONFIG_H
 #define _SJA1105_DYNAMIC_CONFIG_H
index b9def74..7687ddc 100644 (file)
@@ -1897,7 +1897,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
        return sja1105_static_config_reload(priv);
 }
 
-/* Caller must hold priv->tagger_data.meta_lock */
+/* Must be called only with priv->tagger_data.state bit
+ * SJA1105_HWTS_RX_EN cleared
+ */
 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                                      bool on)
 {
@@ -1954,16 +1956,17 @@ static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
                break;
        }
 
-       if (rx_on != priv->tagger_data.hwts_rx_en) {
-               spin_lock(&priv->tagger_data.meta_lock);
+       if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
+               clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+
                rc = sja1105_change_rxtstamping(priv, rx_on);
-               spin_unlock(&priv->tagger_data.meta_lock);
                if (rc < 0) {
                        dev_err(ds->dev,
                                "Failed to change RX timestamping: %d\n", rc);
-                       return -EFAULT;
+                       return rc;
                }
-               priv->tagger_data.hwts_rx_en = rx_on;
+               if (rx_on)
+                       set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
        }
 
        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
@@ -1982,7 +1985,7 @@ static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
                config.tx_type = HWTSTAMP_TX_ON;
        else
                config.tx_type = HWTSTAMP_TX_OFF;
-       if (priv->tagger_data.hwts_rx_en)
+       if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
        else
                config.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2005,12 +2008,12 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
 
        mutex_lock(&priv->ptp_lock);
 
-       now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
        while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
                struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
                u64 ts;
 
+               now = priv->tstamp_cc.read(&priv->tstamp_cc);
+
                *shwt = (struct skb_shared_hwtstamps) {0};
 
                ts = SJA1105_SKB_CB(skb)->meta_tstamp;
@@ -2031,7 +2034,7 @@ static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
        struct sja1105_private *priv = ds->priv;
        struct sja1105_tagger_data *data = &priv->tagger_data;
 
-       if (!data->hwts_rx_en)
+       if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
                return false;
 
        /* We need to read the full PTP clock to reconstruct the Rx
@@ -2201,6 +2204,7 @@ static int sja1105_probe(struct spi_device *spi)
        tagger_data = &priv->tagger_data;
        skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
        INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+       spin_lock_init(&tagger_data->meta_lock);
 
        /* Connections between dsa_port and sja1105_port */
        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
index af456b0..394e12a 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_PTP_H
 #define _SJA1105_PTP_H
index 84dc603..58dd37e 100644 (file)
@@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
        rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
        if (rc < 0) {
                dev_err(dev, "Invalid config, cannot upload\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto out;
        }
        /* Prevent PHY jabbering during switch reset by inhibiting
         * Tx on all ports and waiting for current packet to drain.
@@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
        rc = sja1105_inhibit_tx(priv, port_bitmap, true);
        if (rc < 0) {
                dev_err(dev, "Failed to inhibit Tx on ports\n");
-               return -ENXIO;
+               rc = -ENXIO;
+               goto out;
        }
        /* Wait for an eventual egress packet to finish transmission
         * (reach IFG). It is guaranteed that a second one will not
index 7f87022..f4a5c5c 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2016-2018, NXP Semiconductors
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_STATIC_CONFIG_H
index 0b803c3..0aad212 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_TAS_H
 #define _SJA1105_TAS_H
index b4a0fb2..bb65dd3 100644 (file)
@@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
 
-       aq_nic_set_packet_filter(aq_nic, ndev->flags);
-
-       aq_nic_set_multicast_list(aq_nic, ndev);
+       (void)aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
 static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
index 8f66e78..137c1de 100644 (file)
@@ -631,9 +631,12 @@ err_exit:
 
 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 {
-       unsigned int packet_filter = self->packet_filter;
+       const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+       struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+       unsigned int packet_filter = ndev->flags;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;
+       int err = 0;
 
        self->mc_list.count = 0;
        if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
@@ -641,29 +644,28 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
        } else {
                netdev_for_each_uc_addr(ha, ndev) {
                        ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
-                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
-                               break;
                }
        }
 
-       if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
-               packet_filter |= IFF_ALLMULTI;
-       } else {
-               netdev_for_each_mc_addr(ha, ndev) {
-                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
-                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
-                               break;
+       cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+       if (cfg->is_mc_list_enabled) {
+               if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+                       packet_filter |= IFF_ALLMULTI;
+               } else {
+                       netdev_for_each_mc_addr(ha, ndev) {
+                               ether_addr_copy(self->mc_list.ar[i++],
+                                               ha->addr);
+                       }
                }
        }
 
        if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
-               packet_filter |= IFF_MULTICAST;
                self->mc_list.count = i;
-               self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
-                                                      self->mc_list.ar,
-                                                      self->mc_list.count);
+               err = hw_ops->hw_multicast_list_set(self->aq_hw,
+                                                   self->mc_list.ar,
+                                                   self->mc_list.count);
+               if (err < 0)
+                       return err;
        }
        return aq_nic_set_packet_filter(self, packet_filter);
 }
index 3901d79..76bdbe1 100644 (file)
@@ -313,6 +313,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                        break;
 
                                buff->is_error |= buff_->is_error;
+                               buff->is_cso_err |= buff_->is_cso_err;
 
                        } while (!buff_->is_eop);
 
@@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                err = 0;
                                goto err_exit;
                        }
-                       if (buff->is_error) {
+                       if (buff->is_error || buff->is_cso_err) {
                                buff_ = buff;
                                do {
                                        next_ = buff_->next,
index 30f7fc4..2ad3fa6 100644 (file)
@@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                     cfg->is_vlan_force_promisc);
 
        hw_atl_rpfl2multicast_flr_en_set(self,
-                                        IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
+                                        IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+                                        IS_FILTER_ENABLED(IFF_MULTICAST), 0);
 
        hw_atl_rpfl2_accept_all_mc_packets_set(self,
-                                              IS_FILTER_ENABLED(IFF_ALLMULTI));
+                                             IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+                                             IS_FILTER_ENABLED(IFF_MULTICAST));
 
        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
-       cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
 
        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
@@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
+       int err;
+       u32 val;
+
        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
 
        /* Invalidate Descriptor Cache to prevent writing to the cached
         * descriptors and to the data pointer of those descriptors
         */
-       hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+       hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
 
-       return aq_hw_err_from_flags(self);
+       err = aq_hw_err_from_flags(self);
+
+       if (err)
+               goto err_exit;
+
+       readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+                                 self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+       return err;
 }
 
 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
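
The hw_stop() change above replaces a one-shot cache-init write with a toggle plus a poll of the new init-done flag. readx_poll_timeout_atomic() from linux/iopoll.h busy-polls a getter until a condition holds or a timeout expires; with the arguments used above it checks every 1 ms and gives up after 10 ms. An isolated sketch of the call (the wrapper name is hypothetical; the getter is the driver's own, declared in the following hunks):

#include <linux/iopoll.h>

/* Returns 0 once the descriptor-cache init-done bit reads 1, or
 * -ETIMEDOUT after 10 ms.  Assumes the driver's hw_atl headers are
 * included for aq_hw_s and the getter.
 */
static int demo_wait_desc_cache_init(struct aq_hw_s *self)
{
        u32 val;

        return readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
                                         self, val, val == 1,
                                         1000U, 10000U);
}
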
index 1149812..6f34069 100644 (file)
@@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
                            HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
 {
+       u32 val;
+
+       val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+                                HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+                                HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
        aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
                            HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
                            HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
-                           init);
+                           val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+                                 RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+                                 RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
 }
 
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
index 0c37abb..c3ee278 100644 (file)
@@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
                                            u32 rx_pkt_buff_size_per_tc,
                                            u32 buffer);
 
-/* set rdm rx dma descriptor cache init */
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
 
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
index c3febcd..35887ad 100644 (file)
 /* default value of bitfield rdm_desc_init_i */
 #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
 
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield  rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"
index da72648..7bc51f8 100644 (file)
@@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
        /* Convert PHY temperature from 1/256 degree Celsius
         * to 1/1000 degree Celsius.
         */
-       *temp = temp_res  * 1000 / 256;
+       *temp = (temp_res & 0xFFFF) * 1000 / 256;
 
        return 0;
 }
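
The fix above masks the raw firmware word to its low 16 bits before scaling, so bits above the 16-bit temperature field cannot skew the result when converting from 1/256 degree Celsius to millidegrees. In isolation, with a hypothetical helper name:

#include <linux/types.h>

/* Convert a raw 16-bit reading in units of 1/256 degree Celsius into
 * millidegrees Celsius.
 */
static int demo_phy_temp_to_millidegrees(u32 raw)
{
        return (raw & 0xFFFF) * 1000 / 256;
}
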
index 7548247..1b1a090 100644 (file)
@@ -526,7 +526,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
        struct device *dev = &ag->pdev->dev;
        struct net_device *ndev = ag->ndev;
        static struct mii_bus *mii_bus;
-       struct device_node *np;
+       struct device_node *np, *mnp;
        int err;
 
        np = dev->of_node;
@@ -571,7 +571,9 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
                msleep(200);
        }
 
-       err = of_mdiobus_register(mii_bus, np);
+       mnp = of_get_child_by_name(np, "mdio");
+       err = of_mdiobus_register(mii_bus, mnp);
+       of_node_put(mnp);
        if (err)
                goto mdio_err_put_clk;
 
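
The ag71xx hunk above registers the MDIO bus against the dedicated "mdio" child node instead of the MAC node itself, dropping the child reference right after registration. Reduced to the essential calls (the helper name is illustrative; of_node_put(NULL) is a no-op, and of_mdiobus_register() falls back to a plain registration when passed a NULL node):

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int demo_register_mdio(struct mii_bus *bus, struct device_node *parent)
{
        struct device_node *mdio_np;
        int err;

        mdio_np = of_get_child_by_name(parent, "mdio");  /* may be NULL */
        err = of_mdiobus_register(bus, mdio_np);
        of_node_put(mdio_np);

        return err;
}
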
index e24f5d2..53055ce 100644 (file)
@@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
        default y
        depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
                   SIBYTE_SB1xxx_SOC
-       select DIMLIB
        ---help---
          If you have a network (Ethernet) chipset belonging to this class,
          say Y.
@@ -69,6 +68,7 @@ config BCMGENET
        select FIXED_PHY
        select BCM7XXX_PHY
        select MDIO_BCM_UNIMAC
+       select DIMLIB
        help
          This driver supports the built-in Ethernet MACs found in the
          Broadcom BCM7xxx Set Top Box family chipset.
@@ -188,6 +188,7 @@ config SYSTEMPORT
        select MII
        select PHYLIB
        select FIXED_PHY
+       select DIMLIB
        help
          This driver supports the built-in Ethernet MACs found in the
          Broadcom BCM7xxx Set Top Box family chipset using an internal
@@ -200,6 +201,7 @@ config BNXT
        select LIBCRC32C
        select NET_DEVLINK
        select PAGE_POOL
+       select DIMLIB
        ---help---
          This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
          Ethernet cards.  To compile this driver as a module, choose M here:
index 12cb77e..0f13828 100644 (file)
@@ -2018,6 +2018,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
         */
        if (priv->internal_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
+               if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+                       int0_enable |= UMAC_IRQ_PHY_DET_R;
        } else if (priv->ext_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -2611,11 +2613,14 @@ static void bcmgenet_irq_task(struct work_struct *work)
        priv->irq0_stat = 0;
        spin_unlock_irq(&priv->lock);
 
+       if (status & UMAC_IRQ_PHY_DET_R &&
+           priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+               phy_init_hw(priv->dev->phydev);
+
        /* Link UP/DOWN event */
-       if (status & UMAC_IRQ_LINK_EVENT) {
-               priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+       if (status & UMAC_IRQ_LINK_EVENT)
                phy_mac_interrupt(priv->dev->phydev);
-       }
+
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2710,7 +2715,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
 
        /* all other interested interrupts handled in bottom half */
-       status &= UMAC_IRQ_LINK_EVENT;
+       status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
        if (status) {
                /* Save irq status for bottom-half processing. */
                spin_lock_irqsave(&priv->lock, flags);
@@ -2874,6 +2879,12 @@ static int bcmgenet_open(struct net_device *dev)
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
+       ret = bcmgenet_mii_connect(dev);
+       if (ret) {
+               netdev_err(dev, "failed to connect to PHY\n");
+               goto err_clk_disable;
+       }
+
        /* take MAC out of reset */
        bcmgenet_umac_reset(priv);
 
@@ -2883,6 +2894,12 @@ static int bcmgenet_open(struct net_device *dev)
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
+       ret = bcmgenet_mii_config(dev, true);
+       if (ret) {
+               netdev_err(dev, "unsupported PHY\n");
+               goto err_disconnect_phy;
+       }
+
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
        if (priv->internal_phy) {
@@ -2898,7 +2915,7 @@ static int bcmgenet_open(struct net_device *dev)
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
-               goto err_clk_disable;
+               goto err_disconnect_phy;
        }
 
        /* Always enable ring 16 - descriptor ring */
@@ -2921,25 +2938,19 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
-       ret = bcmgenet_mii_probe(dev);
-       if (ret) {
-               netdev_err(dev, "failed to connect to PHY\n");
-               goto err_irq1;
-       }
-
        bcmgenet_netif_start(dev);
 
        netif_tx_start_all_queues(dev);
 
        return 0;
 
-err_irq1:
-       free_irq(priv->irq1, priv);
 err_irq0:
        free_irq(priv->irq0, priv);
 err_fini_dma:
        bcmgenet_dma_teardown(priv);
        bcmgenet_fini_dma(priv);
+err_disconnect_phy:
+       phy_disconnect(dev->phydev);
 err_clk_disable:
        if (priv->internal_phy)
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3620,6 +3631,8 @@ static int bcmgenet_resume(struct device *d)
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
+       phy_init_hw(dev->phydev);
+
        bcmgenet_umac_reset(priv);
 
        init_umac(priv);
@@ -3628,8 +3641,6 @@ static int bcmgenet_resume(struct device *d)
        if (priv->wolopts)
                clk_disable_unprepare(priv->clk_wol);
 
-       phy_init_hw(dev->phydev);
-
        /* Speed settings must be restored */
        bcmgenet_mii_config(priv->dev, false);
 
index 4a8fc03..7fbf573 100644 (file)
@@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
 #define  EXT_PWR_DOWN_PHY_EN           (1 << 20)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
+#define  RGMII_MODE_EN_V123            (1 << 0)
 #define  RGMII_LINK                    (1 << 4)
 #define  OOB_DISABLE                   (1 << 5)
 #define  RGMII_MODE_EN                 (1 << 6)
@@ -719,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_connect(struct net_device *dev);
 int bcmgenet_mii_config(struct net_device *dev, bool init);
-int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
index 970e478..17bb8d6 100644 (file)
@@ -173,6 +173,46 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
                                          bcmgenet_fixed_phy_link_update);
 }
 
+int bcmgenet_mii_connect(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device_node *dn = priv->pdev->dev.of_node;
+       struct phy_device *phydev;
+       u32 phy_flags = 0;
+       int ret;
+
+       /* Communicate the integrated PHY revision */
+       if (priv->internal_phy)
+               phy_flags = priv->gphy_rev;
+
+       /* Initialize link state variables that bcmgenet_mii_setup() uses */
+       priv->old_link = -1;
+       priv->old_speed = -1;
+       priv->old_duplex = -1;
+       priv->old_pause = -1;
+
+       if (dn) {
+               phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+                                       phy_flags, priv->phy_interface);
+               if (!phydev) {
+                       pr_err("could not attach to PHY\n");
+                       return -ENODEV;
+               }
+       } else {
+               phydev = dev->phydev;
+               phydev->dev_flags = phy_flags;
+
+               ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+                                        priv->phy_interface);
+               if (ret) {
+                       pr_err("could not attach to PHY\n");
+                       return -ENODEV;
+               }
+       }
+
+       return 0;
+}
+
 int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -258,74 +298,29 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
         */
        if (priv->ext_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-               reg |= RGMII_MODE_EN | id_mode_dis;
+               reg |= id_mode_dis;
+               if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+                       reg |= RGMII_MODE_EN_V123;
+               else
+                       reg |= RGMII_MODE_EN;
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
-       if (init)
-               dev_info(kdev, "configuring instance for %s\n", phy_name);
-
-       return 0;
-}
-
-int bcmgenet_mii_probe(struct net_device *dev)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       struct device_node *dn = priv->pdev->dev.of_node;
-       struct phy_device *phydev;
-       u32 phy_flags;
-       int ret;
-
-       /* Communicate the integrated PHY revision */
-       phy_flags = priv->gphy_rev;
-
-       /* Initialize link state variables that bcmgenet_mii_setup() uses */
-       priv->old_link = -1;
-       priv->old_speed = -1;
-       priv->old_duplex = -1;
-       priv->old_pause = -1;
-
-       if (dn) {
-               phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
-                                       phy_flags, priv->phy_interface);
-               if (!phydev) {
-                       pr_err("could not attach to PHY\n");
-                       return -ENODEV;
-               }
-       } else {
-               phydev = dev->phydev;
-               phydev->dev_flags = phy_flags;
+       if (init) {
+               linkmode_copy(phydev->advertising, phydev->supported);
 
-               ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
-                                        priv->phy_interface);
-               if (ret) {
-                       pr_err("could not attach to PHY\n");
-                       return -ENODEV;
-               }
-       }
+               /* The internal PHY has its link interrupts routed to the
+                * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+                * that prevents the signaling of link UP interrupts when
+                * the link operates at 10Mbps, so fallback to polling for
+                * those versions of GENET.
+                */
+               if (priv->internal_phy && !GENET_IS_V5(priv))
+                       phydev->irq = PHY_IGNORE_INTERRUPT;
 
-       /* Configure port multiplexer based on what the probed PHY device since
-        * reading the 'max-speed' property determines the maximum supported
-        * PHY speed which is needed for bcmgenet_mii_config() to configure
-        * things appropriately.
-        */
-       ret = bcmgenet_mii_config(dev, true);
-       if (ret) {
-               phy_disconnect(dev->phydev);
-               return ret;
+               dev_info(kdev, "configuring instance for %s\n", phy_name);
        }
 
-       linkmode_copy(phydev->advertising, phydev->supported);
-
-       /* The internal PHY has its link interrupts routed to the
-        * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
-        * that prevents the signaling of link UP interrupts when
-        * the link operates at 10Mbps, so fallback to polling for
-        * those versions of GENET.
-        */
-       if (priv->internal_phy && !GENET_IS_V5(priv))
-               dev->phydev->irq = PHY_IGNORE_INTERRUPT;
-
        return 0;
 }
 
index 8e8d557..1e1b774 100644 (file)
@@ -3405,17 +3405,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
                return err;
        }
 
-       *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+       *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
        if (IS_ERR(*tx_clk))
-               *tx_clk = NULL;
+               return PTR_ERR(*tx_clk);
 
-       *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+       *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
        if (IS_ERR(*rx_clk))
-               *rx_clk = NULL;
+               return PTR_ERR(*rx_clk);
 
-       *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+       *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
        if (IS_ERR(*tsu_clk))
-               *tsu_clk = NULL;
+               return PTR_ERR(*tsu_clk);
 
        err = clk_prepare_enable(*pclk);
        if (err) {
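
The macb change above stops treating every devm_clk_get() failure as "clock not present": devm_clk_get_optional() returns NULL when the clock is simply not described (a NULL clk is a no-op for the clk API) while still propagating real errors such as -EPROBE_DEFER. A minimal sketch for one optional clock, with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>

static int demo_get_optional_tx_clk(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get_optional(dev, "tx_clk");
        if (IS_ERR(*clk))
                return PTR_ERR(*clk);   /* e.g. -EPROBE_DEFER */

        /* clk may be NULL here; clk_prepare_enable(NULL) is a no-op. */
        return clk_prepare_enable(*clk);
}
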
index be2bafc..a04eccb 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* cavium_ptp.h - PTP 1588 clock on Cavium hardware
  * Copyright (c) 2003-2015, 2017 Cavium, Inc.
  */
index 5b60224..a4dead4 100644 (file)
@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
 {
-       struct sge *s = &adap->sge;
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+       int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
-       unsigned int bmap_idx = 0;
+       struct sge *s = &adap->sge;
        unsigned int per_chan;
-       int i, err, msi_idx, que_idx = 0;
 
        per_chan = rxq_info->nrxq / adap->params.nports;
 
@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
 
                if (msi_idx >= 0) {
                        bmap_idx = get_msix_idx_from_bmap(adap);
+                       if (bmap_idx < 0) {
+                               err = -ENOSPC;
+                               goto freeout;
+                       }
                        msi_idx = adap->msix_info_ulds[bmap_idx].idx;
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
index 162d7d8..19379ba 100644 (file)
@@ -1235,6 +1235,8 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
        priv->rx_td_enabled = enable;
 }
 
+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
+
 static int link_state_update(struct dpaa2_eth_priv *priv)
 {
        struct dpni_link_state state = {0};
@@ -1261,6 +1263,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
                goto out;
 
        if (state.up) {
+               update_tx_fqids(priv);
                netif_carrier_on(priv->net_dev);
                netif_tx_start_all_queues(priv->net_dev);
        } else {
@@ -2533,6 +2536,47 @@ static int set_pause(struct dpaa2_eth_priv *priv)
        return 0;
 }
 
+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+       struct dpni_queue_id qid = {0};
+       struct dpaa2_eth_fq *fq;
+       struct dpni_queue queue;
+       int i, j, err;
+
+       /* We only use Tx FQIDs for FQID-based enqueue, so check
+        * if DPNI version supports it before updating FQIDs
+        */
+       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+                                  DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+               return;
+
+       for (i = 0; i < priv->num_fqs; i++) {
+               fq = &priv->fq[i];
+               if (fq->type != DPAA2_TX_CONF_FQ)
+                       continue;
+               for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+                       err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+                                            DPNI_QUEUE_TX, j, fq->flowid,
+                                            &queue, &qid);
+                       if (err)
+                               goto out_err;
+
+                       fq->tx_fqid[j] = qid.fqid;
+                       if (fq->tx_fqid[j] == 0)
+                               goto out_err;
+               }
+       }
+
+       priv->enqueue = dpaa2_eth_enqueue_fq;
+
+       return;
+
+out_err:
+       netdev_info(priv->net_dev,
+                   "Error reading Tx FQID, fallback to QDID-based enqueue\n");
+       priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
 /* Configure the DPNI object this interface is associated with */
 static int setup_dpni(struct fsl_mc_device *ls_dev)
 {
@@ -3306,6 +3350,9 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
        if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
                link_state_update(netdev_priv(net_dev));
 
+       if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+               set_mac_addr(netdev_priv(net_dev));
+
        return IRQ_HANDLED;
 }
 
@@ -3331,7 +3378,8 @@ static int setup_irqs(struct fsl_mc_device *ls_dev)
        }
 
        err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
-                               DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+                               DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+                               DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
        if (err < 0) {
                dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
                goto free_irq;
index fd58391..ee0711d 100644 (file)
@@ -133,9 +133,12 @@ int dpni_reset(struct fsl_mc_io    *mc_io,
  */
 #define DPNI_IRQ_INDEX                         0
 /**
- * IRQ event - indicates a change in link state
+ * IRQ events:
+ *       indicates a change in link state
+ *       indicates a change in endpoint
  */
 #define DPNI_IRQ_EVENT_LINK_CHANGED            0x00000001
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED                0x00000002
 
 int dpni_set_irq_enable(struct fsl_mc_io       *mc_io,
                        u32                     cmd_flags,
index c4b7bf8..75ccc1e 100644 (file)
@@ -32,6 +32,8 @@
 
 #define HNAE3_MOD_VERSION "1.0"
 
+#define HNAE3_MIN_VECTOR_NUM   2 /* first one for misc, another for IO */
+
 /* Device IDs */
 #define HNAE3_DEV_ID_GE                                0xA220
 #define HNAE3_DEV_ID_25GE                      0xA221
index fd7f943..e02e01b 100644 (file)
@@ -906,6 +906,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
+               /* nic's msix number always equals the roce's. */
+               hdev->num_nic_msi = hdev->num_roce_msi;
+
                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
@@ -915,6 +918,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+               hdev->num_nic_msi = hdev->num_msi;
+       }
+
+       if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+               dev_err(&hdev->pdev->dev,
+                       "Just %u msi resources, not enough for pf(min:2).\n",
+                       hdev->num_nic_msi);
+               return -EINVAL;
        }
 
        return 0;
@@ -1507,6 +1519,10 @@ static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
        kinfo->rss_size = min_t(u16, hdev->rss_size_max,
                                vport->alloc_tqps / hdev->tm_info.num_tc);
 
+       /* ensure one to one mapping between irq and queue at default */
+       kinfo->rss_size = min_t(u16, kinfo->rss_size,
+                               (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
        return 0;
 }
 
@@ -2285,7 +2301,8 @@ static int hclge_init_msi(struct hclge_dev *hdev)
        int vectors;
        int i;
 
-       vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+       vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+                                       hdev->num_msi,
                                        PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (vectors < 0) {
                dev_err(&pdev->dev,
@@ -2300,6 +2317,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
 
        hdev->num_msi = vectors;
        hdev->num_msi_left = vectors;
+
        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = hdev->base_msi_vector +
                                hdev->roce_base_msix_offset;
@@ -3903,6 +3921,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
        int alloc = 0;
        int i, j;
 
+       vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
        vector_num = min(hdev->num_msi_left, vector_num);
 
        for (j = 0; j < vector_num; j++) {
index 3e9574a..c3d56b8 100644 (file)
@@ -763,6 +763,7 @@ struct hclge_dev {
        u32 base_msi_vector;
        u16 *vector_status;
        int *vector_irq;
+       u16 num_nic_msi;        /* Num of nic vectors for this PF */
        u16 num_roce_msi;       /* Num of roce vectors for this PF */
        int roce_base_vector;
 
index 9f0e35f..62399cc 100644 (file)
@@ -537,9 +537,16 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+               /* if the user has not set rss, clamp rss_size against the
+                * valid msi numbers to keep a one-to-one map between tqp and
+                * irq by default.
+                */
+               if (!kinfo->req_rss_size)
+                       max_rss_size = min_t(u16, max_rss_size,
+                                            (hdev->num_nic_msi - 1) /
+                                            kinfo->num_tc);
+
                /* Set to the maximum specification value (max_rss_size). */
-               dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
-                        kinfo->rss_size, max_rss_size);
                kinfo->rss_size = max_rss_size;
        }
 
index e3090b3..7d7e712 100644 (file)
@@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
                kinfo->tqp[i] = &hdev->htqp[i].q;
        }
 
+       /* after the max rss_size and tqps are initialized, adjust the default
+        * tqp number and rss size to match the actual vector count
+        */
+       kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+       kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+                               kinfo->rss_size);
+
        return 0;
 }
 
@@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
        int alloc = 0;
        int i, j;
 
+       vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
        vector_num = min(hdev->num_msi_left, vector_num);
 
        for (j = 0; j < vector_num; j++) {
@@ -2246,13 +2254,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
        int vectors;
        int i;
 
-       if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+       if (hnae3_dev_roce_supported(hdev))
                vectors = pci_alloc_irq_vectors(pdev,
                                                hdev->roce_base_msix_offset + 1,
                                                hdev->num_msi,
                                                PCI_IRQ_MSIX);
        else
-               vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+               vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+                                               hdev->num_msi,
                                                PCI_IRQ_MSI | PCI_IRQ_MSIX);
 
        if (vectors < 0) {
@@ -2268,6 +2277,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
 
        hdev->num_msi = vectors;
        hdev->num_msi_left = vectors;
+
        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
 
@@ -2533,7 +2543,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
 
        req = (struct hclgevf_query_res_cmd *)desc.data;
 
-       if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+       if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGEVF_MSIX_OFT_ROCEE_M,
@@ -2542,6 +2552,9 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
 
+               /* nic's msix number always equals the roce's. */
+               hdev->num_nic_msix = hdev->num_roce_msix;
+
                /* VF should have NIC vectors and Roce vectors, NIC vectors
                 * are queued before Roce vectors. The offset is fixed to 64.
                 */
@@ -2551,6 +2564,15 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+               hdev->num_nic_msix = hdev->num_msi;
+       }
+
+       if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+               dev_err(&hdev->pdev->dev,
+                       "Just %u msi resources, not enough for vf(min:2).\n",
+                       hdev->num_nic_msix);
+               return -EINVAL;
        }
 
        return 0;
index bdde3af..2b8d6bc 100644 (file)
@@ -270,6 +270,7 @@ struct hclgevf_dev {
        u16 num_msi;
        u16 num_msi_left;
        u16 num_msi_used;
+       u16 num_nic_msix;       /* Num of nic vectors for this VF */
        u16 num_roce_msix;      /* Num of roce vectors for this VF */
        u16 roce_base_msix_offset;
        int roce_base_vector;
index 3e863a7..7df5d7d 100644 (file)
@@ -148,11 +148,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
 {
        u32 time_cnt;
        u32 reg_value;
+       int ret;
 
        regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
        for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-               regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+               ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+               if (ret)
+                       return ret;
+
                reg_value &= st_msk;
                if ((!!check_st) == (!!reg_value))
                        break;
index 211c5f7..aec7e98 100644 (file)
@@ -96,6 +96,8 @@
 
 #define OPT_SWAP_PORT  0x0001  /* Need to wordswp on the MPU port */
 
+#define LIB82596_DMA_ATTR      DMA_ATTR_NON_CONSISTENT
+
 #define DMA_WBACK(ndev, addr, len) \
        do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
@@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
 
        unregister_netdev (dev);
        dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
-                      lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                      lp->dma_addr, LIB82596_DMA_ATTR);
        free_netdev (dev);
        return 0;
 }
index 1274ad2..f9742af 100644 (file)
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
 
        dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
                              &lp->dma_addr, GFP_KERNEL,
-                             DMA_ATTR_NON_CONSISTENT);
+                             LIB82596_DMA_ATTR);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
        i = register_netdev(dev);
        if (i) {
                dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
-                              dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                              dma, lp->dma_addr, LIB82596_DMA_ATTR);
                return i;
        }
 
index 6eb6c2f..6436a98 100644 (file)
@@ -24,6 +24,8 @@
 
 static const char sni_82596_string[] = "snirm_82596";
 
+#define LIB82596_DMA_ATTR      0
+
 #define DMA_WBACK(priv, addr, len)     do { } while (0)
 #define DMA_INV(priv, addr, len)       do { } while (0)
 #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
 
        unregister_netdev(dev);
        dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
-                      lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                      lp->dma_addr, LIB82596_DMA_ATTR);
        iounmap(lp->ca);
        iounmap(lp->mpu_port);
        free_netdev (dev);
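
The lib82596 hunks route the DMA attribute through a per-platform LIB82596_DMA_ATTR macro (DMA_ATTR_NON_CONSISTENT on the parisc variant, 0 on the SNI one) so the alloc and free sides always agree. A small sketch of that pairing, with a hypothetical FOO_DMA_ATTR and helper names:

    #include <linux/dma-mapping.h>

    /* On a platform needing non-consistent memory; 0 elsewhere. */
    #define FOO_DMA_ATTR    DMA_ATTR_NON_CONSISTENT

    static void *foo_alloc_shared(struct device *dev, size_t size,
                                  dma_addr_t *handle)
    {
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, FOO_DMA_ATTR);
    }

    static void foo_free_shared(struct device *dev, size_t size, void *vaddr,
                                dma_addr_t handle)
    {
            /* Must pass the same attrs used at allocation time. */
            dma_free_attrs(dev, size, vaddr, handle, FOO_DMA_ATTR);
    }
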
index 2b073a3..f59d9a8 100644 (file)
@@ -2878,12 +2878,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
        if (test_bit(0, &adapter->resetting) &&
            adapter->reset_reason == VNIC_RESET_MOBILITY) {
-               u64 val = (0xff000000) | scrq->hw_irq;
+               struct irq_desc *desc = irq_to_desc(scrq->irq);
+               struct irq_chip *chip = irq_desc_get_chip(desc);
 
-               rc = plpar_hcall_norets(H_EOI, val);
-               if (rc)
-                       dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-                               val, rc);
+               chip->irq_eoi(&desc->irq_data);
        }
 
        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
index c40729b..7fad2f2 100644 (file)
@@ -45,7 +45,6 @@
 
 #define BAR_0          0
 #define BAR_1          1
-#define BAR_5          5
 
 #define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
        PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
index 86493fe..78db336 100644 (file)
@@ -977,7 +977,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_ioremap;
 
        if (adapter->need_ioport) {
-               for (i = BAR_1; i <= BAR_5; i++) {
+               for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
                        if (pci_resource_len(pdev, i) == 0)
                                continue;
                        if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
index e85271b..681d44c 100644 (file)
@@ -42,7 +42,6 @@
 
 #define BAR_0          0
 #define BAR_1          1
-#define BAR_5          5
 
 struct ixgb_adapter;
 #include "ixgb_hw.h"
index 0940a0d..3d8c051 100644 (file)
@@ -412,7 +412,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_ioremap;
        }
 
-       for (i = BAR_1; i <= BAR_5; i++) {
+       for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
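
Both the e1000 and ixgb hunks drop the private BAR_5 define and bound the scan with PCI_STD_NUM_BARS, the count of standard BARs introduced alongside these changes. A sketch of the scan for an I/O-port BAR, assuming that constant is available:

    #include <linux/errno.h>
    #include <linux/pci.h>

    static int foo_find_ioport_bar(struct pci_dev *pdev)
    {
            int i;

            for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                    if (pci_resource_len(pdev, i) == 0)
                            continue;       /* BAR not implemented */
                    if (pci_resource_flags(pdev, i) & IORESOURCE_IO)
                            return i;       /* first I/O-port BAR */
            }

            return -ENODEV;
    }
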
index c610693..703adb9 100644 (file)
@@ -261,6 +261,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
                ge_mode = 0;
                switch (state->interface) {
                case PHY_INTERFACE_MODE_MII:
+               case PHY_INTERFACE_MODE_GMII:
                        ge_mode = 1;
                        break;
                case PHY_INTERFACE_MODE_REVMII:
index 9231b39..c501bf2 100644 (file)
@@ -112,17 +112,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
        u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)]   = {0};
        struct xarray *mkeys = &dev->priv.mkey_table;
-       struct mlx5_core_mkey *deleted_mkey;
        unsigned long flags;
 
        xa_lock_irqsave(mkeys, flags);
-       deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
+       __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
        xa_unlock_irqrestore(mkeys, flags);
-       if (!deleted_mkey) {
-               mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n",
-                             mlx5_base_mkey(mkey->key));
-               return -ENOENT;
-       }
 
        MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
        MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
index 913f1e5..d7c7467 100644 (file)
@@ -137,7 +137,8 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
 
        icm_mr->icm_start_addr = icm_mr->dm.addr;
 
-       align_diff = icm_mr->icm_start_addr % align_base;
+       /* align_base is always a power of 2 */
+       align_diff = icm_mr->icm_start_addr & (align_base - 1);
        if (align_diff)
                icm_mr->used_length = align_base - align_diff;
 
index 4187f2b..e8b6560 100644 (file)
@@ -788,12 +788,10 @@ again:
                         * it means that all the previous stes are the same,
                         * if so, this rule is duplicated.
                         */
-                       if (mlx5dr_ste_is_last_in_rule(nic_matcher,
-                                                      matched_ste->ste_chain_location)) {
-                               mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
-                               return NULL;
-                       }
-                       return matched_ste;
+                       if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
+                               return matched_ste;
+
+                       mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
                }
 
                if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
index 899450b..7c03b66 100644 (file)
@@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
        devlink = priv_to_devlink(mlxsw_sp->core);
        in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
                                                           local_port);
+       skb_push(skb, ETH_HLEN);
        devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
        consume_skb(skb);
 }
index b063eb7..aac1151 100644 (file)
@@ -388,13 +388,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                        continue;
 
                phy = of_phy_find_device(phy_node);
+               of_node_put(phy_node);
                if (!phy)
                        continue;
 
                err = ocelot_probe_port(ocelot, port, regs, phy);
                if (err) {
                        of_node_put(portnp);
-                       return err;
+                       goto out_put_ports;
                }
 
                phy_mode = of_get_phy_mode(portnp);
@@ -422,7 +423,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                                "invalid phy mode for port%d, (Q)SGMII only\n",
                                port);
                        of_node_put(portnp);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto out_put_ports;
                }
 
                serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -435,7 +437,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                                        "missing SerDes phys for port%d\n",
                                        port);
 
-                       goto err_probe_ports;
+                       of_node_put(portnp);
+                       goto out_put_ports;
                }
 
                ocelot->ports[port]->serdes = serdes;
@@ -447,9 +450,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
 
        dev_info(&pdev->dev, "Ocelot switch probed\n");
 
-       return 0;
-
-err_probe_ports:
+out_put_ports:
+       of_node_put(ports);
        return err;
 }
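
The ocelot hunks make every exit path drop the OF node references held by the loop: the child reference taken by the iterator on early exits, and the reference on the ports node itself. A minimal sketch of that reference discipline (hypothetical function; it consumes the caller's reference on ports):

    #include <linux/errno.h>
    #include <linux/of.h>

    static int foo_count_ports(struct device_node *ports)
    {
            struct device_node *portnp;
            int count = 0;
            u32 reg;

            for_each_available_child_of_node(ports, portnp) {
                    if (of_property_read_u32(portnp, "reg", &reg)) {
                            /* the iterator holds a reference on portnp;
                             * drop it on every early exit
                             */
                            of_node_put(portnp);
                            count = -EINVAL;
                            break;
                    }
                    count++;
            }

            of_node_put(ports);     /* dropped on all paths out */
            return count;
    }
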
 
index 141571e..544012a 100644 (file)
@@ -1356,9 +1356,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_hw_addr_random(ndev);
 
-       /* Reset the ethernet controller */
-       __lpc_eth_reset(pldat);
-
        /* then shut everything down to save power */
        __lpc_eth_shutdown(pldat);
 
index bd0583e..d25b88f 100644 (file)
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
 config IONIC
        tristate "Pensando Ethernet IONIC Support"
        depends on 64BIT && PCI
+       select NET_DEVLINK
        help
          This enables the support for the Pensando family of Ethernet
          adapters.  More specific information on this driver can be
index 812190e..6a95b42 100644 (file)
@@ -182,6 +182,8 @@ struct ionic_lif {
 
 #define lif_to_txqcq(lif, i)   ((lif)->txqcqs[i].qcq)
 #define lif_to_rxqcq(lif, i)   ((lif)->rxqcqs[i].qcq)
+#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
+#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
 #define lif_to_txq(lif, i)     (&lif_to_txqcq((lif), i)->q)
 #define lif_to_rxq(lif, i)     (&lif_to_txqcq((lif), i)->q)
 
index e290788..03916b6 100644 (file)
@@ -117,7 +117,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
        /* rx stats */
        total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
 
-       if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+       if (test_bit(IONIC_LIF_UP, lif->state) &&
+           test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
                /* tx debug stats */
                total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
                                      IONIC_NUM_TX_Q_STATS +
@@ -149,7 +150,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
                        *buf += ETH_GSTRING_LEN;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
                        for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
                                snprintf(*buf, ETH_GSTRING_LEN,
                                         "txq_%d_%s",
@@ -187,7 +189,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
                        *buf += ETH_GSTRING_LEN;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
                        for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
                                snprintf(*buf, ETH_GSTRING_LEN,
                                         "rxq_%d_cq_%s",
@@ -223,6 +226,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
 {
        struct ionic_lif_sw_stats lif_stats;
        struct ionic_qcq *txqcq, *rxqcq;
+       struct ionic_tx_stats *txstats;
+       struct ionic_rx_stats *rxstats;
        int i, q_num;
 
        ionic_get_lif_stats(lif, &lif_stats);
@@ -233,15 +238,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
        }
 
        for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
-               txqcq = lif_to_txqcq(lif, q_num);
+               txstats = &lif_to_txstats(lif, q_num);
 
                for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
-                       **buf = IONIC_READ_STAT64(&txqcq->stats->tx,
+                       **buf = IONIC_READ_STAT64(txstats,
                                                  &ionic_tx_stats_desc[i]);
                        (*buf)++;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+                       txqcq = lif_to_txqcq(lif, q_num);
                        for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
                                **buf = IONIC_READ_STAT64(&txqcq->q,
                                                      &ionic_txq_stats_desc[i]);
@@ -258,22 +265,24 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
                                (*buf)++;
                        }
                        for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
-                               **buf = txqcq->stats->tx.sg_cntr[i];
+                               **buf = txstats->sg_cntr[i];
                                (*buf)++;
                        }
                }
        }
 
        for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
-               rxqcq = lif_to_rxqcq(lif, q_num);
+               rxstats = &lif_to_rxstats(lif, q_num);
 
                for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
-                       **buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
+                       **buf = IONIC_READ_STAT64(rxstats,
                                                  &ionic_rx_stats_desc[i]);
                        (*buf)++;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+                       rxqcq = lif_to_rxqcq(lif, q_num);
                        for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
                                **buf = IONIC_READ_STAT64(&rxqcq->cq,
                                                   &ionic_dbg_cq_stats_desc[i]);
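
The three ionic hunks apply the same "LIF up and debug stats enabled" test in the count, strings and values paths; if those predicates ever diverge, the size reported by get_sset_count() stops matching what get_strings() and get_ethtool_stats() fill in. A minimal sketch of centralizing the predicate (names and state bits are hypothetical, not the ionic driver's API):

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum { FOO_LIF_UP, FOO_LIF_SW_DEBUG_STATS, FOO_LIF_STATE_SIZE };

    struct foo_lif {
            DECLARE_BITMAP(state, FOO_LIF_STATE_SIZE);
    };

    /* Shared by get_sset_count(), get_strings() and get_ethtool_stats()
     * so the three ethtool buffers always agree in size.
     */
    static bool foo_debug_stats_enabled(struct foo_lif *lif)
    {
            return test_bit(FOO_LIF_UP, lif->state) &&
                   test_bit(FOO_LIF_SW_DEBUG_STATS, lif->state);
    }
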
index 4574448..b4b8ba0 100644 (file)
@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                netdev_err(qdev->ndev,
                                           "PCI mapping failed with error: %d\n",
                                           err);
+                               dev_kfree_skb_irq(skb);
                                ql_free_large_buffers(qdev);
                                return -ENOMEM;
                        }
index 74f81fe..350b0d9 100644 (file)
@@ -4146,6 +4146,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
        rtl_lock_config_regs(tp);
 }
 
+static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
+{
+       if (mtu > ETH_DATA_LEN)
+               rtl_hw_jumbo_enable(tp);
+       else
+               rtl_hw_jumbo_disable(tp);
+}
+
 DECLARE_RTL_COND(rtl_chipcmd_cond)
 {
        return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -4442,11 +4450,6 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
-       if (tp->dev->mtu <= ETH_DATA_LEN) {
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
-                                        PCI_EXP_DEVCTL_NOSNOOP_EN);
-       }
 }
 
 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4462,9 +4465,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
 
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
        rtl_disable_clock_request(tp);
 }
 
@@ -4490,9 +4490,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
        rtl_set_def_aspm_entry_latency(tp);
 
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 }
 
 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -4503,9 +4500,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 
        /* Magic. */
        RTL_W8(tp, DBG_REG, 0x20);
-
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 }
 
 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -4611,9 +4605,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_1);
 
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
        rtl_disable_clock_request(tp);
 
        /* Reset tx FIFO pointer */
@@ -4636,9 +4627,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_2);
 
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
        rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -5485,6 +5473,8 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
        rtl_set_rx_tx_desc_registers(tp);
        rtl_lock_config_regs(tp);
 
+       rtl_jumbo_config(tp, tp->dev->mtu);
+
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R16(tp, CPlusCmd);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -5498,10 +5488,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       if (new_mtu > ETH_DATA_LEN)
-               rtl_hw_jumbo_enable(tp);
-       else
-               rtl_hw_jumbo_disable(tp);
+       rtl_jumbo_config(tp, new_mtu);
 
        dev->mtu = new_mtu;
        netdev_update_features(dev);
index 55db7fb..f9e6744 100644 (file)
@@ -282,7 +282,6 @@ struct netsec_desc_ring {
        void *vaddr;
        u16 head, tail;
        u16 xdp_xmit; /* netsec_xdp_xmit packets */
-       bool is_xdp;
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;
        spinlock_t lock; /* XDP tx queue locking */
@@ -634,8 +633,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
        unsigned int bytes;
        int cnt = 0;
 
-       if (dring->is_xdp)
-               spin_lock(&dring->lock);
+       spin_lock(&dring->lock);
 
        bytes = 0;
        entry = dring->vaddr + DESC_SZ * tail;
@@ -682,8 +680,8 @@ next:
                entry = dring->vaddr + DESC_SZ * tail;
                cnt++;
        }
-       if (dring->is_xdp)
-               spin_unlock(&dring->lock);
+
+       spin_unlock(&dring->lock);
 
        if (!cnt)
                return false;
@@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
        de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
        de->attr = attr;
-       /* under spin_lock if using XDP */
-       if (!dring->is_xdp)
-               dma_wmb();
 
        dring->desc[idx] = *desc;
        if (desc->buf_type == TYPE_NETSEC_SKB)
@@ -1123,12 +1118,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
        u16 tso_seg_len = 0;
        int filled;
 
-       if (dring->is_xdp)
-               spin_lock_bh(&dring->lock);
+       spin_lock_bh(&dring->lock);
        filled = netsec_desc_used(dring);
        if (netsec_check_stop_tx(priv, filled)) {
-               if (dring->is_xdp)
-                       spin_unlock_bh(&dring->lock);
+               spin_unlock_bh(&dring->lock);
                net_warn_ratelimited("%s %s Tx queue full\n",
                                     dev_name(priv->dev), ndev->name);
                return NETDEV_TX_BUSY;
@@ -1161,8 +1154,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
        tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
                                          skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
-               if (dring->is_xdp)
-                       spin_unlock_bh(&dring->lock);
+               spin_unlock_bh(&dring->lock);
                netif_err(priv, drv, priv->ndev,
                          "%s: DMA mapping failed\n", __func__);
                ndev->stats.tx_dropped++;
@@ -1177,8 +1169,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
        netdev_sent_queue(priv->ndev, skb->len);
 
        netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
-       if (dring->is_xdp)
-               spin_unlock_bh(&dring->lock);
+       spin_unlock_bh(&dring->lock);
        netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
 
        return NETDEV_TX_OK;
@@ -1262,7 +1253,6 @@ err:
 static void netsec_setup_tx_dring(struct netsec_priv *priv)
 {
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
-       struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
        int i;
 
        for (i = 0; i < DESC_NUM; i++) {
@@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
                 */
                de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
        }
-
-       if (xdp_prog)
-               dring->is_xdp = true;
-       else
-               dring->is_xdp = false;
-
 }
 
 static int netsec_setup_rx_dring(struct netsec_priv *priv)
index f97a409..ddcc191 100644 (file)
@@ -651,7 +651,8 @@ static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
                        }
                }
        } else {
-               netdev_info(dev, "Too many address, switching to promiscuous\n");
+               if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
+                       netdev_info(dev, "Too many address, switching to promiscuous\n");
                v = EMAC_FRM_FLT_RXALL;
        }
 
index 9b4b5f6..5a7b0ac 100644 (file)
@@ -401,8 +401,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
        int numhashregs = (hw->multicast_filter_bins >> 5);
        int mcbitslog2 = hw->mcast_bits_log2;
        unsigned int value;
+       u32 mc_filter[8];
        int i;
 
+       memset(mc_filter, 0, sizeof(mc_filter));
+
        value = readl(ioaddr + GMAC_PACKET_FILTER);
        value &= ~GMAC_PACKET_FILTER_HMC;
        value &= ~GMAC_PACKET_FILTER_HPF;
@@ -416,16 +419,13 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                /* Pass all multi */
                value |= GMAC_PACKET_FILTER_PM;
                /* Set all the bits of the HASH tab */
-               for (i = 0; i < numhashregs; i++)
-                       writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
+               memset(mc_filter, 0xff, sizeof(mc_filter));
        } else if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
-               u32 mc_filter[8];
 
                /* Hash filter for multicast */
                value |= GMAC_PACKET_FILTER_HMC;
 
-               memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
                        /* The upper n bits of the calculated CRC are used to
                         * index the contents of the hash table. The number of
@@ -440,14 +440,15 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                         */
                        mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
                }
-               for (i = 0; i < numhashregs; i++)
-                       writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
        }
 
+       for (i = 0; i < numhashregs; i++)
+               writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
+
        value |= GMAC_PACKET_FILTER_HPF;
 
        /* Handle multiple unicast addresses */
-       if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+       if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
                /* Switch to promiscuous mode if more than 128 addrs
                 * are required
                 */
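
The dwmac4 hunk builds the multicast hash in a local mc_filter[] array and then writes every hash register unconditionally, so bins that are no longer set also get cleared in hardware. A hedged sketch of the bin computation described in the context above (the upper bits of the CRC index the table); the helper name is hypothetical:

    #include <linux/bitrev.h>
    #include <linux/crc32.h>
    #include <linux/if_ether.h>

    /* mc_filter must hold (1 << mcbitslog2) / 32 words, zeroed beforehand. */
    static void foo_hash_set(u32 *mc_filter, const u8 *addr, int mcbitslog2)
    {
            u32 bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >>
                         (32 - mcbitslog2);

            mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 0x1f);
    }
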
index 3f4f313..e436fa1 100644 (file)
@@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
 
        if (!enable) {
                val |= PPSCMDx(index, 0x5);
+               val |= PPSEN0;
                writel(val, ioaddr + MAC_PPS_CONTROL);
                return 0;
        }
index 5923ca6..9903738 100644 (file)
@@ -84,7 +84,7 @@
 #define XGMAC_TSIE                     BIT(12)
 #define XGMAC_LPIIE                    BIT(5)
 #define XGMAC_PMTIE                    BIT(4)
-#define XGMAC_INT_DEFAULT_EN           (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
+#define XGMAC_INT_DEFAULT_EN           (XGMAC_LPIIE | XGMAC_PMTIE)
 #define XGMAC_Qx_TX_FLOW_CTRL(x)       (0x00000070 + (x) * 4)
 #define XGMAC_PT                       GENMASK(31, 16)
 #define XGMAC_PT_SHIFT                 16
 #define XGMAC_HWFEAT_GMIISEL           BIT(1)
 #define XGMAC_HW_FEATURE1              0x00000120
 #define XGMAC_HWFEAT_L3L4FNUM          GENMASK(30, 27)
+#define XGMAC_HWFEAT_HASHTBLSZ         GENMASK(25, 24)
 #define XGMAC_HWFEAT_RSSEN             BIT(20)
 #define XGMAC_HWFEAT_TSOEN             BIT(18)
 #define XGMAC_HWFEAT_SPHEN             BIT(17)
index 2b277b2..5031398 100644 (file)
@@ -472,7 +472,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
        dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
 
        /* Handle multiple unicast addresses */
-       if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
+       if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
                value |= XGMAC_FILTER_PR;
        } else {
                struct netdev_hw_addr *ha;
@@ -523,8 +523,8 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
                                  struct stmmac_rss *cfg, u32 num_rxq)
 {
        void __iomem *ioaddr = hw->pcsr;
+       u32 value, *key;
        int i, ret;
-       u32 value;
 
        value = readl(ioaddr + XGMAC_RSS_CTRL);
        if (!cfg || !cfg->enable) {
@@ -533,8 +533,9 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
                return 0;
        }
 
-       for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
-               ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
+       key = (u32 *)cfg->key;
+       for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
+               ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
                if (ret)
                        return ret;
        }
index 53c4a40..965cbe3 100644 (file)
@@ -380,6 +380,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
        /* MAC HW feature 1 */
        hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
        dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
+       dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
        dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
        dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
        dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
index d323273..3dfd04e 100644 (file)
@@ -629,6 +629,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       ts_event_en = PTP_TCR_TSEVNTENA;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -2609,7 +2610,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        }
 
        if (priv->hw->pcs)
-               stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+               stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
 
        /* set TX and RX rings length */
        stmmac_set_rings_length(priv);
@@ -4715,11 +4716,9 @@ int stmmac_suspend(struct device *dev)
        if (!ndev || !netif_running(ndev))
                return 0;
 
-       mutex_lock(&priv->lock);
+       phylink_mac_change(priv->phylink, false);
 
-       rtnl_lock();
-       phylink_stop(priv->phylink);
-       rtnl_unlock();
+       mutex_lock(&priv->lock);
 
        netif_device_detach(ndev);
        stmmac_stop_all_queues(priv);
@@ -4734,11 +4733,19 @@ int stmmac_suspend(struct device *dev)
                stmmac_pmt(priv, priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
+               mutex_unlock(&priv->lock);
+               rtnl_lock();
+               phylink_stop(priv->phylink);
+               rtnl_unlock();
+               mutex_lock(&priv->lock);
+
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
-               clk_disable(priv->plat->pclk);
-               clk_disable(priv->plat->stmmac_clk);
+               if (priv->plat->clk_ptp_ref)
+                       clk_disable_unprepare(priv->plat->clk_ptp_ref);
+               clk_disable_unprepare(priv->plat->pclk);
+               clk_disable_unprepare(priv->plat->stmmac_clk);
        }
        mutex_unlock(&priv->lock);
 
@@ -4801,8 +4808,10 @@ int stmmac_resume(struct device *dev)
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clk previously disabled */
-               clk_enable(priv->plat->stmmac_clk);
-               clk_enable(priv->plat->pclk);
+               clk_prepare_enable(priv->plat->stmmac_clk);
+               clk_prepare_enable(priv->plat->pclk);
+               if (priv->plat->clk_ptp_ref)
+                       clk_prepare_enable(priv->plat->clk_ptp_ref);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
@@ -4824,12 +4833,16 @@ int stmmac_resume(struct device *dev)
 
        stmmac_start_all_queues(priv);
 
-       rtnl_lock();
-       phylink_start(priv->phylink);
-       rtnl_unlock();
-
        mutex_unlock(&priv->lock);
 
+       if (!device_may_wakeup(priv->device)) {
+               rtnl_lock();
+               phylink_start(priv->phylink);
+               rtnl_unlock();
+       }
+
+       phylink_mac_change(priv->phylink, true);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_resume);
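
The stmmac suspend/resume hunks switch from clk_disable()/clk_enable() to the full clk_disable_unprepare()/clk_prepare_enable() pairs and add the optional PTP reference clock to both paths. A sketch of that pairing with hypothetical helper names:

    #include <linux/clk.h>

    static void foo_clks_suspend(struct clk *bus_clk, struct clk *pclk,
                                 struct clk *ptp_ref)
    {
            if (ptp_ref)
                    clk_disable_unprepare(ptp_ref);
            clk_disable_unprepare(pclk);
            clk_disable_unprepare(bus_clk);
    }

    static int foo_clks_resume(struct clk *bus_clk, struct clk *pclk,
                               struct clk *ptp_ref)
    {
            int ret;

            ret = clk_prepare_enable(bus_clk);
            if (ret)
                    return ret;

            ret = clk_prepare_enable(pclk);
            if (ret)
                    goto err_pclk;

            if (ptp_ref) {
                    ret = clk_prepare_enable(ptp_ref);
                    if (ret)
                            goto err_ptp;
            }
            return 0;

    err_ptp:
            clk_disable_unprepare(pclk);
    err_pclk:
            clk_disable_unprepare(bus_clk);
            return ret;
    }
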
index 292045f..8237dbc 100644 (file)
@@ -489,7 +489,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
        }
 
        /* Get the base address of device */
-       for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
@@ -532,7 +532,7 @@ static void stmmac_pci_remove(struct pci_dev *pdev)
        if (priv->plat->stmmac_clk)
                clk_unregister_fixed_rate(priv->plat->stmmac_clk);
 
-       for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
                pcim_iounmap_regions(pdev, BIT(i));
index 173493d..df638b1 100644 (file)
@@ -164,7 +164,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
 /* structure describing a PTP hardware clock */
 static struct ptp_clock_info stmmac_ptp_clock_ops = {
        .owner = THIS_MODULE,
-       .name = "stmmac_ptp_clock",
+       .name = "stmmac ptp",
        .max_adj = 62500000,
        .n_alarm = 0,
        .n_ext_ts = 0,
index 5f66f61..e4ac3c4 100644 (file)
@@ -487,8 +487,8 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
 
 static int stmmac_test_hfilt(struct stmmac_priv *priv)
 {
-       unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
-       unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+       unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+       unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
        struct stmmac_packet_attrs attr = { };
        int ret;
 
@@ -496,6 +496,9 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
        if (ret)
                return ret;
 
+       if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
+
        ret = dev_mc_add(priv->dev, gd_addr);
        if (ret)
                return ret;
@@ -573,6 +576,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
+       if (!priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
 
        /* Remove all MC addresses */
        __dev_mc_unsync(priv->dev, NULL);
@@ -611,6 +616,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
+       if (!priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
 
        /* Remove all UC addresses */
        __dev_uc_unsync(priv->dev, NULL);
@@ -1564,10 +1571,6 @@ static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
        struct stmmac_packet_attrs attr = { };
        int size = priv->dma_buf_sz;
 
-       /* Only XGMAC has SW support for multiple RX descs in same packet */
-       if (priv->plat->has_xgmac)
-               size = priv->dev->max_mtu;
-
        attr.dst = priv->dev->dev_addr;
        attr.max_size = size - ETH_FCS_LEN;
        attr.queue_mapping = queue;
index e231098..f9a9a9d 100644 (file)
@@ -510,7 +510,7 @@ static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
        return NULL;
 }
 
-struct {
+static struct {
        int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
                  struct stmmac_flow_entry *entry);
 } tc_flow_parsers[] = {
index 386bafe..fa8604d 100644 (file)
@@ -34,7 +34,7 @@ static int xlgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
                return ret;
        }
 
-       for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_len(pcidev, i) == 0)
                        continue;
                ret = pcim_iomap_regions(pcidev, BIT(i), XLGMAC_DRV_NAME);
index a65edd2..37ba708 100644 (file)
@@ -722,7 +722,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
  * cpdma_chan_split_pool - Splits ctrl pool between all channels.
  * Has to be called under ctlr lock
  */
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 {
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
        int free_rx_num = 0, free_tx_num = 0;
index ceddb42..0dd0ba9 100644 (file)
@@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_interface *interface)
 
        ieee802154_unregister_hw(atusb->hw);
 
+       usb_put_dev(atusb->usb_dev);
+
        ieee802154_free_hw(atusb->hw);
 
        usb_set_intfdata(interface, NULL);
-       usb_put_dev(atusb->usb_dev);
 
        pr_debug("%s done\n", __func__);
 }
index 11402dc..430c937 100644 (file)
@@ -3145,12 +3145,12 @@ static int ca8210_probe(struct spi_device *spi_device)
                goto error;
        }
 
+       priv->spi->dev.platform_data = pdata;
        ret = ca8210_get_platform_data(priv->spi, pdata);
        if (ret) {
                dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
                goto error;
        }
-       priv->spi->dev.platform_data = pdata;
 
        ret = ca8210_dev_com_init(priv);
        if (ret) {
index 17f2300..8dc04e2 100644 (file)
@@ -800,7 +800,7 @@ mcr20a_handle_rx_read_buf_complete(void *context)
        if (!skb)
                return;
 
-       memcpy(skb_put(skb, len), lp->rx_buf, len);
+       __skb_put_data(skb, lp->rx_buf, len);
        ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
 
        print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
index f61d094..1a251f7 100644 (file)
@@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = {
 
 void nsim_fib_exit(void)
 {
-       unregister_pernet_subsys(&nsim_fib_net_ops);
        unregister_fib_notifier(&nsim_fib_nb);
+       unregister_pernet_subsys(&nsim_fib_net_ops);
 }
 
 int nsim_fib_init(void)
@@ -258,6 +258,7 @@ int nsim_fib_init(void)
        err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
        if (err < 0) {
                pr_err("Failed to register fib notifier\n");
+               unregister_pernet_subsys(&nsim_fib_net_ops);
                goto err_out;
        }
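
The netdevsim hunks make the exit path unregister in the reverse order of registration and make the init path unwind the pernet subsystem when notifier registration fails. A generic sketch of that init/exit symmetry, using a netdevice notifier as a stand-in for the fib notifier and hypothetical ops/notifier objects:

    #include <linux/init.h>
    #include <linux/netdevice.h>
    #include <linux/notifier.h>
    #include <net/net_namespace.h>

    static struct pernet_operations foo_net_ops;    /* fields omitted for brevity */

    static int foo_netdev_event(struct notifier_block *nb, unsigned long event,
                                void *ptr)
    {
            return NOTIFY_DONE;
    }

    static struct notifier_block foo_netdev_nb = {
            .notifier_call = foo_netdev_event,
    };

    static int __init foo_init(void)
    {
            int err;

            err = register_pernet_subsys(&foo_net_ops);
            if (err)
                    return err;

            err = register_netdevice_notifier(&foo_netdev_nb);
            if (err) {
                    /* unwind what already succeeded */
                    unregister_pernet_subsys(&foo_net_ops);
                    return err;
            }

            return 0;
    }

    static void __exit foo_exit(void)
    {
            /* tear down in reverse order of registration */
            unregister_netdevice_notifier(&foo_netdev_nb);
            unregister_pernet_subsys(&foo_net_ops);
    }
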
 
index 2aa7b2e..1eb5d4f 100644 (file)
 #include <linux/of_gpio.h>
 #include <linux/gpio/consumer.h>
 
+#define AT803X_SPECIFIC_STATUS                 0x11
+#define AT803X_SS_SPEED_MASK                   (3 << 14)
+#define AT803X_SS_SPEED_1000                   (2 << 14)
+#define AT803X_SS_SPEED_100                    (1 << 14)
+#define AT803X_SS_SPEED_10                     (0 << 14)
+#define AT803X_SS_DUPLEX                       BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED                BIT(11)
+#define AT803X_SS_MDIX                         BIT(6)
+
 #define AT803X_INTR_ENABLE                     0x12
 #define AT803X_INTR_ENABLE_AUTONEG_ERR         BIT(15)
 #define AT803X_INTR_ENABLE_SPEED_CHANGED       BIT(14)
@@ -357,6 +366,64 @@ static int at803x_aneg_done(struct phy_device *phydev)
        return aneg_done;
 }
 
+static int at803x_read_status(struct phy_device *phydev)
+{
+       int ss, err, old_link = phydev->link;
+
+       /* Update the link, but return if there was an error */
+       err = genphy_update_link(phydev);
+       if (err)
+               return err;
+
+       /* why bother the PHY if nothing can have changed */
+       if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+               return 0;
+
+       phydev->speed = SPEED_UNKNOWN;
+       phydev->duplex = DUPLEX_UNKNOWN;
+       phydev->pause = 0;
+       phydev->asym_pause = 0;
+
+       err = genphy_read_lpa(phydev);
+       if (err < 0)
+               return err;
+
+       /* Read the AT8035 PHY-Specific Status register, which indicates the
+        * speed and duplex that the PHY is actually using, irrespective of
+        * whether we are in autoneg mode or not.
+        */
+       ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+       if (ss < 0)
+               return ss;
+
+       if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+               switch (ss & AT803X_SS_SPEED_MASK) {
+               case AT803X_SS_SPEED_10:
+                       phydev->speed = SPEED_10;
+                       break;
+               case AT803X_SS_SPEED_100:
+                       phydev->speed = SPEED_100;
+                       break;
+               case AT803X_SS_SPEED_1000:
+                       phydev->speed = SPEED_1000;
+                       break;
+               }
+               if (ss & AT803X_SS_DUPLEX)
+                       phydev->duplex = DUPLEX_FULL;
+               else
+                       phydev->duplex = DUPLEX_HALF;
+               if (ss & AT803X_SS_MDIX)
+                       phydev->mdix = ETH_TP_MDI_X;
+               else
+                       phydev->mdix = ETH_TP_MDI;
+       }
+
+       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+               phy_resolve_aneg_pause(phydev);
+
+       return 0;
+}
+
 static struct phy_driver at803x_driver[] = {
 {
        /* ATHEROS 8035 */
@@ -370,6 +437,7 @@ static struct phy_driver at803x_driver[] = {
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
        /* PHY_GBIT_FEATURES */
+       .read_status            = at803x_read_status,
        .ack_interrupt          = at803x_ack_interrupt,
        .config_intr            = at803x_config_intr,
 }, {
@@ -399,6 +467,7 @@ static struct phy_driver at803x_driver[] = {
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
        /* PHY_GBIT_FEATURES */
+       .read_status            = at803x_read_status,
        .aneg_done              = at803x_aneg_done,
        .ack_interrupt          = &at803x_ack_interrupt,
        .config_intr            = &at803x_config_intr,
index 8fc3386..af8eabe 100644 (file)
@@ -572,6 +572,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .name           = _name,                                        \
        /* PHY_BASIC_FEATURES */                                        \
        .flags          = PHY_IS_INTERNAL,                              \
+       .soft_reset     = genphy_soft_reset,                            \
        .config_init    = bcm7xxx_config_init,                          \
        .suspend        = bcm7xxx_suspend,                              \
        .resume         = bcm7xxx_config_init,                          \
index e282600..c1d345c 100644 (file)
@@ -121,7 +121,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
                return;
 
        if (mdiodev->reset_gpio)
-               gpiod_set_value(mdiodev->reset_gpio, value);
+               gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
 
        if (mdiodev->reset_ctrl) {
                if (value)
index 2fea554..63dedec 100644 (file)
@@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
        return genphy_config_aneg(phydev);
 }
 
+static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
+                                           const u32 ksz_phy_id)
+{
+       int ret;
+
+       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+               return 0;
+
+       ret = phy_read(phydev, MII_BMSR);
+       if (ret < 0)
+               return ret;
+
+       /* The KSZ8051 PHY and the KSZ8794/KSZ8795/KSZ8765 switches share the
+        * exact same PHY ID. They can be told apart by the presence of the
+        * extended capability registers: the KSZ8051 PHY has them, while the
+        * switches do not.
+        */
+       ret &= BMSR_ERCAP;
+       if (ksz_phy_id == PHY_ID_KSZ8051)
+               return ret;
+       else
+               return !ret;
+}
+
+static int ksz8051_match_phy_device(struct phy_device *phydev)
+{
+       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+}
+
 static int ksz8081_config_init(struct phy_device *phydev)
 {
        /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
@@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
        return kszphy_config_init(phydev);
 }
 
+static int ksz8795_match_phy_device(struct phy_device *phydev)
+{
+       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+}
+
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
                                       const struct device_node *of_node,
                                       u16 reg,
@@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
-       .phy_id         = PHY_ID_KSZ8051,
-       .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8051",
        /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8051_type,
@@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
        .get_sset_count = kszphy_get_sset_count,
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
+       .match_phy_device = ksz8051_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
-       .phy_id         = PHY_ID_KSZ8795,
-       .phy_id_mask    = MICREL_PHY_ID_MASK,
-       .name           = "Micrel KSZ8795",
+       .name           = "Micrel KSZ87XX Switch",
        /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
+       .match_phy_device = ksz8795_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
index 7935593..a1caeee 100644 (file)
@@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
 {
        int val;
 
+       linkmode_zero(phydev->lp_advertising);
+
        val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
        if (val < 0)
                return val;
index 369903d..9412669 100644 (file)
@@ -283,6 +283,18 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
        phydev->eee_broken_modes = broken;
 }
 
+void phy_resolve_aneg_pause(struct phy_device *phydev)
+{
+       if (phydev->duplex == DUPLEX_FULL) {
+               phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                                                 phydev->lp_advertising);
+               phydev->asym_pause = linkmode_test_bit(
+                       ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                       phydev->lp_advertising);
+       }
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
+
 /**
  * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
  * @phydev: The phy_device struct
@@ -305,13 +317,7 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
                        break;
                }
 
-       if (phydev->duplex == DUPLEX_FULL) {
-               phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-                                                 phydev->lp_advertising);
-               phydev->asym_pause = linkmode_test_bit(
-                       ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-                       phydev->lp_advertising);
-       }
+       phy_resolve_aneg_pause(phydev);
 }
 EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
 
index 7c92afd..105d389 100644 (file)
@@ -457,6 +457,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                                                           val);
                                change_autoneg = true;
                                break;
+                       case MII_CTRL1000:
+                               mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising,
+                                                               val);
+                               change_autoneg = true;
+                               break;
                        default:
                                /* do nothing */
                                break;
@@ -567,9 +572,6 @@ int phy_start_aneg(struct phy_device *phydev)
        if (AUTONEG_DISABLE == phydev->autoneg)
                phy_sanitize_settings(phydev);
 
-       /* Invalidate LP advertising flags */
-       linkmode_zero(phydev->lp_advertising);
-
        err = phy_config_aneg(phydev);
        if (err < 0)
                goto out_unlock;
index d347ddc..adb66a2 100644 (file)
@@ -1783,34 +1783,18 @@ done:
 }
 EXPORT_SYMBOL(genphy_update_link);
 
-/**
- * genphy_read_status - check the link status and update current link state
- * @phydev: target phy_device struct
- *
- * Description: Check the link, then figure out the current state
- *   by comparing what we advertise with what the link partner
- *   advertises.  Start by checking the gigabit possibilities,
- *   then move on to 10/100.
- */
-int genphy_read_status(struct phy_device *phydev)
+int genphy_read_lpa(struct phy_device *phydev)
 {
-       int lpa, lpagb, err, old_link = phydev->link;
-
-       /* Update the link, but return if there was an error */
-       err = genphy_update_link(phydev);
-       if (err)
-               return err;
-
-       /* why bother the PHY if nothing can have changed */
-       if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
-               return 0;
+       int lpa, lpagb;
 
-       phydev->speed = SPEED_UNKNOWN;
-       phydev->duplex = DUPLEX_UNKNOWN;
-       phydev->pause = 0;
-       phydev->asym_pause = 0;
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (!phydev->autoneg_complete) {
+                       mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+                                                       0);
+                       mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+                       return 0;
+               }
 
-       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
                if (phydev->is_gigabit_capable) {
                        lpagb = phy_read(phydev, MII_STAT1000);
                        if (lpagb < 0)
@@ -1838,6 +1822,46 @@ int genphy_read_status(struct phy_device *phydev)
                        return lpa;
 
                mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
+       } else {
+               linkmode_zero(phydev->lp_advertising);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(genphy_read_lpa);
+
+/**
+ * genphy_read_status - check the link status and update current link state
+ * @phydev: target phy_device struct
+ *
+ * Description: Check the link, then figure out the current state
+ *   by comparing what we advertise with what the link partner
+ *   advertises.  Start by checking the gigabit possibilities,
+ *   then move on to 10/100.
+ */
+int genphy_read_status(struct phy_device *phydev)
+{
+       int err, old_link = phydev->link;
+
+       /* Update the link, but return if there was an error */
+       err = genphy_update_link(phydev);
+       if (err)
+               return err;
+
+       /* why bother the PHY if nothing can have changed */
+       if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+               return 0;
+
+       phydev->speed = SPEED_UNKNOWN;
+       phydev->duplex = DUPLEX_UNKNOWN;
+       phydev->pause = 0;
+       phydev->asym_pause = 0;
+
+       err = genphy_read_lpa(phydev);
+       if (err < 0)
+               return err;
+
+       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
                phy_resolve_aneg_linkmode(phydev);
        } else if (phydev->autoneg == AUTONEG_DISABLE) {
                int bmcr = phy_read(phydev, MII_BMCR);
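
genphy_read_lpa() splits the link-partner ability parsing out of genphy_read_status() so vendor drivers (such as at803x above) can combine it with their own speed/duplex resolution. A sketch of such a read_status callback, with the vendor-specific decode left as a comment:

    #include <linux/phy.h>

    static int foo_read_status(struct phy_device *phydev)
    {
            int err, old_link = phydev->link;

            err = genphy_update_link(phydev);
            if (err)
                    return err;

            /* nothing to do if autoneg is on and the link did not change */
            if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
                    return 0;

            phydev->speed = SPEED_UNKNOWN;
            phydev->duplex = DUPLEX_UNKNOWN;
            phydev->pause = 0;
            phydev->asym_pause = 0;

            err = genphy_read_lpa(phydev);
            if (err < 0)
                    return err;

            /* ...decode the vendor-specific status register into
             * phydev->speed and phydev->duplex here...
             */

            if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
                    phy_resolve_aneg_pause(phydev);

            return 0;
    }
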
index a5a57ca..20e2ebe 100644 (file)
@@ -576,7 +576,7 @@ static int phylink_register_sfp(struct phylink *pl,
 
 /**
  * phylink_create() - create a phylink instance
- * @ndev: a pointer to the &struct net_device
+ * @config: a pointer to the target &struct phylink_config
  * @fwnode: a pointer to a &struct fwnode_handle describing the network
  *     interface
  * @iface: the desired link mode defined by &typedef phy_interface_t
index 734de7d..e1fabb3 100644 (file)
@@ -238,7 +238,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb->ip_summed = CHECKSUM_NONE;
        ip_select_ident(net, skb, NULL);
@@ -358,7 +358,7 @@ static int pptp_rcv(struct sk_buff *skb)
        po = lookup_chan(htons(header->call_id), iph->saddr);
        if (po) {
                skb_dst_drop(skb);
-               nf_reset(skb);
+               nf_reset_ct(skb);
                return sk_receive_skb(sk_pppox(po), skb, 0);
        }
 drop:
index aab0be4..a8d3141 100644 (file)
@@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
        e = tun_flow_find(head, rxhash);
        if (likely(e)) {
                /* TODO: keep queueing to old queue until it's empty? */
-               if (e->queue_index != queue_index)
-                       e->queue_index = queue_index;
+               if (READ_ONCE(e->queue_index) != queue_index)
+                       WRITE_ONCE(e->queue_index, queue_index);
                if (e->updated != jiffies)
                        e->updated = jiffies;
                sock_rps_record_flow_hash(e->rps_rxhash);
@@ -1104,7 +1104,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        skb_orphan(skb);
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (ptr_ring_produce(&tfile->tx_ring, skb))
                goto drop;
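
The tun hunk annotates e->queue_index with READ_ONCE()/WRITE_ONCE() because the field is read on a lockless fast path while being updated elsewhere; the annotations stop the compiler from tearing or re-reading the value. A minimal sketch on a hypothetical flow struct:

    #include <linux/compiler.h>
    #include <linux/types.h>

    struct foo_flow {
            u16 queue_index;        /* written with WRITE_ONCE, read locklessly */
    };

    static void foo_flow_update(struct foo_flow *e, u16 queue_index)
    {
            if (READ_ONCE(e->queue_index) != queue_index)
                    WRITE_ONCE(e->queue_index, queue_index);
    }

    static u16 foo_flow_queue(const struct foo_flow *e)
    {
            return READ_ONCE(e->queue_index);
    }
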
index ce78714..74849da 100644 (file)
@@ -186,7 +186,7 @@ struct hso_tiocmget {
        int    intr_completed;
        struct usb_endpoint_descriptor *endp;
        struct urb *urb;
-       struct hso_serial_state_notification serial_state_notification;
+       struct hso_serial_state_notification *serial_state_notification;
        u16    prev_UART_state_bitmap;
        struct uart_icount icount;
 };
@@ -1432,7 +1432,7 @@ static int tiocmget_submit_urb(struct hso_serial *serial,
                         usb_rcvintpipe(usb,
                                        tiocmget->endp->
                                        bEndpointAddress & 0x7F),
-                        &tiocmget->serial_state_notification,
+                        tiocmget->serial_state_notification,
                         sizeof(struct hso_serial_state_notification),
                         tiocmget_intr_callback, serial,
                         tiocmget->endp->bInterval);
@@ -1479,7 +1479,7 @@ static void tiocmget_intr_callback(struct urb *urb)
        /* wIndex should be the USB interface number of the port to which the
         * notification applies, which should always be the Modem port.
         */
-       serial_state_notification = &tiocmget->serial_state_notification;
+       serial_state_notification = tiocmget->serial_state_notification;
        if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
            serial_state_notification->bNotification != B_NOTIFICATION ||
            le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
@@ -2565,6 +2565,8 @@ static void hso_free_tiomget(struct hso_serial *serial)
                usb_free_urb(tiocmget->urb);
                tiocmget->urb = NULL;
                serial->tiocmget = NULL;
+               kfree(tiocmget->serial_state_notification);
+               tiocmget->serial_state_notification = NULL;
                kfree(tiocmget);
        }
 }
@@ -2615,19 +2617,26 @@ static struct hso_device *hso_create_bulk_serial_device(
                num_urbs = 2;
                serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
                                           GFP_KERNEL);
+               serial->tiocmget->serial_state_notification
+                       = kzalloc(sizeof(struct hso_serial_state_notification),
+                                          GFP_KERNEL);
                /* it isn't going to break our heart if serial->tiocmget
                 *  allocation fails don't bother checking this.
                 */
-               if (serial->tiocmget) {
+               if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
                        tiocmget = serial->tiocmget;
+                       tiocmget->endp = hso_get_ep(interface,
+                                                   USB_ENDPOINT_XFER_INT,
+                                                   USB_DIR_IN);
+                       if (!tiocmget->endp) {
+                               dev_err(&interface->dev, "Failed to find INT IN ep\n");
+                               goto exit;
+                       }
+
                        tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (tiocmget->urb) {
                                mutex_init(&tiocmget->mutex);
                                init_waitqueue_head(&tiocmget->waitq);
-                               tiocmget->endp = hso_get_ep(
-                                       interface,
-                                       USB_ENDPOINT_XFER_INT,
-                                       USB_DIR_IN);
                        } else
                                hso_free_tiomget(serial);
                }
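
The hso hunks move the serial-state notification out of an embedded struct field into its own kzalloc() allocation, since USB transfer buffers have to be individually allocated, DMA-capable memory rather than fields inside a larger structure. A sketch of filling an interrupt URB from such a buffer (hypothetical helper; the buffer must be freed in the matching teardown path):

    #include <linux/slab.h>
    #include <linux/usb.h>

    static int foo_setup_int_urb(struct urb *urb, struct usb_device *udev,
                                 unsigned int pipe, int len,
                                 usb_complete_t complete_fn, void *ctx,
                                 int interval)
    {
            void *buf = kzalloc(len, GFP_KERNEL);   /* separate, DMA-able buffer */

            if (!buf)
                    return -ENOMEM;

            usb_fill_int_urb(urb, udev, pipe, buf, len, complete_fn, ctx, interval);
            return 0;
    }
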
index 58f5a21..6294809 100644 (file)
@@ -3782,10 +3782,14 @@ static int lan78xx_probe(struct usb_interface *intf,
        /* driver requires remote-wakeup capability during autosuspend. */
        intf->needs_remote_wakeup = 1;
 
+       ret = lan78xx_phy_init(dev);
+       if (ret < 0)
+               goto out4;
+
        ret = register_netdev(netdev);
        if (ret != 0) {
                netif_err(dev, probe, netdev, "couldn't register the device\n");
-               goto out4;
+               goto out5;
        }
 
        usb_set_intfdata(intf, dev);
@@ -3798,14 +3802,10 @@ static int lan78xx_probe(struct usb_interface *intf,
        pm_runtime_set_autosuspend_delay(&udev->dev,
                                         DEFAULT_AUTOSUSPEND_DELAY);
 
-       ret = lan78xx_phy_init(dev);
-       if (ret < 0)
-               goto out5;
-
        return 0;
 
 out5:
-       unregister_netdev(netdev);
+       phy_disconnect(netdev->phydev);
 out4:
        usb_free_urb(dev->urb_intr);
 out3:
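
The lan78xx reordering above brings the PHY up before register_netdev();
once the netdev is registered it is visible to userspace and .ndo_open can
run, so everything open depends on must already exist, and the error labels
now unwind in the matching reverse order. A generic sketch of that probe
rule (setup_phy() is a hypothetical stand-in for lan78xx_phy_init()):

        ret = setup_phy(dev);
        if (ret < 0)
                goto out_free_urb;
        ret = register_netdev(netdev);  /* device is live after this call */
        if (ret)
                goto out_phy;
        return 0;

out_phy:
        phy_disconnect(netdev->phydev);
out_free_urb:
        usb_free_urb(dev->urb_intr);
        return ret;
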
index b6dc5d7..596428e 100644 (file)
@@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
@@ -1350,6 +1351,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+       {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
        {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index 0872609..cee9fef 100644 (file)
@@ -4799,10 +4799,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
 
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-       mutex_lock(&tp->control);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       mutex_unlock(&tp->control);
+       set_ethernet_addr(tp);
        return rtl8152_resume(intf);
 }
 
index c5d4a00..681e0de 100644 (file)
@@ -335,7 +335,7 @@ static void sr_set_multicast(struct net_device *net)
 static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(net);
-       __le16 res;
+       __le16 res = 0;
 
        mutex_lock(&dev->phy_mutex);
        sr_set_sw_mii(dev);
index ba98e09..5a635f0 100644 (file)
@@ -1585,7 +1585,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Don't wait up for transmitted skbs to be freed. */
        if (!use_napi) {
                skb_orphan(skb);
-               nf_reset(skb);
+               nf_reset_ct(skb);
        }
 
        /* If running out of space, stop queue to avoid getting packets that we
index a4b38a9..ee52bde 100644 (file)
@@ -366,7 +366,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
        struct neighbour *neigh;
        int ret;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
@@ -459,7 +459,7 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
 
        /* reset skb device */
        if (likely(err == 1))
-               nf_reset(skb);
+               nf_reset_ct(skb);
        else
                skb = NULL;
 
@@ -560,7 +560,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
        bool is_v6gw = false;
        int ret = -EINVAL;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -670,7 +670,7 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
 
        /* reset skb device */
        if (likely(err == 1))
-               nf_reset(skb);
+               nf_reset_ct(skb);
        else
                skb = NULL;
 
index dc45d16..383d4fa 100644 (file)
@@ -2118,12 +2118,15 @@ static int ath10k_init_uart(struct ath10k *ar)
                return ret;
        }
 
-       if (!uart_print && ar->hw_params.uart_pin_workaround) {
-               ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
-                                        ar->hw_params.uart_pin);
-               if (ret) {
-                       ath10k_warn(ar, "failed to set UART TX pin: %d", ret);
-                       return ret;
+       if (!uart_print) {
+               if (ar->hw_params.uart_pin_workaround) {
+                       ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
+                                                ar->hw_params.uart_pin);
+                       if (ret) {
+                               ath10k_warn(ar, "failed to set UART TX pin: %d",
+                                           ret);
+                               return ret;
+                       }
                }
 
                return 0;
index 7573af2..c2db758 100644 (file)
@@ -162,12 +162,13 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
                                         &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
-       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -224,12 +225,13 @@ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
                                         &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
-       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
index 5c8602d..8742180 100644 (file)
@@ -646,6 +646,7 @@ static struct scatterlist *alloc_sgtable(int size)
                                if (new_page)
                                        __free_page(new_page);
                        }
+                       kfree(table);
                        return NULL;
                }
                alloc_size = min_t(int, size, PAGE_SIZE);
index f8e4f0f..f09e368 100644 (file)
@@ -112,38 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
  */
 static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
 {
-       return ofs + trans->cfg->trans.umac_prph_offset;
+       return ofs + trans->trans_cfg->umac_prph_offset;
 }
 
 static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
 {
        return iwl_read_prph_no_grab(trans, ofs +
-                                    trans->cfg->trans.umac_prph_offset);
+                                    trans->trans_cfg->umac_prph_offset);
 }
 
 static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
 {
-       return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
+       return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
 }
 
 static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
                                               u32 val)
 {
-       iwl_write_prph_no_grab(trans,  ofs + trans->cfg->trans.umac_prph_offset,
+       iwl_write_prph_no_grab(trans,  ofs + trans->trans_cfg->umac_prph_offset,
                               val);
 }
 
 static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
                                       u32 val)
 {
-       iwl_write_prph(trans,  ofs + trans->cfg->trans.umac_prph_offset, val);
+       iwl_write_prph(trans,  ofs + trans->trans_cfg->umac_prph_offset, val);
 }
 
 static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
                                         u32 bits, u32 mask, int timeout)
 {
        return iwl_poll_prph_bit(trans, addr +
-                                trans->cfg->trans.umac_prph_offset,
+                                trans->trans_cfg->umac_prph_offset,
                                 bits, mask, timeout);
 }
 
index 32a5e4e..d9eb2b2 100644 (file)
@@ -420,6 +420,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        };
        int ret;
 
+       if (mvm->trans->cfg->tx_with_siso_diversity)
+               init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+
        lockdep_assert_held(&mvm->mutex);
 
        mvm->rfkill_safe_init_done = false;
@@ -694,12 +697,13 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
-       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -731,13 +735,14 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
        if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
-           (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+           (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -791,11 +796,16 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
+       if (tbl_rev != 0) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
        mvm->geo_rev = tbl_rev;
        for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
                for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
@@ -889,15 +899,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
         * firmware versions.  Unfortunately, we don't have a TLV API
         * flag to rely on, so rely on the major version which is in
         * the first byte of ucode_ver.  This was implemented
-        * initially on version 38 and then backported to29 and 17.
-        * The intention was to have it in 36 as well, but not all
-        * 8000 family got this feature enabled.  The 8000 family is
-        * the only one using version 36, so skip this version
-        * entirely.
+        * initially on version 38 and then backported to 17.  It was
+        * also backported to 29, but only for 7265D devices.  The
+        * intention was to have it in 36 as well, but not all 8000
+        * family got this feature enabled.  The 8000 family is the
+        * only one using version 36, so skip this version entirely.
         */
        return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
-              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
-              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
+              (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
+               ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+                CSR_HW_REV_TYPE_7265D));
 }
 
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -1020,11 +1032,16 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
 
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
+       if (tbl_rev != 0) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
        enabled = &wifi_pkg->package.elements[1];
        if (enabled->type != ACPI_TYPE_INTEGER ||
            (enabled->integer.value != 0 && enabled->integer.value != 1)) {
index cd1b100..d31f96c 100644 (file)
@@ -4881,11 +4881,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
        if (!iwl_mvm_has_new_rx_api(mvm))
                return;
 
-       notif->cookie = mvm->queue_sync_cookie;
-
-       if (notif->sync)
+       if (notif->sync) {
+               notif->cookie = mvm->queue_sync_cookie;
                atomic_set(&mvm->queue_sync_counter,
                           mvm->trans->num_rx_queues);
+       }
 
        ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
                                      size, !notif->sync);
@@ -4905,7 +4905,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 
 out:
        atomic_set(&mvm->queue_sync_counter, 0);
-       mvm->queue_sync_cookie++;
+       if (notif->sync)
+               mvm->queue_sync_cookie++;
 }
 
 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
index 75fa8a6..7498038 100644 (file)
@@ -107,13 +107,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        /* allocate ucode sections in dram and set addresses */
        ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
-       if (ret) {
-               dma_free_coherent(trans->dev,
-                                 sizeof(*prph_scratch),
-                                 prph_scratch,
-                                 trans_pcie->prph_scratch_dma_addr);
-               return ret;
-       }
+       if (ret)
+               goto err_free_prph_scratch;
+
 
        /* Allocate prph information
         * currently we don't assign to the prph info anything, but it would get
@@ -121,16 +117,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
                                       &trans_pcie->prph_info_dma_addr,
                                       GFP_KERNEL);
-       if (!prph_info)
-               return -ENOMEM;
+       if (!prph_info) {
+               ret = -ENOMEM;
+               goto err_free_prph_scratch;
+       }
 
        /* Allocate context info */
        ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
                                            sizeof(*ctxt_info_gen3),
                                            &trans_pcie->ctxt_info_dma_addr,
                                            GFP_KERNEL);
-       if (!ctxt_info_gen3)
-               return -ENOMEM;
+       if (!ctxt_info_gen3) {
+               ret = -ENOMEM;
+               goto err_free_prph_info;
+       }
 
        ctxt_info_gen3->prph_info_base_addr =
                cpu_to_le64(trans_pcie->prph_info_dma_addr);
@@ -186,6 +186,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
 
        return 0;
+
+err_free_prph_info:
+       dma_free_coherent(trans->dev,
+                         sizeof(*prph_info),
+                       prph_info,
+                       trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+       dma_free_coherent(trans->dev,
+                         sizeof(*prph_scratch),
+                       prph_scratch,
+                       trans_pcie->prph_scratch_dma_addr);
+       return ret;
+
 }
 
 void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
index e29c477..6f4bb7c 100644 (file)
@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
-       {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
        {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
        {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
@@ -643,34 +645,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+       {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
@@ -726,62 +728,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
        {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
 
-       {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+       {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
@@ -821,34 +821,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+       {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
index f8a1f98..6961f00 100644 (file)
@@ -3272,11 +3272,17 @@ static struct iwl_trans_dump_data
                ptr = cmdq->write_ptr;
                for (i = 0; i < cmdq->n_window; i++) {
                        u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+                       u8 tfdidx;
                        u32 caplen, cmdlen;
 
+                       if (trans->trans_cfg->use_tfh)
+                               tfdidx = idx;
+                       else
+                               tfdidx = ptr;
+
                        cmdlen = iwl_trans_pcie_get_cmdlen(trans,
-                                                          cmdq->tfds +
-                                                          tfd_size * ptr);
+                                                          (u8 *)cmdq->tfds +
+                                                          tfd_size * tfdidx);
                        caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
 
                        if (cmdlen) {
@@ -3450,6 +3456,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->reg_lock);
        mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+       trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+                                                  WQ_HIGHPRI | WQ_UNBOUND, 1);
+       if (!trans_pcie->rba.alloc_wq) {
+               ret = -ENOMEM;
+               goto out_free_trans;
+       }
+       INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
        if (!trans_pcie->tso_hdr_page) {
                ret = -ENOMEM;
@@ -3584,10 +3599,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
         }
 
-       trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
-                                                  WQ_HIGHPRI | WQ_UNBOUND, 1);
-       INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
        mutex_init(&trans_pcie->fw_mon_data.mutex);
@@ -3599,6 +3610,8 @@ out_free_ict:
        iwl_pcie_free_ict(trans);
 out_no_pci:
        free_percpu(trans_pcie->tso_hdr_page);
+       destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_trans:
        iwl_trans_free(trans);
        return ERR_PTR(ret);
 }
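
The trans.c hunks above move the rb_allocator workqueue allocation to a
point where its NULL return can be checked and add destroy_workqueue() to
the unwind path. A minimal, driver-independent sketch of the same pattern
("example_wq" is a made-up name):

        wq = alloc_workqueue("example_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
        if (!wq)
                return -ENOMEM;
        /* ... further setup; if anything later fails: */
        destroy_workqueue(wq);
        return ret;
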
index 6359560..14f562c 100644 (file)
@@ -1261,8 +1261,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
        skb_orphan(skb);
        skb_dst_drop(skb);
        skb->mark = 0;
-       secpath_reset(skb);
-       nf_reset(skb);
+       skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
        /*
         * Get absolute mactime here so all HWs RX at the "same time", and
@@ -4026,7 +4026,7 @@ static int __init init_mac80211_hwsim(void)
        err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
        if (err < 0) {
                rtnl_unlock();
-               goto out_free_radios;
+               goto out_free_mon;
        }
 
        err = register_netdevice(hwsim_mon);
index 2b216ed..a90a518 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/leds.h>
 #include <linux/mutex.h>
 #include <linux/etherdevice.h>
-#include <linux/input-polldev.h>
 #include <linux/kfifo.h>
 #include <linux/hrtimer.h>
 #include <linux/average.h>
index 4d4e388..f239530 100644 (file)
@@ -555,7 +555,7 @@ static ssize_t rt2x00debug_write_restart_hw(struct file *file,
 {
        struct rt2x00debug_intf *intf = file->private_data;
        struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
-       static unsigned long last_reset;
+       static unsigned long last_reset = INITIAL_JIFFIES;
 
        if (!rt2x00_has_cap_restart_hw(rt2x00dev))
                return -EOPNOTSUPP;
index 240f762..103ed00 100644 (file)
@@ -719,7 +719,6 @@ err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
 err:
-       module_put(THIS_MODULE);
        return err;
 }
 
index e14ec75..482c6c8 100644 (file)
@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
        return 0;
 }
 
-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
-                                 struct sk_buff *skb,
-                                 struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+                            struct sk_buff *skb,
+                            struct sk_buff_head *list)
 {
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;
@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
                        queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
-                       return ~0U;
+                       return -ENOENT;
                }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                kfree_skb(nskb);
        }
 
-       return cons;
+       queue->rx.rsp_cons = cons;
+
+       return 0;
 }
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@@ -1045,8 +1047,7 @@ err:
                skb->data_len = rx->status;
                skb->len += rx->status;
 
-               i = xennet_fill_frags(queue, skb, &tmpq);
-               if (unlikely(i == ~0U))
+               if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
                        goto err;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1056,7 +1057,7 @@ err:
 
                __skb_queue_tail(&rxq, skb);
 
-               queue->rx.rsp_cons = ++i;
+               i = ++queue->rx.rsp_cons;
                work_done++;
        }
 
index c5289ea..e897e4d 100644 (file)
@@ -547,18 +547,25 @@ static int pn533_usb_probe(struct usb_interface *interface,
 
        rc = pn533_finalize_setup(priv);
        if (rc)
-               goto error;
+               goto err_deregister;
 
        usb_set_intfdata(interface, phy);
 
        return 0;
 
+err_deregister:
+       pn533_unregister_device(phy->priv);
 error:
+       usb_kill_urb(phy->in_urb);
+       usb_kill_urb(phy->out_urb);
+       usb_kill_urb(phy->ack_urb);
+
        usb_free_urb(phy->in_urb);
        usb_free_urb(phy->out_urb);
        usb_free_urb(phy->ack_urb);
        usb_put_dev(phy->udev);
        kfree(in_buf);
+       kfree(phy->ack_buffer);
 
        return rc;
 }
index 108f60b..94bfbee 100644 (file)
@@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
         */
        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                return;
-       revalidate_disk(ns->disk);
        blk_set_queue_dying(ns->queue);
        /* Forcibly unquiesce queues to avoid blocking dispatch */
        blk_mq_unquiesce_queue(ns->queue);
+       /*
+        * Revalidate after unblocking dispatchers that may be holding bd_mutex
+        */
+       revalidate_disk(ns->disk);
 }
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -113,10 +116,26 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
        /*
         * Only new queue scan work when admin and IO queues are both alive
         */
-       if (ctrl->state == NVME_CTRL_LIVE)
+       if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
                queue_work(nvme_wq, &ctrl->scan_work);
 }
 
+/*
+ * Use this function to proceed with scheduling reset_work for a controller
+ * that had previously been set to the resetting state. This is intended for
+ * code paths that can't be interrupted by other reset attempts. A hot removal
+ * may prevent this from succeeding.
+ */
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+{
+       if (ctrl->state != NVME_CTRL_RESETTING)
+               return -EBUSY;
+       if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+               return -EBUSY;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -134,8 +153,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
        ret = nvme_reset_ctrl(ctrl);
        if (!ret) {
                flush_work(&ctrl->reset_work);
-               if (ctrl->state != NVME_CTRL_LIVE &&
-                   ctrl->state != NVME_CTRL_ADMIN_ONLY)
+               if (ctrl->state != NVME_CTRL_LIVE)
                        ret = -ENETRESET;
        }
 
@@ -312,15 +330,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
        old_state = ctrl->state;
        switch (new_state) {
-       case NVME_CTRL_ADMIN_ONLY:
-               switch (old_state) {
-               case NVME_CTRL_CONNECTING:
-                       changed = true;
-                       /* FALLTHRU */
-               default:
-                       break;
-               }
-               break;
        case NVME_CTRL_LIVE:
                switch (old_state) {
                case NVME_CTRL_NEW:
@@ -336,7 +345,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_ADMIN_ONLY:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -356,7 +364,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        case NVME_CTRL_DELETING:
                switch (old_state) {
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_ADMIN_ONLY:
                case NVME_CTRL_RESETTING:
                case NVME_CTRL_CONNECTING:
                        changed = true;
@@ -378,8 +385,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                break;
        }
 
-       if (changed)
+       if (changed) {
                ctrl->state = new_state;
+               wake_up_all(&ctrl->state_wq);
+       }
 
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (changed && ctrl->state == NVME_CTRL_LIVE)
@@ -388,6 +397,39 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
 
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+       switch (ctrl->state) {
+       case NVME_CTRL_NEW:
+       case NVME_CTRL_LIVE:
+       case NVME_CTRL_RESETTING:
+       case NVME_CTRL_CONNECTING:
+               return false;
+       case NVME_CTRL_DELETING:
+       case NVME_CTRL_DEAD:
+               return true;
+       default:
+               WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+               return true;
+       }
+}
+
+/*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+ */
+bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+{
+       wait_event(ctrl->state_wq,
+                  nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+                  nvme_state_terminal(ctrl));
+       return ctrl->state == NVME_CTRL_RESETTING;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_reset);
+
 static void nvme_free_ns_head(struct kref *ref)
 {
        struct nvme_ns_head *head =
@@ -847,7 +889,7 @@ out:
 static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-               u32 meta_seed, u32 *result, unsigned timeout)
+               u32 meta_seed, u64 *result, unsigned timeout)
 {
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
@@ -888,7 +930,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        else
                ret = nvme_req(req)->status;
        if (result)
-               *result = le32_to_cpu(nvme_req(req)->result.u32);
+               *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
@@ -1303,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl)
                if (ns->disk && nvme_revalidate_disk(ns->disk))
                        nvme_set_queue_dying(ns);
        up_read(&ctrl->namespaces_rwsem);
-
-       nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1320,6 +1360,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
                nvme_unfreeze(ctrl);
                nvme_mpath_unfreeze(ctrl->subsys);
                mutex_unlock(&ctrl->subsys->lock);
+               nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
                mutex_unlock(&ctrl->scan_lock);
        }
        if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1335,6 +1376,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        struct nvme_command c;
        unsigned timeout = 0;
        u32 effects;
+       u64 result;
+       int status;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+               return -EFAULT;
+       if (cmd.flags)
+               return -EINVAL;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = cmd.opcode;
+       c.common.flags = cmd.flags;
+       c.common.nsid = cpu_to_le32(cmd.nsid);
+       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+       c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+       c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+       c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+       c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+       c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+       c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+       if (cmd.timeout_ms)
+               timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+       effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+       status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+                       (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+                       (void __user *)(uintptr_t)cmd.metadata,
+                       cmd.metadata_len, 0, &result, timeout);
+       nvme_passthru_end(ctrl, effects);
+
+       if (status >= 0) {
+               if (put_user(result, &ucmd->result))
+                       return -EFAULT;
+       }
+
+       return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+                       struct nvme_passthru_cmd64 __user *ucmd)
+{
+       struct nvme_passthru_cmd64 cmd;
+       struct nvme_command c;
+       unsigned timeout = 0;
+       u32 effects;
        int status;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -1405,6 +1494,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
                srcu_read_unlock(&head->srcu, idx);
 }
 
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+       if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+               return true;
+       if (is_sed_ioctl(cmd))
+               return true;
+       return false;
+}
+
+static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+                                 void __user *argp,
+                                 struct nvme_ns_head *head,
+                                 int srcu_idx)
+{
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       int ret;
+
+       nvme_get_ctrl(ns->ctrl);
+       nvme_put_ns_from_disk(head, srcu_idx);
+
+       switch (cmd) {
+       case NVME_IOCTL_ADMIN_CMD:
+               ret = nvme_user_cmd(ctrl, NULL, argp);
+               break;
+       case NVME_IOCTL_ADMIN64_CMD:
+               ret = nvme_user_cmd64(ctrl, NULL, argp);
+               break;
+       default:
+               ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+               break;
+       }
+       nvme_put_ctrl(ctrl);
+       return ret;
+}
+
 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
 {
@@ -1422,20 +1546,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
-       if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
-               struct nvme_ctrl *ctrl = ns->ctrl;
-
-               nvme_get_ctrl(ns->ctrl);
-               nvme_put_ns_from_disk(head, srcu_idx);
-
-               if (cmd == NVME_IOCTL_ADMIN_CMD)
-                       ret = nvme_user_cmd(ctrl, NULL, argp);
-               else
-                       ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
-
-               nvme_put_ctrl(ctrl);
-               return ret;
-       }
+       if (is_ctrl_ioctl(cmd))
+               return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
 
        switch (cmd) {
        case NVME_IOCTL_ID:
@@ -1448,6 +1560,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
        case NVME_IOCTL_SUBMIT_IO:
                ret = nvme_submit_io(ns, argp);
                break;
+       case NVME_IOCTL_IO64_CMD:
+               ret = nvme_user_cmd64(ns->ctrl, ns, argp);
+               break;
        default:
                if (ns->ndev)
                        ret = nvme_nvm_ioctl(ns, cmd, arg);
@@ -2540,8 +2655,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                list_add_tail(&subsys->entry, &nvme_subsystems);
        }
 
-       if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
-                       dev_name(ctrl->device))) {
+       ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+                               dev_name(ctrl->device));
+       if (ret) {
                dev_err(ctrl->device,
                        "failed to create sysfs link from subsystem.\n");
                goto out_put_subsystem;
@@ -2786,7 +2902,6 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
 
        switch (ctrl->state) {
        case NVME_CTRL_LIVE:
-       case NVME_CTRL_ADMIN_ONLY:
                break;
        default:
                return -EWOULDBLOCK;
@@ -2838,6 +2953,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
+       case NVME_IOCTL_ADMIN64_CMD:
+               return nvme_user_cmd64(ctrl, NULL, argp);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
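
The core.c hunks above add a 64-bit-result passthrough path
(NVME_IOCTL_ADMIN64_CMD and NVME_IOCTL_IO64_CMD) alongside the existing
32-bit commands. A hedged userspace sketch, assuming the uapi header
exposes struct nvme_passthru_cmd64 with the same fields the kernel side
reads above (opcode, nsid, addr, data_len, cdw10..cdw15, result); the
caller needs CAP_SYS_ADMIN:

        #include <fcntl.h>
        #include <stdio.h>
        #include <stdint.h>
        #include <stdlib.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/nvme_ioctl.h>

        int main(void)
        {
                struct nvme_passthru_cmd64 cmd = { 0 };
                void *buf;
                int fd = open("/dev/nvme0", O_RDWR);

                if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                        return 1;
                cmd.opcode   = 0x06;            /* Identify */
                cmd.addr     = (uintptr_t)buf;
                cmd.data_len = 4096;
                cmd.cdw10    = 1;               /* CNS = Identify Controller */
                if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) == 0)
                        printf("result: 0x%llx\n", (unsigned long long)cmd.result);
                close(fd);
                return 0;
        }
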
@@ -3045,6 +3162,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
 
 nvme_show_int_function(cntlid);
 nvme_show_int_function(numa_node);
+nvme_show_int_function(queue_count);
+nvme_show_int_function(sqsize);
 
 static ssize_t nvme_sysfs_delete(struct device *dev,
                                struct device_attribute *attr, const char *buf,
@@ -3076,7 +3195,6 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
        static const char *const state_name[] = {
                [NVME_CTRL_NEW]         = "new",
                [NVME_CTRL_LIVE]        = "live",
-               [NVME_CTRL_ADMIN_ONLY]  = "only-admin",
                [NVME_CTRL_RESETTING]   = "resetting",
                [NVME_CTRL_CONNECTING]  = "connecting",
                [NVME_CTRL_DELETING]    = "deleting",
@@ -3125,6 +3243,8 @@ static struct attribute *nvme_dev_attrs[] = {
        &dev_attr_address.attr,
        &dev_attr_state.attr,
        &dev_attr_numa_node.attr,
+       &dev_attr_queue_count.attr,
+       &dev_attr_sqsize.attr,
        NULL
 };
 
@@ -3585,11 +3705,10 @@ static void nvme_scan_work(struct work_struct *work)
        struct nvme_id_ctrl *id;
        unsigned nn;
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       /* No tagset on a live ctrl means IO queues could not be created */
+       if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
                return;
 
-       WARN_ON_ONCE(!ctrl->tagset);
-
        if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
                dev_info(ctrl->device, "rescanning namespaces.\n");
                nvme_clear_changed_ns_log(ctrl);
@@ -3750,13 +3869,13 @@ static void nvme_fw_act_work(struct work_struct *work)
                if (time_after(jiffies, fw_act_timeout)) {
                        dev_warn(ctrl->device,
                                "Fw activation timeout, reset controller\n");
-                       nvme_reset_ctrl(ctrl);
-                       break;
+                       nvme_try_sched_reset(ctrl);
+                       return;
                }
                msleep(100);
        }
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
                return;
 
        nvme_start_queues(ctrl);
@@ -3776,7 +3895,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
                nvme_queue_scan(ctrl);
                break;
        case NVME_AER_NOTICE_FW_ACT_STARTING:
-               queue_work(nvme_wq, &ctrl->fw_act_work);
+               /*
+                * We are (ab)using the RESETTING state to prevent subsequent
+                * recovery actions from interfering with the controller's
+                * firmware activation.
+                */
+               if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+                       queue_work(nvme_wq, &ctrl->fw_act_work);
                break;
 #ifdef CONFIG_NVME_MULTIPATH
        case NVME_AER_NOTICE_ANA:
@@ -3899,6 +4024,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
        INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
        INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+       init_waitqueue_head(&ctrl->state_wq);
 
        INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
        memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
index 93f08d7..a0ec40a 100644 (file)
@@ -182,8 +182,7 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live)
 {
-       if (likely(ctrl->state == NVME_CTRL_LIVE ||
-                  ctrl->state == NVME_CTRL_ADMIN_ONLY))
+       if (likely(ctrl->state == NVME_CTRL_LIVE))
                return true;
        return __nvmf_check_ready(ctrl, rq, queue_live);
 }
index b5013c1..22e8401 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sed-opal.h>
 #include <linux/fault-inject.h>
 #include <linux/rcupdate.h>
+#include <linux/wait.h>
 
 #include <trace/events/block.h>
 
@@ -161,7 +162,6 @@ static inline u16 nvme_req_qid(struct request *req)
 enum nvme_ctrl_state {
        NVME_CTRL_NEW,
        NVME_CTRL_LIVE,
-       NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
        NVME_CTRL_RESETTING,
        NVME_CTRL_CONNECTING,
        NVME_CTRL_DELETING,
@@ -199,6 +199,7 @@ struct nvme_ctrl {
        struct cdev cdev;
        struct work_struct reset_work;
        struct work_struct delete_work;
+       wait_queue_head_t state_wq;
 
        struct nvme_subsystem *subsys;
        struct list_head subsys_entry;
@@ -221,6 +222,7 @@ struct nvme_ctrl {
        u16 oacs;
        u16 nssa;
        u16 nr_streams;
+       u16 sqsize;
        u32 max_namespaces;
        atomic_t abort_limit;
        u8 vwc;
@@ -269,7 +271,6 @@ struct nvme_ctrl {
        u16 hmmaxd;
 
        /* Fabrics only */
-       u16 sqsize;
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
@@ -449,6 +450,7 @@ void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -499,6 +501,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
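
The nvme.h hunks above add a state wait queue (state_wq) plus nvme_wait_reset() and nvme_try_sched_reset(). Below is a plausible sketch of how such helpers can be built on nvme_change_ctrl_state() and the new wait queue; nvme_state_terminal() and the nvme_reset_wq workqueue are assumptions taken from the surrounding driver, not definitions shown in these hunks, and the bodies may differ from the actual patch.

/* Sketch only, assuming a helper that recognises terminal states. */
static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	return ctrl->state == NVME_CTRL_DELETING ||
	       ctrl->state == NVME_CTRL_DEAD;
}

/* Block until the controller is RESETTING, or report it never will be. */
bool nvme_wait_reset(struct nvme_ctrl *ctrl)
{
	wait_event(ctrl->state_wq,
		   nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
		   nvme_state_terminal(ctrl));
	return ctrl->state == NVME_CTRL_RESETTING;
}

/* Schedule reset work only if the state machine already allows it. */
int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
{
	if (ctrl->state != NVME_CTRL_RESETTING)
		return -EBUSY;
	if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
		return -EBUSY;
	return 0;
}
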
index c0808f9..869f462 100644 (file)
@@ -773,7 +773,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
                struct bio_vec *bv)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+       unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
+       unsigned int first_prp_len = dev->ctrl.page_size - offset;
 
        iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
        if (dma_mapping_error(dev->dev, iod->first_dma))
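
Why the masking matters: a bio_vec built from a multi-page or huge-page backed buffer can carry a bv_offset larger than the controller page size, and the old subtraction then underflows. The numbers below (4096-byte controller page, 5000-byte offset) are made up purely to illustrate the before/after arithmetic as a standalone program.

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;   /* stand-in for ctrl.page_size */
	unsigned int bv_offset = 5000;   /* offset may exceed page_size */

	/* Old code: wraps around for unsigned when bv_offset > page_size. */
	unsigned int old_len = page_size - bv_offset;

	/* New code: mask the offset into the page first. */
	unsigned int offset = bv_offset & (page_size - 1);
	unsigned int new_len = page_size - offset;

	printf("old first_prp_len = %u (bogus)\n", old_len);
	printf("new first_prp_len = %u\n", new_len);   /* 4096 - 904 = 3192 */
	return 0;
}
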
@@ -2263,10 +2264,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
        return true;
 }
 
-/*
- * return error value only when tagset allocation failed
- */
-static int nvme_dev_add(struct nvme_dev *dev)
+static void nvme_dev_add(struct nvme_dev *dev)
 {
        int ret;
 
@@ -2296,7 +2294,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
                if (ret) {
                        dev_warn(dev->ctrl.device,
                                "IO queues tagset allocation failed %d\n", ret);
-                       return ret;
+                       return;
                }
                dev->ctrl.tagset = &dev->tagset;
        } else {
@@ -2307,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
        }
 
        nvme_dbbuf_set(dev);
-       return 0;
 }
 
 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2467,6 +2464,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        mutex_unlock(&dev->shutdown_lock);
 }
 
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+{
+       if (!nvme_wait_reset(&dev->ctrl))
+               return -EBUSY;
+       nvme_dev_disable(dev, shutdown);
+       return 0;
+}
+
 static int nvme_setup_prp_pools(struct nvme_dev *dev)
 {
        dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
@@ -2490,14 +2495,20 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
        dma_pool_destroy(dev->prp_small_pool);
 }
 
+static void nvme_free_tagset(struct nvme_dev *dev)
+{
+       if (dev->tagset.tags)
+               blk_mq_free_tag_set(&dev->tagset);
+       dev->ctrl.tagset = NULL;
+}
+
 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 {
        struct nvme_dev *dev = to_nvme_dev(ctrl);
 
        nvme_dbbuf_dma_free(dev);
        put_device(dev->dev);
-       if (dev->tagset.tags)
-               blk_mq_free_tag_set(&dev->tagset);
+       nvme_free_tagset(dev);
        if (dev->ctrl.admin_q)
                blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
@@ -2508,6 +2519,11 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 
 static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 {
+       /*
+        * Set state to deleting now to avoid blocking nvme_wait_reset(), which
+        * may be holding this pci_dev's device lock.
+        */
+       nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
        nvme_get_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, false);
        nvme_kill_queues(&dev->ctrl);
@@ -2521,7 +2537,6 @@ static void nvme_reset_work(struct work_struct *work)
                container_of(work, struct nvme_dev, ctrl.reset_work);
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result;
-       enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
 
        if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
                result = -ENODEV;
@@ -2615,13 +2630,11 @@ static void nvme_reset_work(struct work_struct *work)
                dev_warn(dev->ctrl.device, "IO queues not created\n");
                nvme_kill_queues(&dev->ctrl);
                nvme_remove_namespaces(&dev->ctrl);
-               new_state = NVME_CTRL_ADMIN_ONLY;
+               nvme_free_tagset(dev);
        } else {
                nvme_start_queues(&dev->ctrl);
                nvme_wait_freeze(&dev->ctrl);
-               /* hit this only when allocate tagset fails */
-               if (nvme_dev_add(dev))
-                       new_state = NVME_CTRL_ADMIN_ONLY;
+               nvme_dev_add(dev);
                nvme_unfreeze(&dev->ctrl);
        }
 
@@ -2629,9 +2642,9 @@ static void nvme_reset_work(struct work_struct *work)
         * If only the admin queue is live, keep it to do further investigation
         * or recovery.
         */
-       if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
                dev_warn(dev->ctrl.device,
-                       "failed to mark controller state %d\n", new_state);
+                       "failed to mark controller live state\n");
                result = -ENODEV;
                goto out;
        }
@@ -2672,7 +2685,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 
 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
-       *val = readq(to_nvme_dev(ctrl)->bar + off);
+       *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
        return 0;
 }
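
readq() is not provided (or not usable) on every 32-bit configuration, which is why the 64-bit register read above switches to lo_hi_readq(). The snippet below is only a sketch of the low-then-high fallback semantics implemented by the io-64-nonatomic helpers; it is not the in-tree definition.

#include <linux/io.h>
#include <linux/types.h>

/* Sketch of the lo-hi fallback: two 32-bit reads, lower half first. */
static inline u64 sketch_lo_hi_readq(const volatile void __iomem *addr)
{
	u32 low, high;

	low  = readl(addr);
	high = readl(addr + 4);
	return low + ((u64)high << 32);
}
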
 
@@ -2836,19 +2849,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void nvme_reset_prepare(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_dev_disable(dev, false);
+
+       /*
+        * We don't need to check the return value from waiting for the reset
+        * state as pci_dev device lock is held, making it impossible to race
+        * with ->remove().
+        */
+       nvme_disable_prepare_reset(dev, false);
+       nvme_sync_queues(&dev->ctrl);
 }
 
 static void nvme_reset_done(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_reset_ctrl_sync(&dev->ctrl);
+
+       if (!nvme_try_sched_reset(&dev->ctrl))
+               flush_work(&dev->ctrl.reset_work);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_dev_disable(dev, true);
+       nvme_disable_prepare_reset(dev, true);
 }
 
 /*
@@ -2901,7 +2923,7 @@ static int nvme_resume(struct device *dev)
 
        if (ndev->last_ps == U32_MAX ||
            nvme_set_power_state(ctrl, ndev->last_ps) != 0)
-               nvme_reset_ctrl(ctrl);
+               return nvme_try_sched_reset(&ndev->ctrl);
        return 0;
 }
 
@@ -2929,43 +2951,42 @@ static int nvme_suspend(struct device *dev)
         */
        if (pm_suspend_via_firmware() || !ctrl->npss ||
            !pcie_aspm_enabled(pdev) ||
-           (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
-               nvme_dev_disable(ndev, true);
-               return 0;
-       }
+           (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
+               return nvme_disable_prepare_reset(ndev, true);
 
        nvme_start_freeze(ctrl);
        nvme_wait_freeze(ctrl);
        nvme_sync_queues(ctrl);
 
-       if (ctrl->state != NVME_CTRL_LIVE &&
-           ctrl->state != NVME_CTRL_ADMIN_ONLY)
+       if (ctrl->state != NVME_CTRL_LIVE)
                goto unfreeze;
 
        ret = nvme_get_power_state(ctrl, &ndev->last_ps);
        if (ret < 0)
                goto unfreeze;
 
+       /*
+        * A saved state prevents pci pm from generically controlling the
+        * device's power. If we're using protocol specific settings, we don't
+        * want pci interfering.
+        */
+       pci_save_state(pdev);
+
        ret = nvme_set_power_state(ctrl, ctrl->npss);
        if (ret < 0)
                goto unfreeze;
 
        if (ret) {
+               /* discard the saved state */
+               pci_load_saved_state(pdev, NULL);
+
                /*
                 * Clearing npss forces a controller reset on resume. The
                 * correct value will be rediscovered then.
                 */
-               nvme_dev_disable(ndev, true);
+               ret = nvme_disable_prepare_reset(ndev, true);
                ctrl->npss = 0;
-               ret = 0;
-               goto unfreeze;
        }
-       /*
-        * A saved state prevents pci pm from generically controlling the
-        * device's power. If we're using protocol specific settings, we don't
-        * want pci interfering.
-        */
-       pci_save_state(pdev);
 unfreeze:
        nvme_unfreeze(ctrl);
        return ret;
@@ -2974,9 +2995,7 @@ unfreeze:
 static int nvme_simple_suspend(struct device *dev)
 {
        struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
-
-       nvme_dev_disable(ndev, true);
-       return 0;
+       return nvme_disable_prepare_reset(ndev, true);
 }
 
 static int nvme_simple_resume(struct device *dev)
@@ -2984,8 +3003,7 @@ static int nvme_simple_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       nvme_reset_ctrl(&ndev->ctrl);
-       return 0;
+       return nvme_try_sched_reset(&ndev->ctrl);
 }
 
 static const struct dev_pm_ops nvme_dev_pm_ops = {
@@ -3090,6 +3108,9 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
index dfa07bb..f19a28b 100644 (file)
@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
 {
        return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-                    ibdev->attrs.max_fast_reg_page_list_len);
+                    ibdev->attrs.max_fast_reg_page_list_len - 1);
 }
 
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
        const int cq_factor = send_wr_factor + 1;       /* + RECV */
        int comp_vector, idx = nvme_rdma_queue_idx(queue);
        enum ib_poll_context poll_ctx;
-       int ret;
+       int ret, pages_per_mr;
 
        queue->device = nvme_rdma_find_get_device(queue->cm_id);
        if (!queue->device) {
@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
                goto out_destroy_qp;
        }
 
+       /*
+        * Currently we don't use SG_GAPS MRs, so if the first entry is
+        * misaligned we'll end up using two entries for a single data page,
+        * which means one additional entry is required.
+        */
+       pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
        ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
                              queue->queue_size,
                              IB_MR_TYPE_MEM_REG,
-                             nvme_rdma_get_max_fr_pages(ibdev), 0);
+                             pages_per_mr, 0);
        if (ret) {
                dev_err(queue->ctrl->ctrl.device,
                        "failed to initialize MR pool sized %d for QID %d\n",
@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
        if (!ret) {
                set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
        } else {
-               __nvme_rdma_stop_queue(queue);
+               if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+                       __nvme_rdma_stop_queue(queue);
                dev_info(ctrl->ctrl.device,
                        "failed to connect queue: %d ret=%d\n", idx, ret);
        }
@@ -820,8 +827,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_stop_queue;
 
-       ctrl->ctrl.max_hw_sectors =
-               (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+       ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+       ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
 
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
@@ -1694,6 +1701,14 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
        dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
                 rq->tag, nvme_rdma_queue_idx(queue));
 
+       /*
+        * Restart the timer if a controller reset is already scheduled. Any
+        * timed out commands would be handled before entering the connecting
+        * state.
+        */
+       if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+               return BLK_EH_RESET_TIMER;
+
        if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
                /*
                 * Teardown immediately if controller times out while starting
index 4ffd595..770dbcb 100644 (file)
@@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 {
        struct nvme_tcp_queue *queue =
                container_of(w, struct nvme_tcp_queue, io_work);
-       unsigned long start = jiffies + msecs_to_jiffies(1);
+       unsigned long deadline = jiffies + msecs_to_jiffies(1);
 
        do {
                bool pending = false;
@@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                if (!pending)
                        return;
 
-       } while (time_after(jiffies, start)); /* quota is exhausted */
+       } while (!time_after(jiffies, deadline)); /* quota is exhausted */
 
        queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
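
The old condition, time_after(jiffies, start), stays false until a full millisecond has elapsed, so the quota loop used to exit after a single pass; the fix keeps looping until the deadline actually passes. The standalone model below replays the corrected condition with a plain counter standing in for jiffies; with the old condition the loop body would have run exactly once.

#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's time_after(): true if a is after b. */
static bool time_after_sim(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long jiffies = 1000, deadline = jiffies + 10;
	int iterations = 0;

	do {
		iterations++;
		jiffies++;		/* pretend one unit of work per pass */
	} while (!time_after_sim(jiffies, deadline));	/* fixed condition */

	printf("ran %d passes before the quota expired\n", iterations);
	return 0;
}
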
@@ -1386,7 +1386,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
        queue->sock->sk->sk_state_change = nvme_tcp_state_change;
        queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
        queue->sock->sk->sk_ll_usec = 1;
+#endif
        write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
        return 0;
@@ -2044,6 +2046,14 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
        struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
+       /*
+        * Restart the timer if a controller reset is already scheduled. Any
+        * timed out commands would be handled before entering the connecting
+        * state.
+        */
+       if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+               return BLK_EH_RESET_TIMER;
+
        dev_warn(ctrl->ctrl.device,
                "queue %d: timeout request %#x type %d\n",
                nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
@@ -2126,6 +2136,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 
        ret = nvme_tcp_map_data(queue, rq);
        if (unlikely(ret)) {
+               nvme_cleanup_cmd(rq);
                dev_err(queue->ctrl->ctrl.device,
                        "Failed to map data (%d)\n", ret);
                return ret;
index de0bff7..32008d8 100644 (file)
 void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
 {
        const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
-       /* Number of physical blocks per logical block. */
-       const u32 ppl = ql->physical_block_size / ql->logical_block_size;
-       /* Physical blocks per logical block, 0's based. */
-       const __le16 ppl0b = to0based(ppl);
+       /* Number of logical blocks per physical block. */
+       const u32 lpp = ql->physical_block_size / ql->logical_block_size;
+       /* Logical blocks per physical block, 0's based. */
+       const __le16 lpp0b = to0based(lpp);
 
        /*
         * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
         * field from the identify controller data structure should be used.
         */
        id->nsfeat |= 1 << 1;
-       id->nawun = ppl0b;
-       id->nawupf = ppl0b;
-       id->nacwu = ppl0b;
+       id->nawun = lpp0b;
+       id->nawupf = lpp0b;
+       id->nacwu = lpp0b;
 
        /*
         * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
         */
        id->nsfeat |= 1 << 4;
        /* NPWG = Namespace Preferred Write Granularity. 0's based */
-       id->npwg = ppl0b;
+       id->npwg = lpp0b;
        /* NPWA = Namespace Preferred Write Alignment. 0's based */
        id->npwa = id->npwg;
        /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
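
A worked example of the renamed quantity: with a 4096-byte physical block and 512-byte logical blocks there are 8 logical blocks per physical block, so the 0's-based value written into NAWUN/NAWUPF/NACWU/NPWG is 7. The to0based_demo() helper below only models what to0based() is assumed to do (turn a 1-based count into a 0-based field); it is not the nvmet definition.

#include <stdio.h>

/* Assumed behaviour of to0based(): 1-based count -> 0-based field. */
static unsigned int to0based_demo(unsigned int v)
{
	return v ? v - 1 : 0;
}

int main(void)
{
	unsigned int physical = 4096, logical = 512;
	unsigned int lpp = physical / logical;   /* logical per physical = 8 */

	printf("lpp = %u, reported 0's-based value = %u\n",
	       lpp, to0based_demo(lpp));         /* 8, 7 */
	return 0;
}
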
index 748a39f..11f5aea 100644 (file)
@@ -157,8 +157,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl, SG_CHUNK_SIZE))
+                               iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+                       nvme_cleanup_cmd(req);
                        return BLK_STS_RESOURCE;
+               }
 
                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
index bf4f034..d535080 100644 (file)
@@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 
        return 0;
 err:
-       if (cmd->req.sg_cnt)
-               sgl_free(cmd->req.sg);
+       sgl_free(cmd->req.sg);
        return NVME_SC_INTERNAL;
 }
 
@@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 
        if (queue->nvme_sq.sqhd_disabled) {
                kfree(cmd->iov);
-               if (cmd->req.sg_cnt)
-                       sgl_free(cmd->req.sg);
+               sgl_free(cmd->req.sg);
        }
 
        return 1;
@@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
                return -EAGAIN;
 
        kfree(cmd->iov);
-       if (cmd->req.sg_cnt)
-               sgl_free(cmd->req.sg);
+       sgl_free(cmd->req.sg);
        cmd->queue->snd_cmd = NULL;
        nvmet_tcp_put_cmd(cmd);
        return 1;
@@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
        nvmet_req_uninit(&cmd->req);
        nvmet_tcp_unmap_pdu_iovec(cmd);
        kfree(cmd->iov);
-       if (cmd->req.sg_cnt)
-               sgl_free(cmd->req.sg);
+       sgl_free(cmd->req.sg);
 }
 
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
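
The guards around sgl_free() are dropped in the hunks above because the helper is expected to tolerate a NULL scatterlist, the same convention as kfree(NULL). The tiny program below only models that NULL-safe-free pattern; it is not the lib/scatterlist implementation.

#include <stdlib.h>

/* Model of a NULL-safe free wrapper: callers need no "if" around it. */
static void demo_sgl_free(void *sgl)
{
	if (!sgl)
		return;		/* nothing allocated, nothing to do */
	free(sgl);
}

int main(void)
{
	demo_sgl_free(NULL);	/* safe: the guard lives inside the helper */
	return 0;
}
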
index ed50502..de8e4e3 100644 (file)
@@ -678,14 +678,6 @@ static int sba_dma_supported( struct device *dev, u64 mask)
                return(0);
        }
 
-       /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
-        * first, then fall back to 32-bit if that fails.
-        * We are just "encouraging" 32-bit DMA masks here since we can
-        * never allow IOMMU bypass unless we add special support for ZX1.
-        */
-       if (mask > ~0U)
-               return 0;
-
        ioc = GET_IOC(dev);
        if (!ioc)
                return 0;
index a304f5e..77c1428 100644 (file)
@@ -52,7 +52,7 @@ config PCI_MSI
           If you don't know what to do here, say Y.
 
 config PCI_MSI_IRQ_DOMAIN
-       def_bool ARC || ARM || ARM64 || X86 || RISCV
+       def_bool y
        depends on PCI_MSI
        select GENERIC_MSI_IRQ_DOMAIN
 
index 28cdd8c..522d2b9 100644 (file)
@@ -7,6 +7,8 @@ obj-$(CONFIG_PCI)               += access.o bus.o probe.o host-bridge.o \
                                   pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
                                   setup-bus.o vc.o mmap.o setup-irq.o
 
+obj-$(CONFIG_PCI)              += pcie/
+
 ifdef CONFIG_PCI
 obj-$(CONFIG_PROC_FS)          += proc.o
 obj-$(CONFIG_SYSFS)            += slot.o
@@ -15,7 +17,6 @@ endif
 
 obj-$(CONFIG_OF)               += of.o
 obj-$(CONFIG_PCI_QUIRKS)       += quirks.o
-obj-$(CONFIG_PCIEPORTBUS)      += pcie/
 obj-$(CONFIG_HOTPLUG_PCI)      += hotplug/
 obj-$(CONFIG_PCI_MSI)          += msi.o
 obj-$(CONFIG_PCI_ATS)          += ats.o
index 2fccb57..79c4a2e 100644 (file)
@@ -355,7 +355,7 @@ static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
               pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
 }
 
-static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
+bool pcie_cap_has_rtctl(const struct pci_dev *dev)
 {
        int type = pci_pcie_type(dev);
 
index e184992..982b46f 100644 (file)
@@ -60,8 +60,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
                pdev = pci_physfn(dev);
                if (pdev->ats_stu != ps)
                        return -EINVAL;
-
-               atomic_inc(&pdev->ats_ref_cnt);  /* count enabled VFs */
        } else {
                dev->ats_stu = ps;
                ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
@@ -71,7 +69,6 @@ int pci_enable_ats(struct pci_dev *dev, int ps)
        dev->ats_enabled = 1;
        return 0;
 }
-EXPORT_SYMBOL_GPL(pci_enable_ats);
 
 /**
  * pci_disable_ats - disable the ATS capability
@@ -79,27 +76,17 @@ EXPORT_SYMBOL_GPL(pci_enable_ats);
  */
 void pci_disable_ats(struct pci_dev *dev)
 {
-       struct pci_dev *pdev;
        u16 ctrl;
 
        if (WARN_ON(!dev->ats_enabled))
                return;
 
-       if (atomic_read(&dev->ats_ref_cnt))
-               return;         /* VFs still enabled */
-
-       if (dev->is_virtfn) {
-               pdev = pci_physfn(dev);
-               atomic_dec(&pdev->ats_ref_cnt);
-       }
-
        pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl);
        ctrl &= ~PCI_ATS_CTRL_ENABLE;
        pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
 
        dev->ats_enabled = 0;
 }
-EXPORT_SYMBOL_GPL(pci_disable_ats);
 
 void pci_restore_ats_state(struct pci_dev *dev)
 {
@@ -113,7 +100,6 @@ void pci_restore_ats_state(struct pci_dev *dev)
                ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU);
        pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl);
 }
-EXPORT_SYMBOL_GPL(pci_restore_ats_state);
 
 /**
  * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
@@ -140,7 +126,6 @@ int pci_ats_queue_depth(struct pci_dev *dev)
        pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap);
        return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP;
 }
-EXPORT_SYMBOL_GPL(pci_ats_queue_depth);
 
 /**
  * pci_ats_page_aligned - Return Page Aligned Request bit status.
@@ -167,9 +152,22 @@ int pci_ats_page_aligned(struct pci_dev *pdev)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(pci_ats_page_aligned);
 
 #ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *pdev)
+{
+       u16 status;
+
+       pdev->pri_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+
+       if (!pdev->pri_cap)
+               return;
+
+       pci_read_config_word(pdev, pdev->pri_cap + PCI_PRI_STATUS, &status);
+       if (status & PCI_PRI_STATUS_PASID)
+               pdev->pasid_required = 1;
+}
+
 /**
  * pci_enable_pri - Enable PRI capability
  * @ pdev: PCI device structure
@@ -180,32 +178,41 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
 {
        u16 control, status;
        u32 max_requests;
-       int pos;
+       int pri = pdev->pri_cap;
+
+       /*
+        * VFs must not implement the PRI Capability.  If their PF
+        * implements PRI, it is shared by the VFs, so if the PF PRI is
+        * enabled, it is also enabled for the VF.
+        */
+       if (pdev->is_virtfn) {
+               if (pci_physfn(pdev)->pri_enabled)
+                       return 0;
+               return -EINVAL;
+       }
 
        if (WARN_ON(pdev->pri_enabled))
                return -EBUSY;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (!pos)
+       if (!pri)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+       pci_read_config_word(pdev, pri + PCI_PRI_STATUS, &status);
        if (!(status & PCI_PRI_STATUS_STOPPED))
                return -EBUSY;
 
-       pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
+       pci_read_config_dword(pdev, pri + PCI_PRI_MAX_REQ, &max_requests);
        reqs = min(max_requests, reqs);
        pdev->pri_reqs_alloc = reqs;
-       pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
+       pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
 
        control = PCI_PRI_CTRL_ENABLE;
-       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+       pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
 
        pdev->pri_enabled = 1;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(pci_enable_pri);
 
 /**
  * pci_disable_pri - Disable PRI capability
@@ -216,18 +223,21 @@ EXPORT_SYMBOL_GPL(pci_enable_pri);
 void pci_disable_pri(struct pci_dev *pdev)
 {
        u16 control;
-       int pos;
+       int pri = pdev->pri_cap;
+
+       /* VFs share the PF PRI */
+       if (pdev->is_virtfn)
+               return;
 
        if (WARN_ON(!pdev->pri_enabled))
                return;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (!pos)
+       if (!pri)
                return;
 
-       pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+       pci_read_config_word(pdev, pri + PCI_PRI_CTRL, &control);
        control &= ~PCI_PRI_CTRL_ENABLE;
-       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+       pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
 
        pdev->pri_enabled = 0;
 }
@@ -241,19 +251,20 @@ void pci_restore_pri_state(struct pci_dev *pdev)
 {
        u16 control = PCI_PRI_CTRL_ENABLE;
        u32 reqs = pdev->pri_reqs_alloc;
-       int pos;
+       int pri = pdev->pri_cap;
+
+       if (pdev->is_virtfn)
+               return;
 
        if (!pdev->pri_enabled)
                return;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (!pos)
+       if (!pri)
                return;
 
-       pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
-       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+       pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs);
+       pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
 }
-EXPORT_SYMBOL_GPL(pci_restore_pri_state);
 
 /**
  * pci_reset_pri - Resets device's PRI state
@@ -265,24 +276,45 @@ EXPORT_SYMBOL_GPL(pci_restore_pri_state);
 int pci_reset_pri(struct pci_dev *pdev)
 {
        u16 control;
-       int pos;
+       int pri = pdev->pri_cap;
+
+       if (pdev->is_virtfn)
+               return 0;
 
        if (WARN_ON(pdev->pri_enabled))
                return -EBUSY;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (!pos)
+       if (!pri)
                return -EINVAL;
 
        control = PCI_PRI_CTRL_RESET;
-       pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+       pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control);
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(pci_reset_pri);
+
+/**
+ * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
+ *                              status.
+ * @pdev: PCI device structure
+ *
+ * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
+ */
+int pci_prg_resp_pasid_required(struct pci_dev *pdev)
+{
+       if (pdev->is_virtfn)
+               pdev = pci_physfn(pdev);
+
+       return pdev->pasid_required;
+}
 #endif /* CONFIG_PCI_PRI */
 
 #ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *pdev)
+{
+       pdev->pasid_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+}
+
 /**
  * pci_enable_pasid - Enable the PASID capability
  * @pdev: PCI device structure
@@ -295,7 +327,17 @@ EXPORT_SYMBOL_GPL(pci_reset_pri);
 int pci_enable_pasid(struct pci_dev *pdev, int features)
 {
        u16 control, supported;
-       int pos;
+       int pasid = pdev->pasid_cap;
+
+       /*
+        * VFs must not implement the PASID Capability, but if a PF
+        * supports PASID, its VFs share the PF PASID configuration.
+        */
+       if (pdev->is_virtfn) {
+               if (pci_physfn(pdev)->pasid_enabled)
+                       return 0;
+               return -EINVAL;
+       }
 
        if (WARN_ON(pdev->pasid_enabled))
                return -EBUSY;
@@ -303,11 +345,10 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
        if (!pdev->eetlp_prefix_path)
                return -EINVAL;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (!pos)
+       if (!pasid)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+       pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
        supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
 
        /* User wants to enable anything unsupported? */
@@ -317,13 +358,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
        control = PCI_PASID_CTRL_ENABLE | features;
        pdev->pasid_features = features;
 
-       pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+       pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
 
        pdev->pasid_enabled = 1;
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(pci_enable_pasid);
 
 /**
  * pci_disable_pasid - Disable the PASID capability
@@ -332,20 +372,22 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid);
 void pci_disable_pasid(struct pci_dev *pdev)
 {
        u16 control = 0;
-       int pos;
+       int pasid = pdev->pasid_cap;
+
+       /* VFs share the PF PASID configuration */
+       if (pdev->is_virtfn)
+               return;
 
        if (WARN_ON(!pdev->pasid_enabled))
                return;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (!pos)
+       if (!pasid)
                return;
 
-       pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+       pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
 
        pdev->pasid_enabled = 0;
 }
-EXPORT_SYMBOL_GPL(pci_disable_pasid);
 
 /**
  * pci_restore_pasid_state - Restore PASID capabilities
@@ -354,19 +396,20 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
 void pci_restore_pasid_state(struct pci_dev *pdev)
 {
        u16 control;
-       int pos;
+       int pasid = pdev->pasid_cap;
+
+       if (pdev->is_virtfn)
+               return;
 
        if (!pdev->pasid_enabled)
                return;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (!pos)
+       if (!pasid)
                return;
 
        control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features;
-       pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
+       pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control);
 }
-EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
 
 /**
  * pci_pasid_features - Check which PASID features are supported
@@ -381,49 +424,20 @@ EXPORT_SYMBOL_GPL(pci_restore_pasid_state);
 int pci_pasid_features(struct pci_dev *pdev)
 {
        u16 supported;
-       int pos;
+       int pasid = pdev->pasid_cap;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (!pos)
+       if (pdev->is_virtfn)
+               pdev = pci_physfn(pdev);
+
+       if (!pasid)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+       pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
 
        supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
 
        return supported;
 }
-EXPORT_SYMBOL_GPL(pci_pasid_features);
-
-/**
- * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit
- *                              status.
- * @pdev: PCI device structure
- *
- * Returns 1 if PASID is required in PRG Response Message, 0 otherwise.
- *
- * Even though the PRG response PASID status is read from PRI Status
- * Register, since this API will mainly be used by PASID users, this
- * function is defined within #ifdef CONFIG_PCI_PASID instead of
- * CONFIG_PCI_PRI.
- */
-int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
-       u16 status;
-       int pos;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (!pos)
-               return 0;
-
-       pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
-
-       if (status & PCI_PRI_STATUS_PASID)
-               return 1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
 
 #define PASID_NUMBER_SHIFT     8
 #define PASID_NUMBER_MASK      (0x1f << PASID_NUMBER_SHIFT)
@@ -437,17 +451,18 @@ EXPORT_SYMBOL_GPL(pci_prg_resp_pasid_required);
 int pci_max_pasids(struct pci_dev *pdev)
 {
        u16 supported;
-       int pos;
+       int pasid = pdev->pasid_cap;
 
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (!pos)
+       if (pdev->is_virtfn)
+               pdev = pci_physfn(pdev);
+
+       if (!pasid)
                return -EINVAL;
 
-       pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
+       pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported);
 
        supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
 
        return (1 << supported);
 }
-EXPORT_SYMBOL_GPL(pci_max_pasids);
 #endif /* CONFIG_PCI_PASID */
index 70e0782..5da0034 100644 (file)
@@ -22,34 +22,6 @@ config PCI_AARDVARK
          controller is part of the South Bridge of the Marvell Armada
         3700 SoC.
 
-menu "Cadence PCIe controllers support"
-
-config PCIE_CADENCE
-       bool
-
-config PCIE_CADENCE_HOST
-       bool "Cadence PCIe host controller"
-       depends on OF
-       depends on PCI
-       select IRQ_DOMAIN
-       select PCIE_CADENCE
-       help
-         Say Y here if you want to support the Cadence PCIe controller in host
-         mode. This PCIe controller may be embedded into many different vendors
-         SoCs.
-
-config PCIE_CADENCE_EP
-       bool "Cadence PCIe endpoint controller"
-       depends on OF
-       depends on PCI_ENDPOINT
-       select PCIE_CADENCE
-       help
-         Say Y here if you want to support the Cadence PCIe  controller in
-         endpoint mode. This PCIe controller may be embedded into many
-         different vendors SoCs.
-
-endmenu
-
 config PCIE_XILINX_NWL
        bool "NWL PCIe Core"
        depends on ARCH_ZYNQMP || COMPILE_TEST
@@ -289,4 +261,5 @@ config PCI_HYPERV_INTERFACE
          have a common interface with the Hyper-V PCI frontend driver.
 
 source "drivers/pci/controller/dwc/Kconfig"
+source "drivers/pci/controller/cadence/Kconfig"
 endmenu
index a2a22c9..3d4f597 100644 (file)
@@ -1,7 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
-obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
-obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE) += cadence/
 obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o
 obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o
 obj-$(CONFIG_PCI_HYPERV_INTERFACE) += pci-hyperv-intf.o
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
new file mode 100644 (file)
index 0000000..b76b3cf
--- /dev/null
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Cadence PCIe controllers support"
+       depends on PCI
+
+config PCIE_CADENCE
+       bool
+
+config PCIE_CADENCE_HOST
+       bool
+       depends on OF
+       select IRQ_DOMAIN
+       select PCIE_CADENCE
+
+config PCIE_CADENCE_EP
+       bool
+       depends on OF
+       depends on PCI_ENDPOINT
+       select PCIE_CADENCE
+
+config PCIE_CADENCE_PLAT
+       bool
+
+config PCIE_CADENCE_PLAT_HOST
+       bool "Cadence PCIe platform host controller"
+       depends on OF
+       select PCIE_CADENCE_HOST
+       select PCIE_CADENCE_PLAT
+       help
+         Say Y here if you want to support the Cadence PCIe platform controller in
+         host mode. This PCIe controller may be embedded into many different
+         vendors' SoCs.
+
+config PCIE_CADENCE_PLAT_EP
+       bool "Cadence PCIe platform endpoint controller"
+       depends on OF
+       depends on PCI_ENDPOINT
+       select PCIE_CADENCE_EP
+       select PCIE_CADENCE_PLAT
+       help
+         Say Y here if you want to support the Cadence PCIe platform controller in
+         endpoint mode. This PCIe controller may be embedded into many
+         different vendors' SoCs.
+
+endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
new file mode 100644 (file)
index 0000000..232a3f2
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o
+obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
+obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
+obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
similarity index 83%
rename from drivers/pci/controller/pcie-cadence-ep.c
rename to drivers/pci/controller/cadence/pcie-cadence-ep.c
index def7820..1c173da 100644 (file)
 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE         0x1
 #define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY       0x3
 
-/**
- * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
- * @pcie: Cadence PCIe controller
- * @max_regions: maximum number of regions supported by hardware
- * @ob_region_map: bitmask of mapped outbound regions
- * @ob_addr: base addresses in the AXI bus where the outbound regions start
- * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
- *                dedicated outbound regions is mapped.
- * @irq_cpu_addr: base address in the CPU space where a write access triggers
- *               the sending of a memory write (MSI) / normal message (legacy
- *               IRQ) TLP through the PCIe bus.
- * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
- *               dedicated outbound region.
- * @irq_pci_fn: the latest PCI function that has updated the mapping of
- *             the MSI/legacy IRQ dedicated outbound region.
- * @irq_pending: bitmask of asserted legacy IRQs.
- */
-struct cdns_pcie_ep {
-       struct cdns_pcie                pcie;
-       u32                             max_regions;
-       unsigned long                   ob_region_map;
-       phys_addr_t                     *ob_addr;
-       phys_addr_t                     irq_phys_addr;
-       void __iomem                    *irq_cpu_addr;
-       u64                             irq_pci_addr;
-       u8                              irq_pci_fn;
-       u8                              irq_pending;
-};
-
 static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn,
                                     struct pci_epf_header *hdr)
 {
@@ -424,28 +395,17 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = {
        .get_features   = cdns_pcie_ep_get_features,
 };
 
-static const struct of_device_id cdns_pcie_ep_of_match[] = {
-       { .compatible = "cdns,cdns-pcie-ep" },
-
-       { },
-};
 
-static int cdns_pcie_ep_probe(struct platform_device *pdev)
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
 {
-       struct device *dev = &pdev->dev;
+       struct device *dev = ep->pcie.dev;
+       struct platform_device *pdev = to_platform_device(dev);
        struct device_node *np = dev->of_node;
-       struct cdns_pcie_ep *ep;
-       struct cdns_pcie *pcie;
-       struct pci_epc *epc;
+       struct cdns_pcie *pcie = &ep->pcie;
        struct resource *res;
+       struct pci_epc *epc;
        int ret;
-       int phy_count;
-
-       ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
-       if (!ep)
-               return -ENOMEM;
 
-       pcie = &ep->pcie;
        pcie->is_rc = false;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
@@ -474,19 +434,6 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
        if (!ep->ob_addr)
                return -ENOMEM;
 
-       ret = cdns_pcie_init_phy(dev, pcie);
-       if (ret) {
-               dev_err(dev, "failed to init phy\n");
-               return ret;
-       }
-       platform_set_drvdata(pdev, pcie);
-       pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_get_sync() failed\n");
-               goto err_get_sync;
-       }
-
        /* Disable all but function 0 (anyway BIT(0) is hardwired to 1). */
        cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0));
 
@@ -528,38 +475,5 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
  err_init:
        pm_runtime_put_sync(dev);
 
- err_get_sync:
-       pm_runtime_disable(dev);
-       cdns_pcie_disable_phy(pcie);
-       phy_count = pcie->phy_count;
-       while (phy_count--)
-               device_link_del(pcie->link[phy_count]);
-
        return ret;
 }
-
-static void cdns_pcie_ep_shutdown(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct cdns_pcie *pcie = dev_get_drvdata(dev);
-       int ret;
-
-       ret = pm_runtime_put_sync(dev);
-       if (ret < 0)
-               dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
-       pm_runtime_disable(dev);
-
-       cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_ep_driver = {
-       .driver = {
-               .name = "cdns-pcie-ep",
-               .of_match_table = cdns_pcie_ep_of_match,
-               .pm     = &cdns_pcie_pm_ops,
-       },
-       .probe = cdns_pcie_ep_probe,
-       .shutdown = cdns_pcie_ep_shutdown,
-};
-builtin_platform_driver(cdns_pcie_ep_driver);
 
 #include "pcie-cadence.h"
 
-/**
- * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
- * @pcie: Cadence PCIe controller
- * @dev: pointer to PCIe device
- * @cfg_res: start/end offsets in the physical system memory to map PCI
- *           configuration space accesses
- * @bus_range: first/last buses behind the PCIe host controller
- * @cfg_base: IO mapped window to access the PCI configuration space of a
- *            single function at a time
- * @max_regions: maximum number of regions supported by the hardware
- * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
- *                translation (nbits sets into the "no BAR match" register)
- * @vendor_id: PCI vendor ID
- * @device_id: PCI device ID
- */
-struct cdns_pcie_rc {
-       struct cdns_pcie        pcie;
-       struct device           *dev;
-       struct resource         *cfg_res;
-       struct resource         *bus_range;
-       void __iomem            *cfg_base;
-       u32                     max_regions;
-       u32                     no_bar_nbits;
-       u16                     vendor_id;
-       u16                     device_id;
-};
-
 static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
                                      int where)
 {
@@ -92,11 +65,6 @@ static struct pci_ops cdns_pcie_host_ops = {
        .write          = pci_generic_config_write,
 };
 
-static const struct of_device_id cdns_pcie_host_of_match[] = {
-       { .compatible = "cdns,cdns-pcie-host" },
-
-       { },
-};
 
 static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 {
@@ -136,10 +104,10 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
 static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
 {
        struct cdns_pcie *pcie = &rc->pcie;
-       struct resource *cfg_res = rc->cfg_res;
        struct resource *mem_res = pcie->mem_res;
        struct resource *bus_range = rc->bus_range;
-       struct device *dev = rc->dev;
+       struct resource *cfg_res = rc->cfg_res;
+       struct device *dev = pcie->dev;
        struct device_node *np = dev->of_node;
        struct of_pci_range_parser parser;
        struct of_pci_range range;
@@ -233,25 +201,21 @@ static int cdns_pcie_host_init(struct device *dev,
        return err;
 }
 
-static int cdns_pcie_host_probe(struct platform_device *pdev)
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
 {
-       struct device *dev = &pdev->dev;
+       struct device *dev = rc->pcie.dev;
+       struct platform_device *pdev = to_platform_device(dev);
        struct device_node *np = dev->of_node;
        struct pci_host_bridge *bridge;
        struct list_head resources;
-       struct cdns_pcie_rc *rc;
        struct cdns_pcie *pcie;
        struct resource *res;
        int ret;
-       int phy_count;
 
-       bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+       bridge = pci_host_bridge_from_priv(rc);
        if (!bridge)
                return -ENOMEM;
 
-       rc = pci_host_bridge_priv(bridge);
-       rc->dev = dev;
-
        pcie = &rc->pcie;
        pcie->is_rc = true;
 
@@ -287,21 +251,8 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
                dev_err(dev, "missing \"mem\"\n");
                return -EINVAL;
        }
-       pcie->mem_res = res;
 
-       ret = cdns_pcie_init_phy(dev, pcie);
-       if (ret) {
-               dev_err(dev, "failed to init phy\n");
-               return ret;
-       }
-       platform_set_drvdata(pdev, pcie);
-
-       pm_runtime_enable(dev);
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0) {
-               dev_err(dev, "pm_runtime_get_sync() failed\n");
-               goto err_get_sync;
-       }
+       pcie->mem_res = res;
 
        ret = cdns_pcie_host_init(dev, &resources, rc);
        if (ret)
@@ -326,37 +277,5 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
  err_init:
        pm_runtime_put_sync(dev);
 
- err_get_sync:
-       pm_runtime_disable(dev);
-       cdns_pcie_disable_phy(pcie);
-       phy_count = pcie->phy_count;
-       while (phy_count--)
-               device_link_del(pcie->link[phy_count]);
-
        return ret;
 }
-
-static void cdns_pcie_shutdown(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct cdns_pcie *pcie = dev_get_drvdata(dev);
-       int ret;
-
-       ret = pm_runtime_put_sync(dev);
-       if (ret < 0)
-               dev_dbg(dev, "pm_runtime_put_sync failed\n");
-
-       pm_runtime_disable(dev);
-       cdns_pcie_disable_phy(pcie);
-}
-
-static struct platform_driver cdns_pcie_host_driver = {
-       .driver = {
-               .name = "cdns-pcie-host",
-               .of_match_table = cdns_pcie_host_of_match,
-               .pm     = &cdns_pcie_pm_ops,
-       },
-       .probe = cdns_pcie_host_probe,
-       .shutdown = cdns_pcie_shutdown,
-};
-builtin_platform_driver(cdns_pcie_host_driver);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
new file mode 100644 (file)
index 0000000..f5c6bf6
--- /dev/null
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCIe platform driver.
+ *
+ * Copyright (c) 2019, Cadence Design Systems
+ * Author: Tom Joseph <tjoseph@cadence.com>
+ */
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_device.h>
+#include "pcie-cadence.h"
+
+/**
+ * struct cdns_plat_pcie - private data for this PCIe platform driver
+ * @pcie: Cadence PCIe controller
+ * @is_rc: True if the PCIe controller operates in Root Complex mode,
+ *         false if it operates in Endpoint mode.
+ */
+struct cdns_plat_pcie {
+       struct cdns_pcie        *pcie;
+       bool is_rc;
+};
+
+struct cdns_plat_pcie_of_data {
+       bool is_rc;
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[];
+
+static int cdns_plat_pcie_probe(struct platform_device *pdev)
+{
+       const struct cdns_plat_pcie_of_data *data;
+       struct cdns_plat_pcie *cdns_plat_pcie;
+       const struct of_device_id *match;
+       struct device *dev = &pdev->dev;
+       struct pci_host_bridge *bridge;
+       struct cdns_pcie_ep *ep;
+       struct cdns_pcie_rc *rc;
+       int phy_count;
+       bool is_rc;
+       int ret;
+
+       match = of_match_device(cdns_plat_pcie_of_match, dev);
+       if (!match)
+               return -EINVAL;
+
+       data = (struct cdns_plat_pcie_of_data *)match->data;
+       is_rc = data->is_rc;
+
+       pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc);
+       cdns_plat_pcie = devm_kzalloc(dev, sizeof(*cdns_plat_pcie), GFP_KERNEL);
+       if (!cdns_plat_pcie)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, cdns_plat_pcie);
+       if (is_rc) {
+               if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_HOST))
+                       return -ENODEV;
+
+               bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+               if (!bridge)
+                       return -ENOMEM;
+
+               rc = pci_host_bridge_priv(bridge);
+               rc->pcie.dev = dev;
+               cdns_plat_pcie->pcie = &rc->pcie;
+               cdns_plat_pcie->is_rc = is_rc;
+
+               ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+               if (ret) {
+                       dev_err(dev, "failed to init phy\n");
+                       return ret;
+               }
+               pm_runtime_enable(dev);
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0) {
+                       dev_err(dev, "pm_runtime_get_sync() failed\n");
+                       goto err_get_sync;
+               }
+
+               ret = cdns_pcie_host_setup(rc);
+               if (ret)
+                       goto err_init;
+       } else {
+               if (!IS_ENABLED(CONFIG_PCIE_CADENCE_PLAT_EP))
+                       return -ENODEV;
+
+               ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+               if (!ep)
+                       return -ENOMEM;
+
+               ep->pcie.dev = dev;
+               cdns_plat_pcie->pcie = &ep->pcie;
+               cdns_plat_pcie->is_rc = is_rc;
+
+               ret = cdns_pcie_init_phy(dev, cdns_plat_pcie->pcie);
+               if (ret) {
+                       dev_err(dev, "failed to init phy\n");
+                       return ret;
+               }
+
+               pm_runtime_enable(dev);
+               ret = pm_runtime_get_sync(dev);
+               if (ret < 0) {
+                       dev_err(dev, "pm_runtime_get_sync() failed\n");
+                       goto err_get_sync;
+               }
+
+               ret = cdns_pcie_ep_setup(ep);
+               if (ret)
+                       goto err_init;
+       }
+
+ err_init:
+       pm_runtime_put_sync(dev);
+
+ err_get_sync:
+       pm_runtime_disable(dev);
+       cdns_pcie_disable_phy(cdns_plat_pcie->pcie);
+       phy_count = cdns_plat_pcie->pcie->phy_count;
+       while (phy_count--)
+               device_link_del(cdns_plat_pcie->pcie->link[phy_count]);
+
+       return 0;
+}
+
+static void cdns_plat_pcie_shutdown(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct cdns_pcie *pcie = dev_get_drvdata(dev);
+       int ret;
+
+       ret = pm_runtime_put_sync(dev);
+       if (ret < 0)
+               dev_dbg(dev, "pm_runtime_put_sync failed\n");
+
+       pm_runtime_disable(dev);
+
+       cdns_pcie_disable_phy(pcie);
+}
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_host_of_data = {
+       .is_rc = true,
+};
+
+static const struct cdns_plat_pcie_of_data cdns_plat_pcie_ep_of_data = {
+       .is_rc = false,
+};
+
+static const struct of_device_id cdns_plat_pcie_of_match[] = {
+       {
+               .compatible = "cdns,cdns-pcie-host",
+               .data = &cdns_plat_pcie_host_of_data,
+       },
+       {
+               .compatible = "cdns,cdns-pcie-ep",
+               .data = &cdns_plat_pcie_ep_of_data,
+       },
+       {},
+};
+
+static struct platform_driver cdns_plat_pcie_driver = {
+       .driver = {
+               .name = "cdns-pcie",
+               .of_match_table = cdns_plat_pcie_of_match,
+               .pm     = &cdns_pcie_pm_ops,
+       },
+       .probe = cdns_plat_pcie_probe,
+       .shutdown = cdns_plat_pcie_shutdown,
+};
+builtin_platform_driver(cdns_plat_pcie_driver);
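
The RC/EP split in cdns_plat_pcie_probe() above keys off is_rc, which glue drivers of this kind normally derive from the OF match entry; a minimal sketch of that lookup, assuming the usual of_device_get_match_data() helper rather than quoting the elided top of the probe function (the helper name is illustrative):

#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Sketch only: derive host vs endpoint mode from the matched compatible. */
static bool cdns_plat_pcie_is_host(struct platform_device *pdev)
{
	const struct cdns_plat_pcie_of_data *data;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return false;

	return data->is_rc;
}

With the match table below, "cdns,cdns-pcie-host" would yield true and "cdns,cdns-pcie-ep" false.
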
similarity index 82%
rename from drivers/pci/controller/pcie-cadence.h
rename to drivers/pci/controller/cadence/pcie-cadence.h
index ae6bf2a..a2b28b9 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 // Copyright (c) 2017 Cadence
 // Cadence PCIe controller driver.
 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
@@ -190,6 +190,8 @@ enum cdns_pcie_rp_bar {
        (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
 #define CDNS_PCIE_MSG_NO_DATA                  BIT(16)
 
+struct cdns_pcie;
+
 enum cdns_pcie_msg_code {
        MSG_CODE_ASSERT_INTA    = 0x20,
        MSG_CODE_ASSERT_INTB    = 0x21,
@@ -231,13 +233,71 @@ enum cdns_pcie_msg_routing {
 struct cdns_pcie {
        void __iomem            *reg_base;
        struct resource         *mem_res;
+       struct device           *dev;
        bool                    is_rc;
        u8                      bus;
        int                     phy_count;
        struct phy              **phy;
        struct device_link      **link;
+       const struct cdns_pcie_common_ops *ops;
+};
+
+/**
+ * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
+ * @pcie: Cadence PCIe controller
+ * @cfg_res: start/end offsets in the physical system memory to map PCI
+ *           configuration space accesses
+ * @bus_range: first/last buses behind the PCIe host controller
+ * @cfg_base: IO mapped window to access the PCI configuration space of a
+ *            single function at a time
+ * @max_regions: maximum number of regions supported by the hardware
+ * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
+ *                translation (nbits is written to the "no BAR match" register)
+ * @vendor_id: PCI vendor ID
+ * @device_id: PCI device ID
+ */
+struct cdns_pcie_rc {
+       struct cdns_pcie        pcie;
+       struct resource         *cfg_res;
+       struct resource         *bus_range;
+       void __iomem            *cfg_base;
+       u32                     max_regions;
+       u32                     no_bar_nbits;
+       u16                     vendor_id;
+       u16                     device_id;
 };
 
+/**
+ * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
+ * @pcie: Cadence PCIe controller
+ * @max_regions: maximum number of regions supported by hardware
+ * @ob_region_map: bitmask of mapped outbound regions
+ * @ob_addr: base addresses in the AXI bus where the outbound regions start
+ * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
+ *                dedicated outbound region is mapped.
+ * @irq_cpu_addr: base address in the CPU space where a write access triggers
+ *               the sending of a memory write (MSI) / normal message (legacy
+ *               IRQ) TLP through the PCIe bus.
+ * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
+ *               dedicated outbound region.
+ * @irq_pci_fn: the latest PCI function that has updated the mapping of
+ *             the MSI/legacy IRQ dedicated outbound region.
+ * @irq_pending: bitmask of asserted legacy IRQs.
+ */
+struct cdns_pcie_ep {
+       struct cdns_pcie        pcie;
+       u32                     max_regions;
+       unsigned long           ob_region_map;
+       phys_addr_t             *ob_addr;
+       phys_addr_t             irq_phys_addr;
+       void __iomem            *irq_cpu_addr;
+       u64                     irq_pci_addr;
+       u8                      irq_pci_fn;
+       u8                      irq_pending;
+};
+
 /* Register access */
 static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value)
 {
@@ -306,6 +366,23 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
        return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg);
 }
 
+#ifdef CONFIG_PCIE_CADENCE_HOST
+int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
+#else
+static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
+{
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_PCIE_CADENCE_EP
+int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
+#else
+static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
+{
+       return 0;
+}
+#endif
 void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn,
                                   u32 r, bool is_io,
                                   u64 cpu_addr, u64 pci_addr, size_t size);
index 4234ddb..b20651c 100644 (file)
@@ -353,7 +353,7 @@ static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
        struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
        enum pci_barno bar;
 
-       for (bar = BAR_0; bar <= BAR_5; bar++)
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dw_pcie_ep_reset_bar(pci, bar);
 
        dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
index ca9aa45..0d151ce 100644 (file)
@@ -58,7 +58,7 @@ static void ls_pcie_ep_init(struct dw_pcie_ep *ep)
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        enum pci_barno bar;
 
-       for (bar = BAR_0; bar <= BAR_5; bar++)
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dw_pcie_ep_reset_bar(pci, bar);
 }
 
index 3a5fa26..f24f79a 100644 (file)
@@ -263,6 +263,7 @@ static const struct ls_pcie_drvdata ls2088_drvdata = {
 static const struct of_device_id ls_pcie_of_match[] = {
        { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
        { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
+       { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
        { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
        { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
        { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
index e35e9ea..3772b02 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/reset.h>
 #include <linux/resource.h>
 #include <linux/types.h>
+#include <linux/phy/phy.h>
 
 #include "pcie-designware.h"
 
@@ -96,12 +97,18 @@ struct meson_pcie_rc_reset {
        struct reset_control *apb;
 };
 
+struct meson_pcie_param {
+       bool has_shared_phy;
+};
+
 struct meson_pcie {
        struct dw_pcie pci;
        struct meson_pcie_mem_res mem_res;
        struct meson_pcie_clk_res clk_res;
        struct meson_pcie_rc_reset mrst;
        struct gpio_desc *reset_gpio;
+       struct phy *phy;
+       const struct meson_pcie_param *param;
 };
 
 static struct reset_control *meson_pcie_get_reset(struct meson_pcie *mp,
@@ -123,10 +130,12 @@ static int meson_pcie_get_resets(struct meson_pcie *mp)
 {
        struct meson_pcie_rc_reset *mrst = &mp->mrst;
 
-       mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
-       if (IS_ERR(mrst->phy))
-               return PTR_ERR(mrst->phy);
-       reset_control_deassert(mrst->phy);
+       if (!mp->param->has_shared_phy) {
+               mrst->phy = meson_pcie_get_reset(mp, "phy", PCIE_SHARED_RESET);
+               if (IS_ERR(mrst->phy))
+                       return PTR_ERR(mrst->phy);
+               reset_control_deassert(mrst->phy);
+       }
 
        mrst->port = meson_pcie_get_reset(mp, "port", PCIE_NORMAL_RESET);
        if (IS_ERR(mrst->port))
@@ -180,27 +189,52 @@ static int meson_pcie_get_mems(struct platform_device *pdev,
        if (IS_ERR(mp->mem_res.cfg_base))
                return PTR_ERR(mp->mem_res.cfg_base);
 
-       /* Meson SoC has two PCI controllers use same phy register*/
-       mp->mem_res.phy_base = meson_pcie_get_mem_shared(pdev, mp, "phy");
-       if (IS_ERR(mp->mem_res.phy_base))
-               return PTR_ERR(mp->mem_res.phy_base);
+       /* Meson AXG has two PCI controllers that share the same PHY registers */
+       if (!mp->param->has_shared_phy) {
+               mp->mem_res.phy_base =
+                       meson_pcie_get_mem_shared(pdev, mp, "phy");
+               if (IS_ERR(mp->mem_res.phy_base))
+                       return PTR_ERR(mp->mem_res.phy_base);
+       }
 
        return 0;
 }
 
-static void meson_pcie_power_on(struct meson_pcie *mp)
+static int meson_pcie_power_on(struct meson_pcie *mp)
 {
-       writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+       int ret = 0;
+
+       if (mp->param->has_shared_phy) {
+               ret = phy_init(mp->phy);
+               if (ret)
+                       return ret;
+
+               ret = phy_power_on(mp->phy);
+               if (ret) {
+                       phy_exit(mp->phy);
+                       return ret;
+               }
+       } else {
+               writel(MESON_PCIE_PHY_POWERUP, mp->mem_res.phy_base);
+       }
+
+       return 0;
 }
 
-static void meson_pcie_reset(struct meson_pcie *mp)
+static int meson_pcie_reset(struct meson_pcie *mp)
 {
        struct meson_pcie_rc_reset *mrst = &mp->mrst;
-
-       reset_control_assert(mrst->phy);
-       udelay(PCIE_RESET_DELAY);
-       reset_control_deassert(mrst->phy);
-       udelay(PCIE_RESET_DELAY);
+       int ret = 0;
+
+       if (mp->param->has_shared_phy) {
+               ret = phy_reset(mp->phy);
+               if (ret)
+                       return ret;
+       } else {
+               reset_control_assert(mrst->phy);
+               udelay(PCIE_RESET_DELAY);
+               reset_control_deassert(mrst->phy);
+               udelay(PCIE_RESET_DELAY);
+       }
 
        reset_control_assert(mrst->port);
        reset_control_assert(mrst->apb);
@@ -208,6 +242,8 @@ static void meson_pcie_reset(struct meson_pcie *mp)
        reset_control_deassert(mrst->port);
        reset_control_deassert(mrst->apb);
        udelay(PCIE_RESET_DELAY);
+
+       return 0;
 }
 
 static inline struct clk *meson_pcie_probe_clock(struct device *dev,
@@ -250,15 +286,17 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
        if (IS_ERR(res->port_clk))
                return PTR_ERR(res->port_clk);
 
-       res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
-       if (IS_ERR(res->mipi_gate))
-               return PTR_ERR(res->mipi_gate);
+       if (!mp->param->has_shared_phy) {
+               res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+               if (IS_ERR(res->mipi_gate))
+                       return PTR_ERR(res->mipi_gate);
+       }
 
-       res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
+       res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
        if (IS_ERR(res->general_clk))
                return PTR_ERR(res->general_clk);
 
-       res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
+       res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
        if (IS_ERR(res->clk))
                return PTR_ERR(res->clk);
 
@@ -287,9 +325,9 @@ static inline void meson_cfg_writel(struct meson_pcie *mp, u32 val, u32 reg)
 
 static void meson_pcie_assert_reset(struct meson_pcie *mp)
 {
-       gpiod_set_value_cansleep(mp->reset_gpio, 0);
-       udelay(500);
        gpiod_set_value_cansleep(mp->reset_gpio, 1);
+       udelay(500);
+       gpiod_set_value_cansleep(mp->reset_gpio, 0);
 }
 
 static void meson_pcie_init_dw(struct meson_pcie *mp)
@@ -524,6 +562,7 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 
 static int meson_pcie_probe(struct platform_device *pdev)
 {
+       const struct meson_pcie_param *match_data;
        struct device *dev = &pdev->dev;
        struct dw_pcie *pci;
        struct meson_pcie *mp;
@@ -537,6 +576,19 @@ static int meson_pcie_probe(struct platform_device *pdev)
        pci->dev = dev;
        pci->ops = &dw_pcie_ops;
 
+       match_data = of_device_get_match_data(dev);
+       if (!match_data) {
+               dev_err(dev, "failed to get match data\n");
+               return -ENODEV;
+       }
+       mp->param = match_data;
+
+       if (mp->param->has_shared_phy) {
+               mp->phy = devm_phy_get(dev, "pcie");
+               if (IS_ERR(mp->phy))
+                       return PTR_ERR(mp->phy);
+       }
+
        mp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(mp->reset_gpio)) {
                dev_err(dev, "get reset gpio failed\n");
@@ -555,13 +607,22 @@ static int meson_pcie_probe(struct platform_device *pdev)
                return ret;
        }
 
-       meson_pcie_power_on(mp);
-       meson_pcie_reset(mp);
+       ret = meson_pcie_power_on(mp);
+       if (ret) {
+               dev_err(dev, "phy power on failed, %d\n", ret);
+               return ret;
+       }
+
+       ret = meson_pcie_reset(mp);
+       if (ret) {
+               dev_err(dev, "reset failed, %d\n", ret);
+               goto err_phy;
+       }
 
        ret = meson_pcie_probe_clocks(mp);
        if (ret) {
                dev_err(dev, "init clock resources failed, %d\n", ret);
-               return ret;
+               goto err_phy;
        }
 
        platform_set_drvdata(pdev, mp);
@@ -569,15 +630,36 @@ static int meson_pcie_probe(struct platform_device *pdev)
        ret = meson_add_pcie_port(mp, pdev);
        if (ret < 0) {
                dev_err(dev, "Add PCIe port failed, %d\n", ret);
-               return ret;
+               goto err_phy;
        }
 
        return 0;
+
+err_phy:
+       if (mp->param->has_shared_phy) {
+               phy_power_off(mp->phy);
+               phy_exit(mp->phy);
+       }
+
+       return ret;
 }
 
+static struct meson_pcie_param meson_pcie_axg_param = {
+       .has_shared_phy = false,
+};
+
+static struct meson_pcie_param meson_pcie_g12a_param = {
+       .has_shared_phy = true,
+};
+
 static const struct of_device_id meson_pcie_of_match[] = {
        {
                .compatible = "amlogic,axg-pcie",
+               .data = &meson_pcie_axg_param,
+       },
+       {
+               .compatible = "amlogic,g12a-pcie",
+               .data = &meson_pcie_g12a_param,
        },
        {},
 };
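
The meson_pcie_assert_reset() hunk above swaps the values written to the reset GPIO. With the descriptor-based GPIO API the value is logical rather than physical, so writing 1 asserts the line and any active-low PERST# polarity is handled by the GPIO core from the device-tree flags; a brief hedged sketch of the same idiom (helper name and delay are illustrative only):

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Sketch: assert, hold, then release a PERST#-style reset line. */
static void example_pulse_perst(struct gpio_desc *reset_gpio)
{
	gpiod_set_value_cansleep(reset_gpio, 1);	/* logical assert */
	udelay(500);
	gpiod_set_value_cansleep(reset_gpio, 0);	/* logical deassert */
}
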
index d00252b..9e2482b 100644 (file)
@@ -422,7 +422,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
        artpec6_pcie_wait_for_phy(artpec6_pcie);
        artpec6_pcie_set_nfts(artpec6_pcie);
 
-       for (bar = BAR_0; bar <= BAR_5; bar++)
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dw_pcie_ep_reset_bar(pci, bar);
 }
 
index 0f36a92..e570530 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/pci_regs.h>
@@ -78,7 +79,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
 irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
 {
        int i, pos, irq;
-       u32 val, num_ctrls;
+       unsigned long val;
+       u32 status, num_ctrls;
        irqreturn_t ret = IRQ_NONE;
 
        num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
@@ -86,14 +88,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
        for (i = 0; i < num_ctrls; i++) {
                dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
                                        (i * MSI_REG_CTRL_BLOCK_SIZE),
-                                   4, &val);
-               if (!val)
+                                   4, &status);
+               if (!status)
                        continue;
 
                ret = IRQ_HANDLED;
+               val = status;
                pos = 0;
-               while ((pos = find_next_bit((unsigned long *) &val,
-                                           MAX_MSI_IRQS_PER_CTRL,
+               while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
                                            pos)) != MAX_MSI_IRQS_PER_CTRL) {
                        irq = irq_find_mapping(pp->irq_domain,
                                               (i * MAX_MSI_IRQS_PER_CTRL) +
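
The dw_handle_msi_irq() hunk above stops casting a u32 status to unsigned long * for find_next_bit() and instead widens it into a real unsigned long first. The bitmap helpers operate on unsigned long words, so on 64-bit targets the old cast let them read 4 bytes beyond the 32-bit status. A small userspace illustration of the width mismatch (plain C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t status = 0x80000000u;	/* pretend MSI status word */
	unsigned long val = status;	/* widen explicitly, as the fix does */

	/*
	 * On most 64-bit ABIs sizeof(unsigned long) is 8, so treating
	 * &status as an unsigned long * would make a bitmap helper read
	 * 4 bytes of unrelated adjacent memory as well.
	 */
	printf("u32: %zu bytes, unsigned long: %zu bytes\n",
	       sizeof(status), sizeof(val));
	printf("widened status: %#lx\n", val);
	return 0;
}
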
index b58fdcb..73646b6 100644 (file)
@@ -70,7 +70,7 @@ static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
        struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
        enum pci_barno bar;
 
-       for (bar = BAR_0; bar <= BAR_5; bar++)
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
                dw_pcie_ep_reset_bar(pci, bar);
 }
 
index 5a18e94..5accdd6 100644 (file)
@@ -214,7 +214,7 @@ struct dw_pcie_ep {
        phys_addr_t             phys_base;
        size_t                  addr_size;
        size_t                  page_size;
-       u8                      bar_to_atu[6];
+       u8                      bar_to_atu[PCI_STD_NUM_BARS];
        phys_addr_t             *outbound_addr;
        unsigned long           *ib_window_map;
        unsigned long           *ob_window_map;
index f89f5ac..cbe95f0 100644 (file)
@@ -40,8 +40,6 @@
 #define APPL_PINMUX_CLKREQ_OVERRIDE            BIT(3)
 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN  BIT(4)
 #define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE     BIT(5)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN         BIT(9)
-#define APPL_PINMUX_CLKREQ_OUT_OVRD            BIT(10)
 
 #define APPL_CTRL                              0x4
 #define APPL_CTRL_SYS_PRE_DET_STATE            BIT(6)
@@ -1193,8 +1191,8 @@ static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
 
        if (!pcie->supports_clkreq) {
                val = appl_readl(pcie, APPL_PINMUX);
-               val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
-               val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
+               val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
+               val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
                appl_writel(pcie, val, APPL_PINMUX);
        }
 
index 3f30ee4..8fd7bad 100644 (file)
 #define PCL_PIPEMON                    0x0044
 #define PCL_PCLK_ALIVE                 BIT(15)
 
+#define PCL_MODE                       0x8000
+#define PCL_MODE_REGEN                 BIT(8)
+#define PCL_MODE_REGVAL                        BIT(0)
+
 #define PCL_APP_READY_CTRL             0x8008
 #define PCL_APP_LTSSM_ENABLE           BIT(0)
 
@@ -85,6 +89,12 @@ static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
 {
        u32 val;
 
+       /* set RC MODE */
+       val = readl(priv->base + PCL_MODE);
+       val |= PCL_MODE_REGEN;
+       val &= ~PCL_MODE_REGVAL;
+       writel(val, priv->base + PCL_MODE);
+
        /* use auxiliary power detection */
        val = readl(priv->base + PCL_APP_PM0);
        val |= PCL_SYS_AUX_PWR_DET;
index fc0fe4d..a938af4 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/msi.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 
        (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
         PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where))
 
-#define PIO_TIMEOUT_MS                 1
+#define PIO_RETRY_CNT                  500
+#define PIO_RETRY_DELAY                        2 /* 2 us */
 
 #define LINK_WAIT_MAX_RETRIES          10
 #define LINK_WAIT_USLEEP_MIN           90000
 #define LINK_WAIT_USLEEP_MAX           100000
+#define RETRAIN_WAIT_MAX_RETRIES       10
+#define RETRAIN_WAIT_USLEEP_US         2000
 
 #define MSI_IRQ_NUM                    32
 
@@ -239,6 +243,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
        return -ETIMEDOUT;
 }
 
+static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
+{
+       size_t retries;
+
+       for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
+               if (!advk_pcie_link_up(pcie))
+                       break;
+               udelay(RETRAIN_WAIT_USLEEP_US);
+       }
+}
+
 static void advk_pcie_setup_hw(struct advk_pcie *pcie)
 {
        u32 reg;
@@ -324,6 +339,14 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
        reg |= PIO_CTRL_ADDR_WIN_DISABLE;
        advk_writel(pcie, reg, PIO_CTRL);
 
+       /*
+        * The PERST# signal could have been asserted by the pinctrl subsystem
+        * before the probe() callback is called, putting the endpoint into
+        * fundamental reset. As required by the PCI Express spec, a delay of
+        * at least 100ms is needed after such a reset before link training.
+        */
+       msleep(PCI_PM_D3COLD_WAIT);
+
        /* Start link training */
        reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
        reg |= PCIE_CORE_LINK_TRAINING;
@@ -383,17 +406,16 @@ static void advk_pcie_check_pio_status(struct advk_pcie *pcie)
 static int advk_pcie_wait_pio(struct advk_pcie *pcie)
 {
        struct device *dev = &pcie->pdev->dev;
-       unsigned long timeout;
+       int i;
 
-       timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS);
-
-       while (time_before(jiffies, timeout)) {
+       for (i = 0; i < PIO_RETRY_CNT; i++) {
                u32 start, isr;
 
                start = advk_readl(pcie, PIO_START);
                isr = advk_readl(pcie, PIO_ISR);
                if (!start && isr)
                        return 0;
+               udelay(PIO_RETRY_DELAY);
        }
 
        dev_err(dev, "config read/write timed out\n");
@@ -415,7 +437,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
 
        case PCI_EXP_RTCTL: {
                u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
-               *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+               *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
                return PCI_BRIDGE_EMUL_HANDLED;
        }
 
@@ -426,11 +448,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
                return PCI_BRIDGE_EMUL_HANDLED;
        }
 
+       case PCI_EXP_LNKCTL: {
+               /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
+               u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
+                       ~(PCI_EXP_LNKSTA_LT << 16);
+               if (!advk_pcie_link_up(pcie))
+                       val |= (PCI_EXP_LNKSTA_LT << 16);
+               *value = val;
+               return PCI_BRIDGE_EMUL_HANDLED;
+       }
+
        case PCI_CAP_LIST_ID:
        case PCI_EXP_DEVCAP:
        case PCI_EXP_DEVCTL:
        case PCI_EXP_LNKCAP:
-       case PCI_EXP_LNKCTL:
                *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
                return PCI_BRIDGE_EMUL_HANDLED;
        default:
@@ -447,14 +478,24 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
 
        switch (reg) {
        case PCI_EXP_DEVCTL:
+               advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+               break;
+
        case PCI_EXP_LNKCTL:
                advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+               if (new & PCI_EXP_LNKCTL_RL)
+                       advk_pcie_wait_for_retrain(pcie);
                break;
 
-       case PCI_EXP_RTCTL:
-               new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
-               advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+       case PCI_EXP_RTCTL: {
+               /* Only mask/unmask PME interrupt */
+               u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
+                       ~PCIE_MSG_PM_PME_MASK;
+               if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
+                       val |= PCIE_MSG_PM_PME_MASK;
+               advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
                break;
+       }
 
        case PCI_EXP_RTSTA:
                new = (new & PCI_EXP_RTSTA_PME) >> 9;
@@ -479,18 +520,20 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
 {
        struct pci_bridge_emul *bridge = &pcie->bridge;
 
-       bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
-       bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+       bridge->conf.vendor =
+               cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
+       bridge->conf.device =
+               cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
        bridge->conf.class_revision =
-               advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+               cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
 
        /* Support 32 bits I/O addressing */
        bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
        bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
 
        /* Support 64 bits memory pref */
-       bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
-       bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+       bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
+       bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
 
        /* Support interrupt A for MSI feature */
        bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
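
The advk_pcie_wait_pio() change above trades a jiffies deadline for a fixed retry count with udelay(), which keeps the wait bounded and usable from atomic context. When only a single register needs polling, the same bounded wait can also be expressed with the iopoll helpers; a sketch under that assumption (the register offset and bit below are placeholders, not aardvark registers):

#include <linux/bits.h>
#include <linux/iopoll.h>

#define EXAMPLE_STATUS		0x40		/* placeholder register offset */
#define EXAMPLE_STATUS_DONE	BIT(0)		/* placeholder completion bit */

/* Sketch: poll every 2 us, give up after 1 ms; 0 on success, -ETIMEDOUT otherwise. */
static int example_wait_done(void __iomem *base)
{
	u32 val;

	return readl_poll_timeout_atomic(base + EXAMPLE_STATUS, val,
					 val & EXAMPLE_STATUS_DONE, 2, 1000);
}
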
index f1f3002..9977abf 100644 (file)
@@ -76,11 +76,6 @@ static enum pci_protocol_version_t pci_protocol_versions[] = {
        PCI_PROTOCOL_VERSION_1_1,
 };
 
-/*
- * Protocol version negotiated by hv_pci_protocol_negotiation().
- */
-static enum pci_protocol_version_t pci_protocol_version;
-
 #define PCI_CONFIG_MMIO_LENGTH 0x2000
 #define CFG_PAGE_OFFSET 0x1000
 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
@@ -307,7 +302,7 @@ struct pci_bus_relations {
 struct pci_q_res_req_response {
        struct vmpacket_descriptor hdr;
        s32 status;                     /* negative values are failures */
-       u32 probed_bar[6];
+       u32 probed_bar[PCI_STD_NUM_BARS];
 } __packed;
 
 struct pci_set_power {
@@ -455,12 +450,15 @@ enum hv_pcibus_state {
        hv_pcibus_init = 0,
        hv_pcibus_probed,
        hv_pcibus_installed,
+       hv_pcibus_removing,
        hv_pcibus_removed,
        hv_pcibus_maximum
 };
 
 struct hv_pcibus_device {
        struct pci_sysdata sysdata;
+       /* Protocol version negotiated with the host */
+       enum pci_protocol_version_t protocol_version;
        enum hv_pcibus_state state;
        refcount_t remove_lock;
        struct hv_device *hdev;
@@ -539,7 +537,7 @@ struct hv_pci_dev {
         * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
         * read it back, for each of the BAR offsets within config space.
         */
-       u32 probed_bar[6];
+       u32 probed_bar[PCI_STD_NUM_BARS];
 };
 
 struct hv_pci_compl {
@@ -1224,7 +1222,7 @@ static void hv_irq_unmask(struct irq_data *data)
         * negative effect (yet?).
         */
 
-       if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
+       if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
                /*
                 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
                 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
@@ -1394,7 +1392,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
        ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
        ctxt.pci_pkt.compl_ctxt = &comp;
 
-       switch (pci_protocol_version) {
+       switch (hbus->protocol_version) {
        case PCI_PROTOCOL_VERSION_1_1:
                size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
                                        dest,
@@ -1610,7 +1608,7 @@ static void survey_child_resources(struct hv_pcibus_device *hbus)
         * so it's sufficient to just add them up without tracking alignment.
         */
        list_for_each_entry(hpdev, &hbus->children, list_entry) {
-               for (i = 0; i < 6; i++) {
+               for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                        if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
                                dev_err(&hbus->hdev->device,
                                        "There's an I/O BAR in this list!\n");
@@ -1681,10 +1679,27 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
 
        spin_lock_irqsave(&hbus->device_list_lock, flags);
 
+       /*
+        * Clear the memory enable bit, in case it's already set. This occurs
+        * in the suspend path of hibernation, where the device is suspended,
+        * resumed and suspended again: see hibernation_snapshot() and
+        * hibernation_platform_enter().
+        *
+        * If the memory enable bit is already set, Hyper-V silently ignores
+        * the BAR updates below, and the related PCI device driver cannot
+        * work, because reading from the device register(s) always returns
+        * 0xFFFFFFFF.
+        */
+       list_for_each_entry(hpdev, &hbus->children, list_entry) {
+               _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
+               command &= ~PCI_COMMAND_MEMORY;
+               _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
+       }
+
        /* Pick addresses for the BARs. */
        do {
                list_for_each_entry(hpdev, &hbus->children, list_entry) {
-                       for (i = 0; i < 6; i++) {
+                       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                                bar_val = hpdev->probed_bar[i];
                                if (bar_val == 0)
                                        continue;
@@ -1841,7 +1856,7 @@ static void q_resource_requirements(void *context, struct pci_response *resp,
                        "query resource requirements failed: %x\n",
                        resp->status);
        } else {
-               for (i = 0; i < 6; i++) {
+               for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                        completion->hpdev->probed_bar[i] =
                                q_res_req->probed_bar[i];
                }
@@ -2107,6 +2122,12 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
        unsigned long flags;
        bool pending_dr;
 
+       if (hbus->state == hv_pcibus_removing) {
+               dev_info(&hbus->hdev->device,
+                        "PCI VMBus BUS_RELATIONS: ignored\n");
+               return;
+       }
+
        dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
        if (!dr_wrk)
                return;
@@ -2223,11 +2244,19 @@ static void hv_eject_device_work(struct work_struct *work)
  */
 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
 {
+       struct hv_pcibus_device *hbus = hpdev->hbus;
+       struct hv_device *hdev = hbus->hdev;
+
+       if (hbus->state == hv_pcibus_removing) {
+               dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
+               return;
+       }
+
        hpdev->state = hv_pcichild_ejecting;
        get_pcichild(hpdev);
        INIT_WORK(&hpdev->wrk, hv_eject_device_work);
-       get_hvpcibus(hpdev->hbus);
-       queue_work(hpdev->hbus->wq, &hpdev->wrk);
+       get_hvpcibus(hbus);
+       queue_work(hbus->wq, &hpdev->wrk);
 }
 
 /**
@@ -2379,8 +2408,11 @@ static void hv_pci_onchannelcallback(void *context)
  * failing if the host doesn't support the necessary protocol
  * level.
  */
-static int hv_pci_protocol_negotiation(struct hv_device *hdev)
+static int hv_pci_protocol_negotiation(struct hv_device *hdev,
+                                      enum pci_protocol_version_t version[],
+                                      int num_version)
 {
+       struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
        struct pci_version_request *version_req;
        struct hv_pci_compl comp_pkt;
        struct pci_packet *pkt;
@@ -2403,8 +2435,8 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
        version_req = (struct pci_version_request *)&pkt->message;
        version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
 
-       for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) {
-               version_req->protocol_version = pci_protocol_versions[i];
+       for (i = 0; i < num_version; i++) {
+               version_req->protocol_version = version[i];
                ret = vmbus_sendpacket(hdev->channel, version_req,
                                sizeof(struct pci_version_request),
                                (unsigned long)pkt, VM_PKT_DATA_INBAND,
@@ -2420,10 +2452,10 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
                }
 
                if (comp_pkt.completion_status >= 0) {
-                       pci_protocol_version = pci_protocol_versions[i];
+                       hbus->protocol_version = version[i];
                        dev_info(&hdev->device,
                                "PCI VMBus probing: Using version %#x\n",
-                               pci_protocol_version);
+                               hbus->protocol_version);
                        goto exit;
                }
 
@@ -2707,7 +2739,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
        u32 wslot;
        int ret;
 
-       size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2)
+       size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
                        ? sizeof(*res_assigned) : sizeof(*res_assigned2);
 
        pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
@@ -2726,7 +2758,7 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
                pkt->completion_func = hv_pci_generic_compl;
                pkt->compl_ctxt = &comp_pkt;
 
-               if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) {
+               if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
                        res_assigned =
                                (struct pci_resources_assigned *)&pkt->message;
                        res_assigned->message_type.type =
@@ -2870,9 +2902,27 @@ static int hv_pci_probe(struct hv_device *hdev,
         * hv_pcibus_device contains the hypercall arguments for retargeting in
         * hv_irq_unmask(). Those must not cross a page boundary.
         */
-       BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE);
+       BUILD_BUG_ON(sizeof(*hbus) > HV_HYP_PAGE_SIZE);
 
-       hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
+       /*
+        * With the recent 59bb47985c1d ("mm, sl[aou]b: guarantee natural
+        * alignment for kmalloc(power-of-two)"), kzalloc() is able to allocate
+        * a 4KB buffer that is guaranteed to be 4KB-aligned. Here the size and
+        * alignment of hbus is important because hbus's field
+        * retarget_msi_interrupt_params must not cross a 4KB page boundary.
+        *
+        * Here we prefer kzalloc to get_zeroed_page(), because a buffer
+        * allocated by the latter is not tracked and scanned by kmemleak, and
+        * hence kmemleak reports the pointer contained in the hbus buffer
+        * (i.e. the hpdev struct, which is created in new_pcichild_device() and
+        * is tracked by hbus->children) as memory leak (false positive).
+        *
+        * If the kernel doesn't have 59bb47985c1d, get_zeroed_page() *must* be
+        * used to allocate the hbus buffer and we can avoid the kmemleak false
+        * positive by using kmemleak_alloc() and kmemleak_free() to ask
+        * kmemleak to track and scan the hbus buffer.
+        */
+       hbus = (struct hv_pcibus_device *)kzalloc(HV_HYP_PAGE_SIZE, GFP_KERNEL);
        if (!hbus)
                return -ENOMEM;
        hbus->state = hv_pcibus_init;
@@ -2930,7 +2980,8 @@ static int hv_pci_probe(struct hv_device *hdev,
 
        hv_set_drvdata(hdev, hbus);
 
-       ret = hv_pci_protocol_negotiation(hdev);
+       ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
+                                         ARRAY_SIZE(pci_protocol_versions));
        if (ret)
                goto close;
 
@@ -3011,7 +3062,7 @@ free_bus:
        return ret;
 }
 
-static void hv_pci_bus_exit(struct hv_device *hdev)
+static int hv_pci_bus_exit(struct hv_device *hdev, bool hibernating)
 {
        struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
        struct {
@@ -3027,16 +3078,20 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
         * access the per-channel ringbuffer any longer.
         */
        if (hdev->channel->rescind)
-               return;
+               return 0;
 
-       /* Delete any children which might still exist. */
-       memset(&relations, 0, sizeof(relations));
-       hv_pci_devices_present(hbus, &relations);
+       if (!hibernating) {
+               /* Delete any children which might still exist. */
+               memset(&relations, 0, sizeof(relations));
+               hv_pci_devices_present(hbus, &relations);
+       }
 
        ret = hv_send_resources_released(hdev);
-       if (ret)
+       if (ret) {
                dev_err(&hdev->device,
                        "Couldn't send resources released packet(s)\n");
+               return ret;
+       }
 
        memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
        init_completion(&comp_pkt.host_event);
@@ -3049,8 +3104,13 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
                               (unsigned long)&pkt.teardown_packet,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-       if (!ret)
-               wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ);
+       if (ret)
+               return ret;
+
+       if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0)
+               return -ETIMEDOUT;
+
+       return 0;
 }
 
 /**
@@ -3062,6 +3122,7 @@ static void hv_pci_bus_exit(struct hv_device *hdev)
 static int hv_pci_remove(struct hv_device *hdev)
 {
        struct hv_pcibus_device *hbus;
+       int ret;
 
        hbus = hv_get_drvdata(hdev);
        if (hbus->state == hv_pcibus_installed) {
@@ -3074,7 +3135,7 @@ static int hv_pci_remove(struct hv_device *hdev)
                hbus->state = hv_pcibus_removed;
        }
 
-       hv_pci_bus_exit(hdev);
+       ret = hv_pci_bus_exit(hdev, false);
 
        vmbus_close(hdev->channel);
 
@@ -3090,10 +3151,97 @@ static int hv_pci_remove(struct hv_device *hdev)
 
        hv_put_dom_num(hbus->sysdata.domain);
 
-       free_page((unsigned long)hbus);
+       kfree(hbus);
+       return ret;
+}
+
+static int hv_pci_suspend(struct hv_device *hdev)
+{
+       struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+       enum hv_pcibus_state old_state;
+       int ret;
+
+       /*
+        * hv_pci_suspend() must make sure there are no pending work items
+        * before calling vmbus_close(), since it runs in a process context
+        * as a callback in dpm_suspend().  When it starts to run, the channel
+        * callback hv_pci_onchannelcallback(), which runs in a tasklet
+        * context, can still be running concurrently and scheduling new work
+        * items onto hbus->wq in hv_pci_devices_present() and
+        * hv_pci_eject_device(), and the work item handlers can access the
+        * vmbus channel, which hv_pci_suspend() may be closing, e.g.
+        * the work item handler pci_devices_present_work() ->
+        * new_pcichild_device() writes to the vmbus channel.
+        *
+        * To eliminate the race, hv_pci_suspend() disables the channel
+        * callback tasklet, sets hbus->state to hv_pcibus_removing, and
+        * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
+        * it knows that no new work item can be scheduled, and then it flushes
+        * hbus->wq and safely closes the vmbus channel.
+        */
+       tasklet_disable(&hdev->channel->callback_event);
+
+       /* Change the hbus state to prevent new work items. */
+       old_state = hbus->state;
+       if (hbus->state == hv_pcibus_installed)
+               hbus->state = hv_pcibus_removing;
+
+       tasklet_enable(&hdev->channel->callback_event);
+
+       if (old_state != hv_pcibus_installed)
+               return -EINVAL;
+
+       flush_workqueue(hbus->wq);
+
+       ret = hv_pci_bus_exit(hdev, true);
+       if (ret)
+               return ret;
+
+       vmbus_close(hdev->channel);
+
        return 0;
 }
 
+static int hv_pci_resume(struct hv_device *hdev)
+{
+       struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
+       enum pci_protocol_version_t version[1];
+       int ret;
+
+       hbus->state = hv_pcibus_init;
+
+       ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
+                        hv_pci_onchannelcallback, hbus);
+       if (ret)
+               return ret;
+
+       /* Only use the version that was in use before hibernation. */
+       version[0] = hbus->protocol_version;
+       ret = hv_pci_protocol_negotiation(hdev, version, 1);
+       if (ret)
+               goto out;
+
+       ret = hv_pci_query_relations(hdev);
+       if (ret)
+               goto out;
+
+       ret = hv_pci_enter_d0(hdev);
+       if (ret)
+               goto out;
+
+       ret = hv_send_resources_allocated(hdev);
+       if (ret)
+               goto out;
+
+       prepopulate_bars(hbus);
+
+       hbus->state = hv_pcibus_installed;
+       return 0;
+out:
+       vmbus_close(hdev->channel);
+       return ret;
+}
+
 static const struct hv_vmbus_device_id hv_pci_id_table[] = {
        /* PCI Pass-through Class ID */
        /* 44C4F61D-4444-4400-9D52-802E27EDE19F */
@@ -3108,6 +3256,8 @@ static struct hv_driver hv_pci_drv = {
        .id_table       = hv_pci_id_table,
        .probe          = hv_pci_probe,
        .remove         = hv_pci_remove,
+       .suspend        = hv_pci_suspend,
+       .resume         = hv_pci_resume,
 };
 
 static void __exit exit_hv_pci_drv(void)
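
The allocation comment in hv_pci_probe() above notes that a kernel without 59bb47985c1d would have to stay with get_zeroed_page() and ask kmemleak to scan the page by hand. A hedged sketch of that fallback (helper names are illustrative; this is not code from the series):

#include <linux/gfp.h>
#include <linux/kmemleak.h>

/* Sketch: page-aligned, zeroed hbus buffer that kmemleak still scans. */
static struct hv_pcibus_device *example_alloc_hbus(void)
{
	struct hv_pcibus_device *hbus;

	hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL);
	if (hbus)
		/*
		 * get_zeroed_page() memory is not scanned by default;
		 * registering it lets kmemleak see the child-device
		 * pointers stored inside and avoids false positives.
		 */
		kmemleak_alloc(hbus, PAGE_SIZE, 1, GFP_KERNEL);

	return hbus;
}

static void example_free_hbus(struct hv_pcibus_device *hbus)
{
	kmemleak_free(hbus);
	free_page((unsigned long)hbus);
}
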
index f127ce8..9491e26 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/bitfield.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/pci.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/pci-acpi.h>
index 2d457bf..9a5c352 100644 (file)
@@ -1245,6 +1245,32 @@ out:
        return ret;
 }
 
+static void iproc_pcie_invalidate_mapping(struct iproc_pcie *pcie)
+{
+       struct iproc_pcie_ib *ib = &pcie->ib;
+       struct iproc_pcie_ob *ob = &pcie->ob;
+       int idx;
+
+       if (pcie->ep_is_internal)
+               return;
+
+       if (pcie->need_ob_cfg) {
+               /* iterate through all OARR mapping regions */
+               for (idx = ob->nr_windows - 1; idx >= 0; idx--) {
+                       iproc_pcie_write_reg(pcie,
+                                            MAP_REG(IPROC_PCIE_OARR0, idx), 0);
+               }
+       }
+
+       if (pcie->need_ib_cfg) {
+               /* iterate through all IARR mapping regions */
+               for (idx = 0; idx < ib->nr_regions; idx++) {
+                       iproc_pcie_write_reg(pcie,
+                                            MAP_REG(IPROC_PCIE_IARR0, idx), 0);
+               }
+       }
+}
+
 static int iproce_pcie_get_msi(struct iproc_pcie *pcie,
                               struct device_node *msi_node,
                               u64 *msi_addr)
@@ -1517,6 +1543,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
        iproc_pcie_perst_ctrl(pcie, true);
        iproc_pcie_perst_ctrl(pcie, false);
 
+       iproc_pcie_invalidate_mapping(pcie);
+
        if (pcie->need_ob_cfg) {
                ret = iproc_pcie_map_ranges(pcie, res);
                if (ret) {
index a45a644..32f37d0 100644 (file)
@@ -235,7 +235,7 @@ static int mobiveil_pcie_write(void __iomem *addr, int size, u32 val)
        return PCIBIOS_SUCCESSFUL;
 }
 
-static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
+static u32 mobiveil_csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
 {
        void *addr;
        u32 val;
@@ -250,7 +250,8 @@ static u32 csr_read(struct mobiveil_pcie *pcie, u32 off, size_t size)
        return val;
 }
 
-static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
+static void mobiveil_csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off,
+                              size_t size)
 {
        void *addr;
        int ret;
@@ -262,19 +263,19 @@ static void csr_write(struct mobiveil_pcie *pcie, u32 val, u32 off, size_t size)
                dev_err(&pcie->pdev->dev, "write CSR address failed\n");
 }
 
-static u32 csr_readl(struct mobiveil_pcie *pcie, u32 off)
+static u32 mobiveil_csr_readl(struct mobiveil_pcie *pcie, u32 off)
 {
-       return csr_read(pcie, off, 0x4);
+       return mobiveil_csr_read(pcie, off, 0x4);
 }
 
-static void csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
+static void mobiveil_csr_writel(struct mobiveil_pcie *pcie, u32 val, u32 off)
 {
-       csr_write(pcie, val, off, 0x4);
+       mobiveil_csr_write(pcie, val, off, 0x4);
 }
 
 static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie)
 {
-       return (csr_readl(pcie, LTSSM_STATUS) &
+       return (mobiveil_csr_readl(pcie, LTSSM_STATUS) &
                LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0;
 }
 
@@ -323,7 +324,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus,
                PCI_SLOT(devfn) << PAB_DEVICE_SHIFT |
                PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT;
 
-       csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
+       mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0));
 
        return pcie->config_axi_slave_base + where;
 }
@@ -353,13 +354,14 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
        chained_irq_enter(chip, desc);
 
        /* read INTx status */
-       val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
-       mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+       val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+       mask = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
        intr_status = val & mask;
 
        /* Handle INTx */
        if (intr_status & PAB_INTP_INTX_MASK) {
-               shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT);
+               shifted_status = mobiveil_csr_readl(pcie,
+                                                   PAB_INTP_AMBA_MISC_STAT);
                shifted_status &= PAB_INTP_INTX_MASK;
                shifted_status >>= PAB_INTX_START;
                do {
@@ -373,12 +375,13 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
                                                            bit);
 
                                /* clear interrupt handled */
-                               csr_writel(pcie, 1 << (PAB_INTX_START + bit),
-                                          PAB_INTP_AMBA_MISC_STAT);
+                               mobiveil_csr_writel(pcie,
+                                                   1 << (PAB_INTX_START + bit),
+                                                   PAB_INTP_AMBA_MISC_STAT);
                        }
 
-                       shifted_status = csr_readl(pcie,
-                                                  PAB_INTP_AMBA_MISC_STAT);
+                       shifted_status = mobiveil_csr_readl(pcie,
+                                                           PAB_INTP_AMBA_MISC_STAT);
                        shifted_status &= PAB_INTP_INTX_MASK;
                        shifted_status >>= PAB_INTX_START;
                } while (shifted_status != 0);
@@ -413,7 +416,7 @@ static void mobiveil_pcie_isr(struct irq_desc *desc)
        }
 
        /* Clear the interrupt status */
-       csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
+       mobiveil_csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT);
        chained_irq_exit(chip, desc);
 }
 
@@ -474,24 +477,24 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
                return;
        }
 
-       value = csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
+       value = mobiveil_csr_readl(pcie, PAB_PEX_AMAP_CTRL(win_num));
        value &= ~(AMAP_CTRL_TYPE_MASK << AMAP_CTRL_TYPE_SHIFT | WIN_SIZE_MASK);
        value |= type << AMAP_CTRL_TYPE_SHIFT | 1 << AMAP_CTRL_EN_SHIFT |
                 (lower_32_bits(size64) & WIN_SIZE_MASK);
-       csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
+       mobiveil_csr_writel(pcie, value, PAB_PEX_AMAP_CTRL(win_num));
 
-       csr_writel(pcie, upper_32_bits(size64),
-                  PAB_EXT_PEX_AMAP_SIZEN(win_num));
+       mobiveil_csr_writel(pcie, upper_32_bits(size64),
+                           PAB_EXT_PEX_AMAP_SIZEN(win_num));
 
-       csr_writel(pcie, lower_32_bits(cpu_addr),
-                  PAB_PEX_AMAP_AXI_WIN(win_num));
-       csr_writel(pcie, upper_32_bits(cpu_addr),
-                  PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
+       mobiveil_csr_writel(pcie, lower_32_bits(cpu_addr),
+                           PAB_PEX_AMAP_AXI_WIN(win_num));
+       mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+                           PAB_EXT_PEX_AMAP_AXI_WIN(win_num));
 
-       csr_writel(pcie, lower_32_bits(pci_addr),
-                  PAB_PEX_AMAP_PEX_WIN_L(win_num));
-       csr_writel(pcie, upper_32_bits(pci_addr),
-                  PAB_PEX_AMAP_PEX_WIN_H(win_num));
+       mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+                           PAB_PEX_AMAP_PEX_WIN_L(win_num));
+       mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+                           PAB_PEX_AMAP_PEX_WIN_H(win_num));
 
        pcie->ib_wins_configured++;
 }
@@ -515,27 +518,29 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
         * program Enable Bit to 1, Type Bit to (00) base 2, AXI Window Size Bit
         * to 4 KB in PAB_AXI_AMAP_CTRL register
         */
-       value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
+       value = mobiveil_csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num));
        value &= ~(WIN_TYPE_MASK << WIN_TYPE_SHIFT | WIN_SIZE_MASK);
        value |= 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT |
                 (lower_32_bits(size64) & WIN_SIZE_MASK);
-       csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
+       mobiveil_csr_writel(pcie, value, PAB_AXI_AMAP_CTRL(win_num));
 
-       csr_writel(pcie, upper_32_bits(size64), PAB_EXT_AXI_AMAP_SIZE(win_num));
+       mobiveil_csr_writel(pcie, upper_32_bits(size64),
+                           PAB_EXT_AXI_AMAP_SIZE(win_num));
 
        /*
         * program AXI window base with appropriate value in
         * PAB_AXI_AMAP_AXI_WIN0 register
         */
-       csr_writel(pcie, lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
-                  PAB_AXI_AMAP_AXI_WIN(win_num));
-       csr_writel(pcie, upper_32_bits(cpu_addr),
-                  PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
+       mobiveil_csr_writel(pcie,
+                           lower_32_bits(cpu_addr) & (~AXI_WINDOW_ALIGN_MASK),
+                           PAB_AXI_AMAP_AXI_WIN(win_num));
+       mobiveil_csr_writel(pcie, upper_32_bits(cpu_addr),
+                           PAB_EXT_AXI_AMAP_AXI_WIN(win_num));
 
-       csr_writel(pcie, lower_32_bits(pci_addr),
-                  PAB_AXI_AMAP_PEX_WIN_L(win_num));
-       csr_writel(pcie, upper_32_bits(pci_addr),
-                  PAB_AXI_AMAP_PEX_WIN_H(win_num));
+       mobiveil_csr_writel(pcie, lower_32_bits(pci_addr),
+                           PAB_AXI_AMAP_PEX_WIN_L(win_num));
+       mobiveil_csr_writel(pcie, upper_32_bits(pci_addr),
+                           PAB_AXI_AMAP_PEX_WIN_H(win_num));
 
        pcie->ob_wins_configured++;
 }
@@ -579,42 +584,42 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
        struct resource_entry *win;
 
        /* setup bus numbers */
-       value = csr_readl(pcie, PCI_PRIMARY_BUS);
+       value = mobiveil_csr_readl(pcie, PCI_PRIMARY_BUS);
        value &= 0xff000000;
        value |= 0x00ff0100;
-       csr_writel(pcie, value, PCI_PRIMARY_BUS);
+       mobiveil_csr_writel(pcie, value, PCI_PRIMARY_BUS);
 
        /*
         * program Bus Master Enable Bit in Command Register in PAB Config
         * Space
         */
-       value = csr_readl(pcie, PCI_COMMAND);
+       value = mobiveil_csr_readl(pcie, PCI_COMMAND);
        value |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
-       csr_writel(pcie, value, PCI_COMMAND);
+       mobiveil_csr_writel(pcie, value, PCI_COMMAND);
 
        /*
         * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL
         * register
         */
-       pab_ctrl = csr_readl(pcie, PAB_CTRL);
+       pab_ctrl = mobiveil_csr_readl(pcie, PAB_CTRL);
        pab_ctrl |= (1 << AMBA_PIO_ENABLE_SHIFT) | (1 << PEX_PIO_ENABLE_SHIFT);
-       csr_writel(pcie, pab_ctrl, PAB_CTRL);
+       mobiveil_csr_writel(pcie, pab_ctrl, PAB_CTRL);
 
-       csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
-                  PAB_INTP_AMBA_MISC_ENB);
+       mobiveil_csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK),
+                           PAB_INTP_AMBA_MISC_ENB);
 
        /*
         * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in
         * PAB_AXI_PIO_CTRL Register
         */
-       value = csr_readl(pcie, PAB_AXI_PIO_CTRL);
+       value = mobiveil_csr_readl(pcie, PAB_AXI_PIO_CTRL);
        value |= APIO_EN_MASK;
-       csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
+       mobiveil_csr_writel(pcie, value, PAB_AXI_PIO_CTRL);
 
        /* Enable PCIe PIO master */
-       value = csr_readl(pcie, PAB_PEX_PIO_CTRL);
+       value = mobiveil_csr_readl(pcie, PAB_PEX_PIO_CTRL);
        value |= 1 << PIO_ENABLE_SHIFT;
-       csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
+       mobiveil_csr_writel(pcie, value, PAB_PEX_PIO_CTRL);
 
        /*
         * we'll program one outbound window for config reads and
@@ -647,10 +652,10 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
        }
 
        /* fixup for PCIe class register */
-       value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+       value = mobiveil_csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
        value &= 0xff;
        value |= (PCI_CLASS_BRIDGE_PCI << 16);
-       csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+       mobiveil_csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
 
        /* setup MSI hardware registers */
        mobiveil_pcie_enable_msi(pcie);
@@ -668,9 +673,9 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
        pcie = irq_desc_get_chip_data(desc);
        mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
        raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
-       shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+       shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
        shifted_val &= ~mask;
-       csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+       mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
        raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
 }
 
@@ -684,9 +689,9 @@ static void mobiveil_unmask_intx_irq(struct irq_data *data)
        pcie = irq_desc_get_chip_data(desc);
        mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
        raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags);
-       shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
+       shifted_val = mobiveil_csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB);
        shifted_val |= mask;
-       csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
+       mobiveil_csr_writel(pcie, shifted_val, PAB_INTP_AMBA_MISC_ENB);
        raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags);
 }
 
index f6a669a..94ba4fe 100644 (file)
@@ -30,8 +30,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
-#include "../pci.h"
-
 #define PCIECAR                        0x000010
 #define PCIECCTLR              0x000018
 #define  CONFIG_SEND_ENABLE    BIT(31)
 #define  LINK_SPEED_2_5GTS     (1 << 16)
 #define  LINK_SPEED_5_0GTS     (2 << 16)
 #define MACCTLR                        0x011058
+#define  MACCTLR_NFTS_MASK     GENMASK(23, 16) /* The name is from SH7786 */
 #define  SPEED_CHANGE          BIT(24)
 #define  SCRAMBLE_DISABLE      BIT(27)
+#define  LTSMDIS               BIT(31)
+#define  MACCTLR_INIT_VAL      (LTSMDIS | MACCTLR_NFTS_MASK)
 #define PMSR                   0x01105c
 #define MACS2R                 0x011078
 #define MACCGSPSETR            0x011084
@@ -615,6 +616,8 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
        if (IS_ENABLED(CONFIG_PCI_MSI))
                rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);
 
+       rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
+
        /* Finish initialization - establish a PCI Express link */
        rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 
@@ -1029,25 +1032,30 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
        if (restype & IORESOURCE_PREFETCH)
                flags |= LAM_PREFETCH;
 
-       /*
-        * If the size of the range is larger than the alignment of the start
-        * address, we have to use multiple entries to perform the mapping.
-        */
-       if (cpu_addr > 0) {
-               unsigned long nr_zeros = __ffs64(cpu_addr);
-               u64 alignment = 1ULL << nr_zeros;
+       while (cpu_addr < cpu_end) {
+               if (idx >= MAX_NR_INBOUND_MAPS - 1) {
+                       dev_err(pcie->dev, "Failed to map inbound regions!\n");
+                       return -EINVAL;
+               }
+               /*
+                * If the size of the range is larger than the alignment of
+                * the start address, we have to use multiple entries to
+                * perform the mapping.
+                */
+               if (cpu_addr > 0) {
+                       unsigned long nr_zeros = __ffs64(cpu_addr);
+                       u64 alignment = 1ULL << nr_zeros;
 
-               size = min(range->size, alignment);
-       } else {
-               size = range->size;
-       }
-       /* Hardware supports max 4GiB inbound region */
-       size = min(size, 1ULL << 32);
+                       size = min(range->size, alignment);
+               } else {
+                       size = range->size;
+               }
+               /* Hardware supports max 4GiB inbound region */
+               size = min(size, 1ULL << 32);
 
-       mask = roundup_pow_of_two(size) - 1;
-       mask &= ~0xf;
+               mask = roundup_pow_of_two(size) - 1;
+               mask &= ~0xf;
 
-       while (cpu_addr < cpu_end) {
                /*
                 * Set up 64-bit inbound regions as the range parser doesn't
                 * distinguish between 32 and 64-bit types.
@@ -1067,11 +1075,6 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
                pci_addr += size;
                cpu_addr += size;
                idx += 2;
-
-               if (idx > MAX_NR_INBOUND_MAPS) {
-                       dev_err(pcie->dev, "Failed to map inbound regions!\n");
-                       return -EINVAL;
-               }
        }
        *index = idx;
 
@@ -1237,6 +1240,7 @@ static int rcar_pcie_resume_noirq(struct device *dev)
                return 0;
 
        /* Re-establish the PCIe link */
+       rcar_pci_write_reg(pcie, MACCTLR_INIT_VAL, MACCTLR);
        rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
        return rcar_pcie_wait_for_dl(pcie);
 }
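
The reworked rcar_pcie_inbound_ranges() loop above splits one inbound range into several windows because each window can be no larger than the alignment of its start address (and never larger than 4 GiB). A minimal stand-alone sketch of that splitting idea follows; it is user-space C with hypothetical names (chunk_size, the sample addresses), not the driver code, and it uses the remaining length where the driver uses range->size.

#include <inttypes.h>
#include <stdio.h>

static uint64_t chunk_size(uint64_t start, uint64_t remaining)
{
	/* start & -start == 1 << __ffs64(start): alignment of the start address */
	uint64_t align = start ? (start & -start) : remaining;
	uint64_t size = remaining < align ? remaining : align;

	if (size > (1ULL << 32))	/* hardware maps at most 4 GiB per window */
		size = 1ULL << 32;
	return size;
}

int main(void)
{
	uint64_t cpu_addr = 0x48000000, cpu_end = 0x80000000;
	int idx = 0;

	while (cpu_addr < cpu_end) {
		uint64_t size = chunk_size(cpu_addr, cpu_end - cpu_addr);

		printf("window %d: %#" PRIx64 " + %#" PRIx64 "\n", idx, cpu_addr, size);
		cpu_addr += size;
		idx += 2;	/* the driver consumes two map registers per 64-bit window */
	}
	return 0;
}

Running it on a 0x48000000-0x80000000 range yields three windows (128 MiB, 256 MiB, 512 MiB), which is why the error check had to move to the top of the loop: the old tail check could overrun the register array before reporting failure.
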
index ef8e677..68525f8 100644 (file)
@@ -620,19 +620,13 @@ static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
                dev_info(dev, "no vpcie3v3 regulator found\n");
        }
 
-       rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8");
-       if (IS_ERR(rockchip->vpcie1v8)) {
-               if (PTR_ERR(rockchip->vpcie1v8) != -ENODEV)
-                       return PTR_ERR(rockchip->vpcie1v8);
-               dev_info(dev, "no vpcie1v8 regulator found\n");
-       }
+       rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
+       if (IS_ERR(rockchip->vpcie1v8))
+               return PTR_ERR(rockchip->vpcie1v8);
 
-       rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9");
-       if (IS_ERR(rockchip->vpcie0v9)) {
-               if (PTR_ERR(rockchip->vpcie0v9) != -ENODEV)
-                       return PTR_ERR(rockchip->vpcie0v9);
-               dev_info(dev, "no vpcie0v9 regulator found\n");
-       }
+       rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
+       if (IS_ERR(rockchip->vpcie0v9))
+               return PTR_ERR(rockchip->vpcie0v9);
 
        return 0;
 }
@@ -658,27 +652,22 @@ static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
                }
        }
 
-       if (!IS_ERR(rockchip->vpcie1v8)) {
-               err = regulator_enable(rockchip->vpcie1v8);
-               if (err) {
-                       dev_err(dev, "fail to enable vpcie1v8 regulator\n");
-                       goto err_disable_3v3;
-               }
+       err = regulator_enable(rockchip->vpcie1v8);
+       if (err) {
+               dev_err(dev, "fail to enable vpcie1v8 regulator\n");
+               goto err_disable_3v3;
        }
 
-       if (!IS_ERR(rockchip->vpcie0v9)) {
-               err = regulator_enable(rockchip->vpcie0v9);
-               if (err) {
-                       dev_err(dev, "fail to enable vpcie0v9 regulator\n");
-                       goto err_disable_1v8;
-               }
+       err = regulator_enable(rockchip->vpcie0v9);
+       if (err) {
+               dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+               goto err_disable_1v8;
        }
 
        return 0;
 
 err_disable_1v8:
-       if (!IS_ERR(rockchip->vpcie1v8))
-               regulator_disable(rockchip->vpcie1v8);
+       regulator_disable(rockchip->vpcie1v8);
 err_disable_3v3:
        if (!IS_ERR(rockchip->vpcie3v3))
                regulator_disable(rockchip->vpcie3v3);
@@ -897,8 +886,7 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev)
 
        rockchip_pcie_disable_clocks(rockchip);
 
-       if (!IS_ERR(rockchip->vpcie0v9))
-               regulator_disable(rockchip->vpcie0v9);
+       regulator_disable(rockchip->vpcie0v9);
 
        return ret;
 }
@@ -908,12 +896,10 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev)
        struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
        int err;
 
-       if (!IS_ERR(rockchip->vpcie0v9)) {
-               err = regulator_enable(rockchip->vpcie0v9);
-               if (err) {
-                       dev_err(dev, "fail to enable vpcie0v9 regulator\n");
-                       return err;
-               }
+       err = regulator_enable(rockchip->vpcie0v9);
+       if (err) {
+               dev_err(dev, "fail to enable vpcie0v9 regulator\n");
+               return err;
        }
 
        err = rockchip_pcie_enable_clocks(rockchip);
@@ -939,8 +925,7 @@ err_err_deinit_port:
 err_pcie_resume:
        rockchip_pcie_disable_clocks(rockchip);
 err_disable_0v9:
-       if (!IS_ERR(rockchip->vpcie0v9))
-               regulator_disable(rockchip->vpcie0v9);
+       regulator_disable(rockchip->vpcie0v9);
        return err;
 }
 
@@ -1081,10 +1066,8 @@ err_vpcie:
                regulator_disable(rockchip->vpcie12v);
        if (!IS_ERR(rockchip->vpcie3v3))
                regulator_disable(rockchip->vpcie3v3);
-       if (!IS_ERR(rockchip->vpcie1v8))
-               regulator_disable(rockchip->vpcie1v8);
-       if (!IS_ERR(rockchip->vpcie0v9))
-               regulator_disable(rockchip->vpcie0v9);
+       regulator_disable(rockchip->vpcie1v8);
+       regulator_disable(rockchip->vpcie0v9);
 err_set_vpcie:
        rockchip_pcie_disable_clocks(rockchip);
        return err;
@@ -1108,10 +1091,8 @@ static int rockchip_pcie_remove(struct platform_device *pdev)
                regulator_disable(rockchip->vpcie12v);
        if (!IS_ERR(rockchip->vpcie3v3))
                regulator_disable(rockchip->vpcie3v3);
-       if (!IS_ERR(rockchip->vpcie1v8))
-               regulator_disable(rockchip->vpcie1v8);
-       if (!IS_ERR(rockchip->vpcie0v9))
-               regulator_disable(rockchip->vpcie0v9);
+       regulator_disable(rockchip->vpcie1v8);
+       regulator_disable(rockchip->vpcie0v9);
 
        return 0;
 }
index 8e87a05..53e4f9e 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Rockchip AXI PCIe controller driver
  *
index 1cfe368..5d74f81 100644 (file)
@@ -44,7 +44,7 @@
 static struct workqueue_struct *kpcitest_workqueue;
 
 struct pci_epf_test {
-       void                    *reg[6];
+       void                    *reg[PCI_STD_NUM_BARS];
        struct pci_epf          *epf;
        enum pci_barno          test_reg_bar;
        struct delayed_work     cmd_handler;
@@ -377,7 +377,7 @@ static void pci_epf_test_unbind(struct pci_epf *epf)
 
        cancel_delayed_work(&epf_test->cmd_handler);
        pci_epc_stop(epc);
-       for (bar = BAR_0; bar <= BAR_5; bar++) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                epf_bar = &epf->bar[bar];
 
                if (epf_test->reg[bar]) {
@@ -400,7 +400,7 @@ static int pci_epf_test_set_bar(struct pci_epf *epf)
 
        epc_features = epf_test->epc_features;
 
-       for (bar = BAR_0; bar <= BAR_5; bar += add) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
                epf_bar = &epf->bar[bar];
                /*
                 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
@@ -450,7 +450,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
        }
        epf_test->reg[test_reg_bar] = base;
 
-       for (bar = BAR_0; bar <= BAR_5; bar += add) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
                epf_bar = &epf->bar[bar];
                add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
 
@@ -478,7 +478,7 @@ static void pci_epf_configure_bar(struct pci_epf *epf,
        bool bar_fixed_64bit;
        int i;
 
-       for (i = BAR_0; i <= BAR_5; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                epf_bar = &epf->bar[i];
                bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
                if (bar_fixed_64bit)
index 2bf8bd1..d2b174c 100644 (file)
@@ -134,7 +134,7 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
        if (pageno < 0)
                return NULL;
 
-       *phys_addr = mem->phys_base + (pageno << page_shift);
+       *phys_addr = mem->phys_base + ((phys_addr_t)pageno << page_shift);
        virt_addr = ioremap(*phys_addr, size);
        if (!virt_addr)
                bitmap_release_region(mem->bitmap, pageno, order);
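
The one-line cast above matters because pageno is a 32-bit integer: without widening it first, the shift is evaluated in 32 bits and wraps before being added to phys_base. A small stand-alone illustration, assuming a hypothetical 4 KiB page (page_shift = 12) and an unsigned page number so the "wrong" case stays well defined:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int pageno = 0x100000;		/* a page index past the 4 GiB mark */
	unsigned int page_shift = 12;
	uint64_t phys_base = 0x600000000ULL;

	/* shift wraps in 32 bits before the addition */
	uint64_t wrong = phys_base + (pageno << page_shift);
	/* widen first, as the fix does with (phys_addr_t) */
	uint64_t right = phys_base + ((uint64_t)pageno << page_shift);

	printf("wrong: %#" PRIx64 "\nright: %#" PRIx64 "\n", wrong, right);
	return 0;
}
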
index e4c4663..b386995 100644 (file)
@@ -449,8 +449,15 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
 
        /* Scan non-hotplug bridges that need to be reconfigured */
        for_each_pci_bridge(dev, bus) {
-               if (!hotplug_is_native(dev))
-                       max = pci_scan_bridge(bus, dev, max, 1);
+               if (hotplug_is_native(dev))
+                       continue;
+
+               max = pci_scan_bridge(bus, dev, max, 1);
+               if (dev->subordinate) {
+                       pcibios_resource_survey_bus(dev->subordinate);
+                       pci_bus_size_bridges(dev->subordinate);
+                       pci_bus_assign_resources(dev->subordinate);
+               }
        }
 }
 
@@ -480,7 +487,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge)
                        if (PCI_SLOT(dev->devfn) == slot->device)
                                acpiphp_native_scan_bridge(dev);
                }
-               pci_assign_unassigned_bridge_resources(bus->self);
        } else {
                LIST_HEAD(add_list);
                int max, pass;
index 654c972..aa61d4c 100644 (file)
@@ -72,6 +72,7 @@ extern int pciehp_poll_time;
  * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
  *     Link Status register and to the Presence Detect State bit in the Slot
  *     Status register during a slot reset which may cause them to flap
+ * @ist_running: flag to keep user request waiting while IRQ thread is running
  * @request_result: result of last user request submitted to the IRQ thread
  * @requester: wait queue to wake up on completion of user request,
  *     used for synchronous slot enable/disable request via sysfs
@@ -101,6 +102,7 @@ struct controller {
 
        struct hotplug_slot hotplug_slot;       /* hotplug core interface */
        struct rw_semaphore reset_lock;
+       unsigned int ist_running;
        int request_result;
        wait_queue_head_t requester;
 };
@@ -172,10 +174,10 @@ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn);
 
 void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
 int pciehp_query_power_fault(struct controller *ctrl);
-bool pciehp_card_present(struct controller *ctrl);
-bool pciehp_card_present_or_link_active(struct controller *ctrl);
+int pciehp_card_present(struct controller *ctrl);
+int pciehp_card_present_or_link_active(struct controller *ctrl);
 int pciehp_check_link_status(struct controller *ctrl);
-bool pciehp_check_link_active(struct controller *ctrl);
+int pciehp_check_link_active(struct controller *ctrl);
 void pciehp_release_ctrl(struct controller *ctrl);
 
 int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
index b3122c1..312cc45 100644 (file)
@@ -139,10 +139,15 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
 {
        struct controller *ctrl = to_ctrl(hotplug_slot);
        struct pci_dev *pdev = ctrl->pcie->port;
+       int ret;
 
        pci_config_pm_runtime_get(pdev);
-       *value = pciehp_card_present_or_link_active(ctrl);
+       ret = pciehp_card_present_or_link_active(ctrl);
        pci_config_pm_runtime_put(pdev);
+       if (ret < 0)
+               return ret;
+
+       *value = ret;
        return 0;
 }
 
@@ -158,13 +163,13 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
  */
 static void pciehp_check_presence(struct controller *ctrl)
 {
-       bool occupied;
+       int occupied;
 
        down_read(&ctrl->reset_lock);
        mutex_lock(&ctrl->state_lock);
 
        occupied = pciehp_card_present_or_link_active(ctrl);
-       if ((occupied && (ctrl->state == OFF_STATE ||
+       if ((occupied > 0 && (ctrl->state == OFF_STATE ||
                          ctrl->state == BLINKINGON_STATE)) ||
            (!occupied && (ctrl->state == ON_STATE ||
                           ctrl->state == BLINKINGOFF_STATE)))
@@ -253,7 +258,7 @@ static bool pme_is_native(struct pcie_device *dev)
        return pcie_ports_native || host->native_pme;
 }
 
-static int pciehp_suspend(struct pcie_device *dev)
+static void pciehp_disable_interrupt(struct pcie_device *dev)
 {
        /*
         * Disable hotplug interrupt so that it does not trigger
@@ -261,7 +266,19 @@ static int pciehp_suspend(struct pcie_device *dev)
         */
        if (pme_is_native(dev))
                pcie_disable_interrupt(get_service_data(dev));
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pciehp_suspend(struct pcie_device *dev)
+{
+       /*
+        * If the port is already runtime suspended we can keep it that
+        * way.
+        */
+       if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
+               return 0;
 
+       pciehp_disable_interrupt(dev);
        return 0;
 }
 
@@ -279,6 +296,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
 
        return 0;
 }
+#endif
 
 static int pciehp_resume(struct pcie_device *dev)
 {
@@ -292,6 +310,12 @@ static int pciehp_resume(struct pcie_device *dev)
        return 0;
 }
 
+static int pciehp_runtime_suspend(struct pcie_device *dev)
+{
+       pciehp_disable_interrupt(dev);
+       return 0;
+}
+
 static int pciehp_runtime_resume(struct pcie_device *dev)
 {
        struct controller *ctrl = get_service_data(dev);
@@ -318,10 +342,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
        .remove         = pciehp_remove,
 
 #ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .suspend        = pciehp_suspend,
        .resume_noirq   = pciehp_resume_noirq,
        .resume         = pciehp_resume,
-       .runtime_suspend = pciehp_suspend,
+#endif
+       .runtime_suspend = pciehp_runtime_suspend,
        .runtime_resume = pciehp_runtime_resume,
 #endif /* PM */
 };
index 21af7b1..6503d15 100644 (file)
@@ -226,7 +226,7 @@ void pciehp_handle_disable_request(struct controller *ctrl)
 
 void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
 {
-       bool present, link_active;
+       int present, link_active;
 
        /*
         * If the slot is on and presence or link has changed, turn it off.
@@ -257,7 +257,7 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
        mutex_lock(&ctrl->state_lock);
        present = pciehp_card_present(ctrl);
        link_active = pciehp_check_link_active(ctrl);
-       if (!present && !link_active) {
+       if (present <= 0 && link_active <= 0) {
                mutex_unlock(&ctrl->state_lock);
                return;
        }
@@ -375,7 +375,8 @@ int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
                ctrl->request_result = -ENODEV;
                pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
                wait_event(ctrl->requester,
-                          !atomic_read(&ctrl->pending_events));
+                          !atomic_read(&ctrl->pending_events) &&
+                          !ctrl->ist_running);
                return ctrl->request_result;
        case POWERON_STATE:
                ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
@@ -408,7 +409,8 @@ int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
                mutex_unlock(&ctrl->state_lock);
                pciehp_request(ctrl, DISABLE_SLOT);
                wait_event(ctrl->requester,
-                          !atomic_read(&ctrl->pending_events));
+                          !atomic_read(&ctrl->pending_events) &&
+                          !ctrl->ist_running);
                return ctrl->request_result;
        case POWEROFF_STATE:
                ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
index 1a522c1..8a2cb17 100644 (file)
@@ -68,7 +68,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
        struct pci_dev *pdev = ctrl_dev(ctrl);
        u16 slot_status;
 
-       while (true) {
+       do {
                pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
                if (slot_status == (u16) ~0) {
                        ctrl_info(ctrl, "%s: no response from device\n",
@@ -81,11 +81,9 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
                                                   PCI_EXP_SLTSTA_CC);
                        return 1;
                }
-               if (timeout < 0)
-                       break;
                msleep(10);
                timeout -= 10;
-       }
+       } while (timeout >= 0);
        return 0;       /* timeout */
 }
 
@@ -201,17 +199,29 @@ static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
        pcie_do_write_cmd(ctrl, cmd, mask, false);
 }
 
-bool pciehp_check_link_active(struct controller *ctrl)
+/**
+ * pciehp_check_link_active() - Is the link active
+ * @ctrl: PCIe hotplug controller
+ *
+ * Check whether the downstream link is currently active. Note it is
+ * possible that the card is removed immediately after this so the
+ * caller may need to take it into account.
+ *
+ * If the hotplug controller itself is not available anymore returns
+ * %-ENODEV.
+ */
+int pciehp_check_link_active(struct controller *ctrl)
 {
        struct pci_dev *pdev = ctrl_dev(ctrl);
        u16 lnk_status;
-       bool ret;
+       int ret;
 
-       pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
-       ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+       ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
+       if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+               return -ENODEV;
 
-       if (ret)
-               ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
+       ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
+       ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
 
        return ret;
 }
@@ -373,13 +383,29 @@ void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
        *status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
 }
 
-bool pciehp_card_present(struct controller *ctrl)
+/**
+ * pciehp_card_present() - Is the card present
+ * @ctrl: PCIe hotplug controller
+ *
+ * Function checks whether the card is currently present in the slot and
+ * in that case returns true. Note it is possible that the card is
+ * removed immediately after the check so the caller may need to take
+ * this into account.
+ *
+ * If the hotplug controller itself is not available anymore returns
+ * %-ENODEV.
+ */
+int pciehp_card_present(struct controller *ctrl)
 {
        struct pci_dev *pdev = ctrl_dev(ctrl);
        u16 slot_status;
+       int ret;
 
-       pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-       return slot_status & PCI_EXP_SLTSTA_PDS;
+       ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
+       if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+               return -ENODEV;
+
+       return !!(slot_status & PCI_EXP_SLTSTA_PDS);
 }
 
 /**
@@ -390,10 +416,19 @@ bool pciehp_card_present(struct controller *ctrl)
  * Presence Detect State bit, this helper also returns true if the Link Active
  * bit is set.  This is a concession to broken hotplug ports which hardwire
  * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ *
+ * Returns: %1 if the slot is occupied and %0 if it is not. If the hotplug
+ *         port is not present anymore returns %-ENODEV.
  */
-bool pciehp_card_present_or_link_active(struct controller *ctrl)
+int pciehp_card_present_or_link_active(struct controller *ctrl)
 {
-       return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+       int ret;
+
+       ret = pciehp_card_present(ctrl);
+       if (ret)
+               return ret;
+
+       return pciehp_check_link_active(ctrl);
 }
 
 int pciehp_query_power_fault(struct controller *ctrl)
@@ -583,6 +618,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
        irqreturn_t ret;
        u32 events;
 
+       ctrl->ist_running = true;
        pci_config_pm_runtime_get(pdev);
 
        /* rerun pciehp_isr() if the port was inaccessible on interrupt */
@@ -629,6 +665,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
        up_read(&ctrl->reset_lock);
 
        pci_config_pm_runtime_put(pdev);
+       ctrl->ist_running = false;
        wake_up(&ctrl->requester);
        return IRQ_HANDLED;
 }
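
With the bool-to-int conversion above, callers now see three outcomes: a negative errno when the hotplug controller itself has vanished, 0 for an empty slot, and 1 for an occupied slot or active link. The following stand-alone model (all names and values hypothetical, not the driver) shows how the combined check short-circuits on both "occupied" and "controller gone":

#include <errno.h>
#include <stdio.h>

static int model_card_present(int ctrl_alive, int pds_bit)
{
	if (!ctrl_alive)		/* config read came back as all-ones */
		return -ENODEV;
	return !!pds_bit;
}

static int model_present_or_link_active(int ctrl_alive, int pds_bit, int dlla_bit)
{
	int ret = model_card_present(ctrl_alive, pds_bit);

	if (ret)			/* occupied (1) or error (<0): both end the check */
		return ret;
	return ctrl_alive ? !!dlla_bit : -ENODEV;
}

int main(void)
{
	printf("occupied: %d\n", model_present_or_link_active(1, 1, 0));
	printf("empty:    %d\n", model_present_or_link_active(1, 0, 0));
	printf("gone:     %d\n", model_present_or_link_active(0, 0, 0));
	return 0;
}
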
index b3f972e..1d3de1e 100644 (file)
@@ -254,8 +254,14 @@ static ssize_t sriov_numvfs_show(struct device *dev,
                                 char *buf)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
+       u16 num_vfs;
+
+       /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */
+       device_lock(&pdev->dev);
+       num_vfs = pdev->sriov->num_VFs;
+       device_unlock(&pdev->dev);
 
-       return sprintf(buf, "%u\n", pdev->sriov->num_VFs);
+       return sprintf(buf, "%u\n", num_vfs);
 }
 
 /*
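
The sriov_numvfs_show() change above takes the same lock the store path holds, so a sysfs reader never samples num_VFs while it is being reconfigured. A user-space analogue of that pattern, using a pthread mutex in place of device_lock()/device_unlock() (purely illustrative, all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int num_vfs;

static void set_num_vfs(unsigned int n)		/* writer, cf. sriov_numvfs_store() */
{
	pthread_mutex_lock(&cfg_lock);
	num_vfs = n;				/* real code (de)configures VFs here */
	pthread_mutex_unlock(&cfg_lock);
}

static unsigned int get_num_vfs(void)		/* reader, cf. sriov_numvfs_show() */
{
	unsigned int n;

	pthread_mutex_lock(&cfg_lock);
	n = num_vfs;
	pthread_mutex_unlock(&cfg_lock);
	return n;
}

int main(void)
{
	set_num_vfs(4);
	printf("%u\n", get_num_vfs());
	return 0;
}
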
index 0884bed..c7709e4 100644 (file)
@@ -213,12 +213,13 @@ u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag)
 
        if (pci_msi_ignore_mask)
                return 0;
+
        desc_addr = pci_msix_desc_addr(desc);
        if (!desc_addr)
                return 0;
 
        mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-       if (flag)
+       if (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT)
                mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
 
        writel(mask_bits, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
@@ -861,7 +862,7 @@ static int pci_msi_supported(struct pci_dev *dev, int nvec)
        if (!pci_msi_enable)
                return 0;
 
-       if (!dev || dev->no_msi || dev->current_state != PCI_D0)
+       if (!dev || dev->no_msi)
                return 0;
 
        /*
@@ -972,7 +973,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
        int nr_entries;
        int i, j;
 
-       if (!pci_msi_supported(dev, nvec))
+       if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
                return -EINVAL;
 
        nr_entries = pci_msix_vec_count(dev);
@@ -1058,7 +1059,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
        int nvec;
        int rc;
 
-       if (!pci_msi_supported(dev, minvec))
+       if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
                return -EINVAL;
 
        /* Check whether driver already requested MSI-X IRQs */
@@ -1315,22 +1316,6 @@ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
 }
 EXPORT_SYMBOL(pci_irq_get_affinity);
 
-/**
- * pci_irq_get_node - return the NUMA node of a particular MSI vector
- * @pdev:      PCI device to operate on
- * @vec:       device-relative interrupt vector index (0-based).
- */
-int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
-       const struct cpumask *mask;
-
-       mask = pci_irq_get_affinity(pdev, vec);
-       if (mask)
-               return local_memory_node(cpu_to_node(cpumask_first(mask)));
-       return dev_to_node(&pdev->dev);
-}
-EXPORT_SYMBOL(pci_irq_get_node);
-
 struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
 {
        return to_pci_dev(desc->dev);
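
The MSI-X hunk above tightens the mask test from "any non-zero flag" to an explicit check of PCI_MSIX_ENTRY_CTRL_MASKBIT, since callers may pass a saved copy of the whole Vector Control word in which other bits are set. A stand-alone comparison of the two behaviours (MASKBIT value taken from the spec, bit 0 of Vector Control; the apply() helper is hypothetical):

#include <stdio.h>

#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001

static unsigned int apply(unsigned int mask_bits, unsigned int flag, int old_behaviour)
{
	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (old_behaviour ? (flag != 0) : (flag & PCI_MSIX_ENTRY_CTRL_MASKBIT))
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	return mask_bits;
}

int main(void)
{
	unsigned int saved_ctrl = 0x2;	/* some other bit set, MASKBIT clear */

	printf("old: %u  new: %u\n",
	       apply(0, saved_ctrl, 1),		/* old test would wrongly mask the vector */
	       apply(0, saved_ctrl, 0));	/* new test leaves it unmasked */
	return 0;
}
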
index 5fd9010..fffa770 100644 (file)
@@ -270,10 +270,10 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
 int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
                         unsigned int flags)
 {
-       bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+       bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
        bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
        bridge->conf.cache_line_size = 0x10;
-       bridge->conf.status = PCI_STATUS_CAP_LIST;
+       bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
        bridge->pci_regs_behavior = kmemdup(pci_regs_behavior,
                                            sizeof(pci_regs_behavior),
                                            GFP_KERNEL);
@@ -284,8 +284,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
                bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
                bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
                /* Set PCIe v2, root port, slot support */
-               bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
-                       PCI_EXP_FLAGS_SLOT;
+               bridge->pcie_conf.cap =
+                       cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+                                   PCI_EXP_FLAGS_SLOT);
                bridge->pcie_cap_regs_behavior =
                        kmemdup(pcie_cap_regs_behavior,
                                sizeof(pcie_cap_regs_behavior),
@@ -327,7 +328,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
        int reg = where & ~3;
        pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
                                                 int reg, u32 *value);
-       u32 *cfgspace;
+       __le32 *cfgspace;
        const struct pci_bridge_reg_behavior *behavior;
 
        if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
@@ -343,11 +344,11 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
        if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
                reg -= PCI_CAP_PCIE_START;
                read_op = bridge->ops->read_pcie;
-               cfgspace = (u32 *) &bridge->pcie_conf;
+               cfgspace = (__le32 *) &bridge->pcie_conf;
                behavior = bridge->pcie_cap_regs_behavior;
        } else {
                read_op = bridge->ops->read_base;
-               cfgspace = (u32 *) &bridge->conf;
+               cfgspace = (__le32 *) &bridge->conf;
                behavior = bridge->pci_regs_behavior;
        }
 
@@ -357,7 +358,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
                ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
 
        if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
-               *value = cfgspace[reg / 4];
+               *value = le32_to_cpu(cfgspace[reg / 4]);
 
        /*
         * Make sure we never return any reserved bit with a value
@@ -387,7 +388,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
        int mask, ret, old, new, shift;
        void (*write_op)(struct pci_bridge_emul *bridge, int reg,
                         u32 old, u32 new, u32 mask);
-       u32 *cfgspace;
+       __le32 *cfgspace;
        const struct pci_bridge_reg_behavior *behavior;
 
        if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
@@ -414,11 +415,11 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
        if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
                reg -= PCI_CAP_PCIE_START;
                write_op = bridge->ops->write_pcie;
-               cfgspace = (u32 *) &bridge->pcie_conf;
+               cfgspace = (__le32 *) &bridge->pcie_conf;
                behavior = bridge->pcie_cap_regs_behavior;
        } else {
                write_op = bridge->ops->write_base;
-               cfgspace = (u32 *) &bridge->conf;
+               cfgspace = (__le32 *) &bridge->conf;
                behavior = bridge->pci_regs_behavior;
        }
 
@@ -431,7 +432,7 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
        /* Clear the W1C bits */
        new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
 
-       cfgspace[reg / 4] = new;
+       cfgspace[reg / 4] = cpu_to_le32(new);
 
        if (write_op)
                write_op(bridge, reg, old, new, mask);
index e65b1b7..b318830 100644 (file)
@@ -6,65 +6,65 @@
 
 /* PCI configuration space of a PCI-to-PCI bridge. */
 struct pci_bridge_emul_conf {
-       u16 vendor;
-       u16 device;
-       u16 command;
-       u16 status;
-       u32 class_revision;
+       __le16 vendor;
+       __le16 device;
+       __le16 command;
+       __le16 status;
+       __le32 class_revision;
        u8 cache_line_size;
        u8 latency_timer;
        u8 header_type;
        u8 bist;
-       u32 bar[2];
+       __le32 bar[2];
        u8 primary_bus;
        u8 secondary_bus;
        u8 subordinate_bus;
        u8 secondary_latency_timer;
        u8 iobase;
        u8 iolimit;
-       u16 secondary_status;
-       u16 membase;
-       u16 memlimit;
-       u16 pref_mem_base;
-       u16 pref_mem_limit;
-       u32 prefbaseupper;
-       u32 preflimitupper;
-       u16 iobaseupper;
-       u16 iolimitupper;
+       __le16 secondary_status;
+       __le16 membase;
+       __le16 memlimit;
+       __le16 pref_mem_base;
+       __le16 pref_mem_limit;
+       __le32 prefbaseupper;
+       __le32 preflimitupper;
+       __le16 iobaseupper;
+       __le16 iolimitupper;
        u8 capabilities_pointer;
        u8 reserve[3];
-       u32 romaddr;
+       __le32 romaddr;
        u8 intline;
        u8 intpin;
-       u16 bridgectrl;
+       __le16 bridgectrl;
 };
 
 /* PCI configuration space of the PCIe capabilities */
 struct pci_bridge_emul_pcie_conf {
        u8 cap_id;
        u8 next;
-       u16 cap;
-       u32 devcap;
-       u16 devctl;
-       u16 devsta;
-       u32 lnkcap;
-       u16 lnkctl;
-       u16 lnksta;
-       u32 slotcap;
-       u16 slotctl;
-       u16 slotsta;
-       u16 rootctl;
-       u16 rsvd;
-       u32 rootsta;
-       u32 devcap2;
-       u16 devctl2;
-       u16 devsta2;
-       u32 lnkcap2;
-       u16 lnkctl2;
-       u16 lnksta2;
-       u32 slotcap2;
-       u16 slotctl2;
-       u16 slotsta2;
+       __le16 cap;
+       __le32 devcap;
+       __le16 devctl;
+       __le16 devsta;
+       __le32 lnkcap;
+       __le16 lnkctl;
+       __le16 lnksta;
+       __le32 slotcap;
+       __le16 slotctl;
+       __le16 slotsta;
+       __le16 rootctl;
+       __le16 rsvd;
+       __le32 rootsta;
+       __le32 devcap2;
+       __le16 devctl2;
+       __le16 devsta2;
+       __le32 lnkcap2;
+       __le16 lnkctl2;
+       __le16 lnksta2;
+       __le32 slotcap2;
+       __le16 slotctl2;
+       __le16 slotsta2;
 };
 
 struct pci_bridge_emul;
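
With the __le32/__le16 annotations above, the emulated config space is stored in little-endian byte order and converted only at the access points, so reads return the same values on big- and little-endian hosts. A minimal user-space sketch of the store/load discipline, using the glibc <endian.h> helpers in place of the kernel's cpu_to_le32()/le32_to_cpu() (the array and index are illustrative only):

#include <endian.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cfgspace[16] = { 0 };	/* stand-in for struct pci_bridge_emul_conf */

	/* store: always little-endian, regardless of host byte order */
	cfgspace[2] = htole32(0x0604 << 16);	/* PCI_CLASS_BRIDGE_PCI << 16 */

	/* load: convert back to CPU order before handing the value to the caller */
	uint32_t class_revision = le32toh(cfgspace[2]);

	printf("class/revision dword: %#010" PRIx32 "\n", class_revision);
	return 0;
}
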
index a8124e4..0454ca0 100644 (file)
@@ -315,7 +315,8 @@ static long local_pci_probe(void *_ddi)
         * Probe function should return < 0 for failure, 0 for success
         * Treat values > 0 as success, but warn.
         */
-       dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
+       pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n",
+                rc);
        return 0;
 }
 
@@ -517,6 +518,12 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
        return 0;
 }
 
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+       pci_fixup_device(pci_fixup_resume, pci_dev);
+       pci_enable_wake(pci_dev, PCI_D0, false);
+}
+
 #endif
 
 #ifdef CONFIG_PM_SLEEP
@@ -524,6 +531,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
 static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
 {
        pci_power_up(pci_dev);
+       pci_update_current_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
        pci_pme_restore(pci_dev);
 }
@@ -578,9 +586,9 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 
                if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
                    && pci_dev->current_state != PCI_UNKNOWN) {
-                       WARN_ONCE(pci_dev->current_state != prev,
-                               "PCI PM: Device state not saved by %pS\n",
-                               drv->suspend);
+                       pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+                                     "PCI PM: Device state not saved by %pS\n",
+                                     drv->suspend);
                }
        }
 
@@ -592,46 +600,17 @@ static int pci_legacy_suspend(struct device *dev, pm_message_t state)
 static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct pci_driver *drv = pci_dev->driver;
-
-       if (drv && drv->suspend_late) {
-               pci_power_t prev = pci_dev->current_state;
-               int error;
-
-               error = drv->suspend_late(pci_dev, state);
-               suspend_report_result(drv->suspend_late, error);
-               if (error)
-                       return error;
-
-               if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
-                   && pci_dev->current_state != PCI_UNKNOWN) {
-                       WARN_ONCE(pci_dev->current_state != prev,
-                               "PCI PM: Device state not saved by %pS\n",
-                               drv->suspend_late);
-                       goto Fixup;
-               }
-       }
 
        if (!pci_dev->state_saved)
                pci_save_state(pci_dev);
 
        pci_pm_set_unknown_state(pci_dev);
 
-Fixup:
        pci_fixup_device(pci_fixup_suspend_late, pci_dev);
 
        return 0;
 }
 
-static int pci_legacy_resume_early(struct device *dev)
-{
-       struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct pci_driver *drv = pci_dev->driver;
-
-       return drv && drv->resume_early ?
-                       drv->resume_early(pci_dev) : 0;
-}
-
 static int pci_legacy_resume(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -645,12 +624,6 @@ static int pci_legacy_resume(struct device *dev)
 
 /* Auxiliary functions used by the new power management framework */
 
-static void pci_pm_default_resume(struct pci_dev *pci_dev)
-{
-       pci_fixup_device(pci_fixup_resume, pci_dev);
-       pci_enable_wake(pci_dev, PCI_D0, false);
-}
-
 static void pci_pm_default_suspend(struct pci_dev *pci_dev)
 {
        /* Disable non-bridge devices without PM support */
@@ -661,16 +634,15 @@ static void pci_pm_default_suspend(struct pci_dev *pci_dev)
 static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
 {
        struct pci_driver *drv = pci_dev->driver;
-       bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
-               || drv->resume_early);
+       bool ret = drv && (drv->suspend || drv->resume);
 
        /*
         * Legacy PM support is used by default, so warn if the new framework is
         * supported as well.  Drivers are supposed to support either the
         * former, or the latter, but not both at the same time.
         */
-       WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
-               drv->name, pci_dev->vendor, pci_dev->device);
+       pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n",
+                pci_dev->vendor, pci_dev->device);
 
        return ret;
 }
@@ -679,11 +651,11 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
 
 static int pci_pm_prepare(struct device *dev)
 {
-       struct device_driver *drv = dev->driver;
        struct pci_dev *pci_dev = to_pci_dev(dev);
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
-       if (drv && drv->pm && drv->pm->prepare) {
-               int error = drv->pm->prepare(dev);
+       if (pm && pm->prepare) {
+               int error = pm->prepare(dev);
                if (error < 0)
                        return error;
 
@@ -793,9 +765,9 @@ static int pci_pm_suspend(struct device *dev)
 
                if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
                    && pci_dev->current_state != PCI_UNKNOWN) {
-                       WARN_ONCE(pci_dev->current_state != prev,
-                               "PCI PM: State of device not saved by %pS\n",
-                               pm->suspend);
+                       pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+                                     "PCI PM: State of device not saved by %pS\n",
+                                     pm->suspend);
                }
        }
 
@@ -841,9 +813,9 @@ static int pci_pm_suspend_noirq(struct device *dev)
 
                if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
                    && pci_dev->current_state != PCI_UNKNOWN) {
-                       WARN_ONCE(pci_dev->current_state != prev,
-                               "PCI PM: State of device not saved by %pS\n",
-                               pm->suspend_noirq);
+                       pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+                                     "PCI PM: State of device not saved by %pS\n",
+                                     pm->suspend_noirq);
                        goto Fixup;
                }
        }
@@ -865,7 +837,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
                        pci_prepare_to_sleep(pci_dev);
        }
 
-       dev_dbg(dev, "PCI PM: Suspend power state: %s\n",
+       pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n",
                pci_power_name(pci_dev->current_state));
 
        if (pci_dev->current_state == PCI_D0) {
@@ -880,7 +852,7 @@ static int pci_pm_suspend_noirq(struct device *dev)
        }
 
        if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) {
-               dev_dbg(dev, "PCI PM: Skipped\n");
+               pci_dbg(pci_dev, "PCI PM: Skipped\n");
                goto Fixup;
        }
 
@@ -917,8 +889,9 @@ Fixup:
 static int pci_pm_resume_noirq(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct device_driver *drv = dev->driver;
-       int error = 0;
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       pci_power_t prev_state = pci_dev->current_state;
+       bool skip_bus_pm = pci_dev->skip_bus_pm;
 
        if (dev_pm_may_skip_resume(dev))
                return 0;
@@ -937,27 +910,28 @@ static int pci_pm_resume_noirq(struct device *dev)
         * configuration here and attempting to put them into D0 again is
         * pointless, so avoid doing that.
         */
-       if (!(pci_dev->skip_bus_pm && pm_suspend_no_platform()))
+       if (!(skip_bus_pm && pm_suspend_no_platform()))
                pci_pm_default_resume_early(pci_dev);
 
        pci_fixup_device(pci_fixup_resume_early, pci_dev);
+       pcie_pme_root_status_cleanup(pci_dev);
 
-       if (pci_has_legacy_pm_support(pci_dev))
-               return pci_legacy_resume_early(dev);
+       if (!skip_bus_pm && prev_state == PCI_D3cold)
+               pci_bridge_wait_for_secondary_bus(pci_dev);
 
-       pcie_pme_root_status_cleanup(pci_dev);
+       if (pci_has_legacy_pm_support(pci_dev))
+               return 0;
 
-       if (drv && drv->pm && drv->pm->resume_noirq)
-               error = drv->pm->resume_noirq(dev);
+       if (pm && pm->resume_noirq)
+               return pm->resume_noirq(dev);
 
-       return error;
+       return 0;
 }
 
 static int pci_pm_resume(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       int error = 0;
 
        /*
         * This is necessary for the suspend error path in which resume is
@@ -973,12 +947,12 @@ static int pci_pm_resume(struct device *dev)
 
        if (pm) {
                if (pm->resume)
-                       error = pm->resume(dev);
+                       return pm->resume(dev);
        } else {
                pci_pm_reenable_device(pci_dev);
        }
 
-       return error;
+       return 0;
 }
 
 #else /* !CONFIG_SUSPEND */
@@ -993,7 +967,6 @@ static int pci_pm_resume(struct device *dev)
 
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 
-
 /*
  * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
  * a hibernate transition
@@ -1039,16 +1012,16 @@ static int pci_pm_freeze(struct device *dev)
 static int pci_pm_freeze_noirq(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct device_driver *drv = dev->driver;
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
        if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_suspend_late(dev, PMSG_FREEZE);
 
-       if (drv && drv->pm && drv->pm->freeze_noirq) {
+       if (pm && pm->freeze_noirq) {
                int error;
 
-               error = drv->pm->freeze_noirq(dev);
-               suspend_report_result(drv->pm->freeze_noirq, error);
+               error = pm->freeze_noirq(dev);
+               suspend_report_result(pm->freeze_noirq, error);
                if (error)
                        return error;
        }
@@ -1067,8 +1040,8 @@ static int pci_pm_freeze_noirq(struct device *dev)
 static int pci_pm_thaw_noirq(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct device_driver *drv = dev->driver;
-       int error = 0;
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int error;
 
        if (pcibios_pm_ops.thaw_noirq) {
                error = pcibios_pm_ops.thaw_noirq(dev);
@@ -1076,21 +1049,25 @@ static int pci_pm_thaw_noirq(struct device *dev)
                        return error;
        }
 
-       if (pci_has_legacy_pm_support(pci_dev))
-               return pci_legacy_resume_early(dev);
-
        /*
-        * pci_restore_state() requires the device to be in D0 (because of MSI
-        * restoration among other things), so force it into D0 in case the
-        * driver's "freeze" callbacks put it into a low-power state directly.
+        * The pm->thaw_noirq() callback assumes the device has been
+        * returned to D0 and its config state has been restored.
+        *
+        * In addition, pci_restore_state() restores MSI-X state in MMIO
+        * space, which requires the device to be in D0, so return it to D0
+        * in case the driver's "freeze" callbacks put it into a low-power
+        * state.
         */
        pci_set_power_state(pci_dev, PCI_D0);
        pci_restore_state(pci_dev);
 
-       if (drv && drv->pm && drv->pm->thaw_noirq)
-               error = drv->pm->thaw_noirq(dev);
+       if (pci_has_legacy_pm_support(pci_dev))
+               return 0;
+
+       if (pm && pm->thaw_noirq)
+               return pm->thaw_noirq(dev);
 
-       return error;
+       return 0;
 }
 
 static int pci_pm_thaw(struct device *dev)
@@ -1161,24 +1138,24 @@ static int pci_pm_poweroff_late(struct device *dev)
 static int pci_pm_poweroff_noirq(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct device_driver *drv = dev->driver;
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 
        if (dev_pm_smart_suspend_and_suspended(dev))
                return 0;
 
-       if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+       if (pci_has_legacy_pm_support(pci_dev))
                return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
 
-       if (!drv || !drv->pm) {
+       if (!pm) {
                pci_fixup_device(pci_fixup_suspend_late, pci_dev);
                return 0;
        }
 
-       if (drv->pm->poweroff_noirq) {
+       if (pm->poweroff_noirq) {
                int error;
 
-               error = drv->pm->poweroff_noirq(dev);
-               suspend_report_result(drv->pm->poweroff_noirq, error);
+               error = pm->poweroff_noirq(dev);
+               suspend_report_result(pm->poweroff_noirq, error);
                if (error)
                        return error;
        }
@@ -1204,8 +1181,8 @@ static int pci_pm_poweroff_noirq(struct device *dev)
 static int pci_pm_restore_noirq(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
-       struct device_driver *drv = dev->driver;
-       int error = 0;
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       int error;
 
        if (pcibios_pm_ops.restore_noirq) {
                error = pcibios_pm_ops.restore_noirq(dev);
@@ -1217,19 +1194,18 @@ static int pci_pm_restore_noirq(struct device *dev)
        pci_fixup_device(pci_fixup_resume_early, pci_dev);
 
        if (pci_has_legacy_pm_support(pci_dev))
-               return pci_legacy_resume_early(dev);
+               return 0;
 
-       if (drv && drv->pm && drv->pm->restore_noirq)
-               error = drv->pm->restore_noirq(dev);
+       if (pm && pm->restore_noirq)
+               return pm->restore_noirq(dev);
 
-       return error;
+       return 0;
 }
 
 static int pci_pm_restore(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       int error = 0;
 
        /*
         * This is necessary for the hibernation error path in which restore is
@@ -1245,12 +1221,12 @@ static int pci_pm_restore(struct device *dev)
 
        if (pm) {
                if (pm->restore)
-                       error = pm->restore(dev);
+                       return pm->restore(dev);
        } else {
                pci_pm_reenable_device(pci_dev);
        }
 
-       return error;
+       return 0;
 }
 
 #else /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -1295,11 +1271,11 @@ static int pci_pm_runtime_suspend(struct device *dev)
                 * log level.
                 */
                if (error == -EBUSY || error == -EAGAIN) {
-                       dev_dbg(dev, "can't suspend now (%ps returned %d)\n",
+                       pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n",
                                pm->runtime_suspend, error);
                        return error;
                } else if (error) {
-                       dev_err(dev, "can't suspend (%ps returned %d)\n",
+                       pci_err(pci_dev, "can't suspend (%ps returned %d)\n",
                                pm->runtime_suspend, error);
                        return error;
                }
@@ -1310,9 +1286,9 @@ static int pci_pm_runtime_suspend(struct device *dev)
        if (pm && pm->runtime_suspend
            && !pci_dev->state_saved && pci_dev->current_state != PCI_D0
            && pci_dev->current_state != PCI_UNKNOWN) {
-               WARN_ONCE(pci_dev->current_state != prev,
-                       "PCI PM: State of device not saved by %pS\n",
-                       pm->runtime_suspend);
+               pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev,
+                             "PCI PM: State of device not saved by %pS\n",
+                             pm->runtime_suspend);
                return 0;
        }
 
@@ -1326,9 +1302,10 @@ static int pci_pm_runtime_suspend(struct device *dev)
 
 static int pci_pm_runtime_resume(struct device *dev)
 {
-       int rc = 0;
        struct pci_dev *pci_dev = to_pci_dev(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+       pci_power_t prev_state = pci_dev->current_state;
+       int error = 0;
 
        /*
         * Restoring config space is necessary even if the device is not bound
@@ -1341,22 +1318,23 @@ static int pci_pm_runtime_resume(struct device *dev)
                return 0;
 
        pci_fixup_device(pci_fixup_resume_early, pci_dev);
-       pci_enable_wake(pci_dev, PCI_D0, false);
-       pci_fixup_device(pci_fixup_resume, pci_dev);
+       pci_pm_default_resume(pci_dev);
+
+       if (prev_state == PCI_D3cold)
+               pci_bridge_wait_for_secondary_bus(pci_dev);
 
        if (pm && pm->runtime_resume)
-               rc = pm->runtime_resume(dev);
+               error = pm->runtime_resume(dev);
 
        pci_dev->runtime_d3cold = false;
 
-       return rc;
+       return error;
 }
 
 static int pci_pm_runtime_idle(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
        const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       int ret = 0;
 
        /*
         * If pci_dev->driver is not set (unbound), the device should
@@ -1369,9 +1347,9 @@ static int pci_pm_runtime_idle(struct device *dev)
                return -ENOSYS;
 
        if (pm->runtime_idle)
-               ret = pm->runtime_idle(dev);
+               return pm->runtime_idle(dev);
 
-       return ret;
+       return 0;
 }
 
 static const struct dev_pm_ops pci_dev_pm_ops = {
index 7934129..13f766d 100644 (file)
@@ -1122,7 +1122,7 @@ static void pci_remove_resource_files(struct pci_dev *pdev)
 {
        int i;
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct bin_attribute *res_attr;
 
                res_attr = pdev->res_attr[i];
@@ -1193,7 +1193,7 @@ static int pci_create_resource_files(struct pci_dev *pdev)
        int retval;
 
        /* Expose the PCI resources from this device as files */
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 
                /* skip empty resources */
                if (!pci_resource_len(pdev, i))
@@ -1330,7 +1330,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
        int retval;
 
        pcie_vpd_create_sysfs_dev_files(dev);
-       pcie_aspm_create_sysfs_dev_files(dev);
 
        if (dev->reset_fn) {
                retval = device_create_file(&dev->dev, &dev_attr_reset);
@@ -1340,7 +1339,6 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
        return 0;
 
 error:
-       pcie_aspm_remove_sysfs_dev_files(dev);
        pcie_vpd_remove_sysfs_dev_files(dev);
        return retval;
 }
@@ -1416,7 +1414,6 @@ err:
 static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
 {
        pcie_vpd_remove_sysfs_dev_files(dev);
-       pcie_aspm_remove_sysfs_dev_files(dev);
        if (dev->reset_fn) {
                device_remove_file(&dev->dev, &dev_attr_reset);
                dev->reset_fn = 0;
@@ -1539,24 +1536,6 @@ const struct attribute_group *pci_dev_groups[] = {
        NULL,
 };
 
-static const struct attribute_group pci_bridge_group = {
-       .attrs = pci_bridge_attrs,
-};
-
-const struct attribute_group *pci_bridge_groups[] = {
-       &pci_bridge_group,
-       NULL,
-};
-
-static const struct attribute_group pcie_dev_group = {
-       .attrs = pcie_dev_attrs,
-};
-
-const struct attribute_group *pcie_dev_groups[] = {
-       &pcie_dev_group,
-       NULL,
-};
-
 static const struct attribute_group pci_dev_hp_attr_group = {
        .attrs = pci_dev_hp_attrs,
        .is_visible = pci_dev_hp_attrs_are_visible,
@@ -1588,6 +1567,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
 #ifdef CONFIG_PCIEAER
        &aer_stats_attr_group,
 #endif
+#ifdef CONFIG_PCIEASPM
+       &aspm_ctrl_attr_group,
+#endif
        NULL,
 };
 
index e7982af..1ffe736 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/init.h>
+#include <linux/msi.h>
 #include <linux/of.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
@@ -85,10 +86,17 @@ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
 unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
 
 #define DEFAULT_HOTPLUG_IO_SIZE                (256)
-#define DEFAULT_HOTPLUG_MEM_SIZE       (2*1024*1024)
-/* pci=hpmemsize=nnM,hpiosize=nn can override this */
+#define DEFAULT_HOTPLUG_MMIO_SIZE      (2*1024*1024)
+#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024)
+/* hpiosize=nn can override this */
 unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
-unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
+ * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
+ * pci=hpmemsize=nnM overrides both
+ */
+unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
+unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
 
 #define DEFAULT_HOTPLUG_BUS_SIZE       1
 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
@@ -674,7 +682,7 @@ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
 {
        int i;
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *r = &dev->resource[i];
 
                if (r->start && resource_contains(r, res))
@@ -834,14 +842,16 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
                return -EINVAL;
 
        /*
-        * Validate current state:
-        * Can enter D0 from any state, but if we can only go deeper
-        * to sleep if we're already in a low power state
+        * Validate transition: We can enter D0 from any state, but if
+        * we're already in a low-power state, we can only go deeper.  E.g.,
+        * we can go from D1 to D3, but we can't go directly from D3 to D1;
+        * we'd have to go from D3 to D0, then to D1.
         */
        if (state != PCI_D0 && dev->current_state <= PCI_D3cold
            && dev->current_state > state) {
-               pci_err(dev, "invalid power transition (from state %d to %d)\n",
-                       dev->current_state, state);
+               pci_err(dev, "invalid power transition (from %s to %s)\n",
+                       pci_power_name(dev->current_state),
+                       pci_power_name(state));
                return -EINVAL;
        }
 
@@ -851,6 +861,12 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
                return -EIO;
 
        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+       if (pmcsr == (u16) ~0) {
+               pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
+                       pci_power_name(dev->current_state),
+                       pci_power_name(state));
+               return -EIO;
+       }
 
        /*
         * If we're (effectively) in D3, force entire word to 0.
@@ -886,13 +902,14 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                pci_dev_d3_sleep(dev);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
-               udelay(PCI_PM_D2_DELAY);
+               msleep(PCI_PM_D2_DELAY);
 
        pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
        dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
        if (dev->current_state != state)
-               pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
-                        dev->current_state);
+               pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
+                        pci_power_name(dev->current_state),
+                        pci_power_name(state));
 
        /*
         * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@@ -959,24 +976,11 @@ void pci_refresh_power_state(struct pci_dev *dev)
 }
 
 /**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
-       if (platform_pci_power_manageable(dev))
-               platform_pci_set_power_state(dev, PCI_D0);
-
-       pci_raw_set_power_state(dev, PCI_D0);
-       pci_update_current_state(dev, PCI_D0);
-}
-
-/**
  * pci_platform_power_transition - Use platform to change device power state
  * @dev: PCI device to handle.
  * @state: State to put the device into.
  */
-static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 {
        int error;
 
@@ -992,6 +996,7 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 
        return error;
 }
+EXPORT_SYMBOL_GPL(pci_platform_power_transition);
 
 /**
  * pci_wakeup - Wake up a PCI device
@@ -1015,34 +1020,70 @@ void pci_wakeup_bus(struct pci_bus *bus)
                pci_walk_bus(bus, pci_wakeup, NULL);
 }
 
+static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
+{
+       int delay = 1;
+       u32 id;
+
+       /*
+        * After reset, the device should not silently discard config
+        * requests, but it may still indicate that it needs more time by
+        * responding to them with CRS completions.  The Root Port will
+        * generally synthesize ~0 data to complete the read (except when
+        * CRS SV is enabled and the read was for the Vendor ID; in that
+        * case it synthesizes 0x0001 data).
+        *
+        * Wait for the device to return a non-CRS completion.  Read the
+        * Command register instead of Vendor ID so we don't have to
+        * contend with the CRS SV value.
+        */
+       pci_read_config_dword(dev, PCI_COMMAND, &id);
+       while (id == ~0) {
+               if (delay > timeout) {
+                       pci_warn(dev, "not ready %dms after %s; giving up\n",
+                                delay - 1, reset_type);
+                       return -ENOTTY;
+               }
+
+               if (delay > 1000)
+                       pci_info(dev, "not ready %dms after %s; waiting\n",
+                                delay - 1, reset_type);
+
+               msleep(delay);
+               delay *= 2;
+               pci_read_config_dword(dev, PCI_COMMAND, &id);
+       }
+
+       if (delay > 1000)
+               pci_info(dev, "ready %dms after %s\n", delay - 1,
+                        reset_type);
+
+       return 0;
+}
+
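As a rough illustration of the polling pattern pci_dev_wait() uses above — exponential backoff until a config read stops returning ~0 — here is a minimal userspace sketch. read_command() is a made-up stand-in for the config-space accessor and the 300 ms "becomes ready" point is arbitrary; this is a simulation, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical stand-in for pci_read_config_dword(dev, PCI_COMMAND, ...). */
static uint32_t read_command(int elapsed_ms)
{
	return elapsed_ms < 300 ? ~0u : 0x0007;	/* "ready" after ~300 ms */
}

static int dev_wait(const char *reset_type, int timeout_ms)
{
	int delay = 1, elapsed = 0;

	while (read_command(elapsed) == ~0u) {
		if (delay > timeout_ms) {
			printf("not ready %dms after %s; giving up\n",
			       elapsed, reset_type);
			return -1;
		}
		usleep(delay * 1000);	/* msleep(delay) in the kernel */
		elapsed += delay;
		delay *= 2;		/* 1, 2, 4, 8, ... ms between polls */
	}
	printf("ready %dms after %s\n", elapsed, reset_type);
	return 0;
}

int main(void)
{
	return dev_wait("bus reset", 60000) ? 1 : 0;
}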
 /**
- * __pci_start_power_transition - Start power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
+ * pci_power_up - Put the given device into D0
+ * @dev: PCI device to power up
  */
-static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+int pci_power_up(struct pci_dev *dev)
 {
-       if (state == PCI_D0) {
-               pci_platform_power_transition(dev, PCI_D0);
+       pci_platform_power_transition(dev, PCI_D0);
+
+       /*
+        * Mandatory power management transition delays are handled in
+        * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
+        * corresponding bridge.
+        */
+       if (dev->runtime_d3cold) {
                /*
-                * Mandatory power management transition delays, see
-                * PCI Express Base Specification Revision 2.0 Section
-                * 6.6.1: Conventional Reset.  Do not delay for
-                * devices powered on/off by corresponding bridge,
-                * because have already delayed for the bridge.
+                * When powering on a bridge from D3cold, the whole hierarchy
+                * may be powered on into the D0uninitialized state; resume the
+                * devices to give them a chance to suspend again.
                 */
-               if (dev->runtime_d3cold) {
-                       if (dev->d3cold_delay && !dev->imm_ready)
-                               msleep(dev->d3cold_delay);
-                       /*
-                        * When powering on a bridge from D3cold, the
-                        * whole hierarchy may be powered on into
-                        * D0uninitialized state, resume them to give
-                        * them a chance to suspend again
-                        */
-                       pci_wakeup_bus(dev->subordinate);
-               }
+               pci_wakeup_bus(dev->subordinate);
        }
+
+       return pci_raw_set_power_state(dev, PCI_D0);
 }
 
 /**
@@ -1070,27 +1111,6 @@ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
 }
 
 /**
- * __pci_complete_power_transition - Complete power transition of a PCI device
- * @dev: PCI device to handle.
- * @state: State to put the device into.
- *
- * This function should not be called directly by device drivers.
- */
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
-{
-       int ret;
-
-       if (state <= PCI_D0)
-               return -EINVAL;
-       ret = pci_platform_power_transition(dev, state);
-       /* Power off the bridge may power off the whole hierarchy */
-       if (!ret && state == PCI_D3cold)
-               pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
-
-/**
  * pci_set_power_state - Set the power state of a PCI device
  * @dev: PCI device to handle.
  * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
@@ -1130,7 +1150,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        if (dev->current_state == state)
                return 0;
 
-       __pci_start_power_transition(dev, state);
+       if (state == PCI_D0)
+               return pci_power_up(dev);
 
        /*
         * This device is quirked not to be put into D3, so don't put it in
@@ -1146,10 +1167,14 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
        error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
                                        PCI_D3hot : state);
 
-       if (!__pci_complete_power_transition(dev, state))
-               error = 0;
+       if (pci_platform_power_transition(dev, state))
+               return error;
 
-       return error;
+       /* Powering off a bridge may power off the whole hierarchy */
+       if (state == PCI_D3cold)
+               pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+
+       return 0;
 }
 EXPORT_SYMBOL(pci_set_power_state);
 
@@ -1361,6 +1386,7 @@ int pci_save_state(struct pci_dev *dev)
 
        pci_save_ltr_state(dev);
        pci_save_dpc_state(dev);
+       pci_save_aer_state(dev);
        return pci_save_vc_state(dev);
 }
 EXPORT_SYMBOL(pci_save_state);
@@ -1474,6 +1500,7 @@ void pci_restore_state(struct pci_dev *dev)
        pci_restore_dpc_state(dev);
 
        pci_cleanup_aer_error_status_regs(dev);
+       pci_restore_aer_state(dev);
 
        pci_restore_config_space(dev);
 
@@ -3768,7 +3795,7 @@ void pci_release_selected_regions(struct pci_dev *pdev, int bars)
 {
        int i;
 
-       for (i = 0; i < 6; i++)
+       for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if (bars & (1 << i))
                        pci_release_region(pdev, i);
 }
@@ -3779,7 +3806,7 @@ static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
 {
        int i;
 
-       for (i = 0; i < 6; i++)
+       for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if (bars & (1 << i))
                        if (__pci_request_region(pdev, i, res_name, excl))
                                goto err_out;
@@ -3827,7 +3854,7 @@ EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
 
 void pci_release_regions(struct pci_dev *pdev)
 {
-       pci_release_selected_regions(pdev, (1 << 6) - 1);
+       pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
 }
 EXPORT_SYMBOL(pci_release_regions);
 
@@ -3846,7 +3873,8 @@ EXPORT_SYMBOL(pci_release_regions);
  */
 int pci_request_regions(struct pci_dev *pdev, const char *res_name)
 {
-       return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
+       return pci_request_selected_regions(pdev,
+                       ((1 << PCI_STD_NUM_BARS) - 1), res_name);
 }
 EXPORT_SYMBOL(pci_request_regions);
 
@@ -3868,7 +3896,7 @@ EXPORT_SYMBOL(pci_request_regions);
 int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
 {
        return pci_request_selected_regions_exclusive(pdev,
-                                       ((1 << 6) - 1), res_name);
+                               ((1 << PCI_STD_NUM_BARS) - 1), res_name);
 }
 EXPORT_SYMBOL(pci_request_regions_exclusive);
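For context on the mask used in the hunks above: PCI_STD_NUM_BARS is the number of standard BARs, which the kernel defines as 6, so (1 << PCI_STD_NUM_BARS) - 1 is the same 0x3f mask the old open-coded (1 << 6) - 1 produced. A tiny standalone sketch of the idiom:

#include <stdio.h>

#define PCI_STD_NUM_BARS 6	/* number of standard BARs */

int main(void)
{
	int bars = (1 << PCI_STD_NUM_BARS) - 1;	/* 0x3f: BARs 0..5 */

	for (int i = 0; i < PCI_STD_NUM_BARS; i++)
		if (bars & (1 << i))
			printf("would request/release BAR %d\n", i);
	return 0;
}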
 
@@ -4430,47 +4458,6 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_wait_for_pending_transaction);
 
-static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
-{
-       int delay = 1;
-       u32 id;
-
-       /*
-        * After reset, the device should not silently discard config
-        * requests, but it may still indicate that it needs more time by
-        * responding to them with CRS completions.  The Root Port will
-        * generally synthesize ~0 data to complete the read (except when
-        * CRS SV is enabled and the read was for the Vendor ID; in that
-        * case it synthesizes 0x0001 data).
-        *
-        * Wait for the device to return a non-CRS completion.  Read the
-        * Command register instead of Vendor ID so we don't have to
-        * contend with the CRS SV value.
-        */
-       pci_read_config_dword(dev, PCI_COMMAND, &id);
-       while (id == ~0) {
-               if (delay > timeout) {
-                       pci_warn(dev, "not ready %dms after %s; giving up\n",
-                                delay - 1, reset_type);
-                       return -ENOTTY;
-               }
-
-               if (delay > 1000)
-                       pci_info(dev, "not ready %dms after %s; waiting\n",
-                                delay - 1, reset_type);
-
-               msleep(delay);
-               delay *= 2;
-               pci_read_config_dword(dev, PCI_COMMAND, &id);
-       }
-
-       if (delay > 1000)
-               pci_info(dev, "ready %dms after %s\n", delay - 1,
-                        reset_type);
-
-       return 0;
-}
-
 /**
  * pcie_has_flr - check if a device supports function level resets
  * @dev: device to check
@@ -4605,16 +4592,19 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
        pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
        pci_dev_d3_sleep(dev);
 
-       return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+       return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
 }
+
 /**
- * pcie_wait_for_link - Wait until link is active or inactive
+ * pcie_wait_for_link_delay - Wait until link is active or inactive
  * @pdev: Bridge device
  * @active: waiting for active or inactive?
+ * @delay: Delay to wait after link has become active (in ms)
  *
  * Use this to wait till link becomes active or inactive.
  */
-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
+                                    int delay)
 {
        int timeout = 1000;
        bool ret;
@@ -4651,13 +4641,144 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
                timeout -= 10;
        }
        if (active && ret)
-               msleep(100);
+               msleep(delay);
        else if (ret != active)
                pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
                        active ? "set" : "cleared");
        return ret == active;
 }
 
+/**
+ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+{
+       return pcie_wait_for_link_delay(pdev, active, 100);
+}
+
+/*
+ * Find maximum D3cold delay required by all the devices on the bus.  The
+ * spec says 100 ms, but firmware can lower it and we allow drivers to
+ * increase it as well.
+ *
+ * Called with @pci_bus_sem locked for reading.
+ */
+static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
+{
+       const struct pci_dev *pdev;
+       int min_delay = 100;
+       int max_delay = 0;
+
+       list_for_each_entry(pdev, &bus->devices, bus_list) {
+               if (pdev->d3cold_delay < min_delay)
+                       min_delay = pdev->d3cold_delay;
+               if (pdev->d3cold_delay > max_delay)
+                       max_delay = pdev->d3cold_delay;
+       }
+
+       return max(min_delay, max_delay);
+}
+
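A standalone sketch of the delay selection above, with a made-up device list: firmware-lowered values pull min_delay below the 100 ms spec default, driver-raised values push max_delay above it, and the larger of the two wins.

#include <stdio.h>

int main(void)
{
	/* Hypothetical d3cold_delay values (ms) for devices on one bus */
	int d3cold_delay[] = { 100, 20, 100 };
	int min_delay = 100, max_delay = 0;

	for (unsigned int i = 0; i < sizeof(d3cold_delay) / sizeof(*d3cold_delay); i++) {
		if (d3cold_delay[i] < min_delay)
			min_delay = d3cold_delay[i];
		if (d3cold_delay[i] > max_delay)
			max_delay = d3cold_delay[i];
	}
	printf("bus D3cold delay: %d ms\n",
	       max_delay > min_delay ? max_delay : min_delay);
	return 0;
}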
+/**
+ * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
+ * @dev: PCI bridge
+ *
+ * Handle necessary delays before access to the devices on the secondary
+ * side of the bridge are permitted after D3cold to D0 transition.
+ *
+ * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
+ * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
+ * 4.3.2.
+ */
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+{
+       struct pci_dev *child;
+       int delay;
+
+       if (pci_dev_is_disconnected(dev))
+               return;
+
+       if (!pci_is_bridge(dev) || !dev->bridge_d3)
+               return;
+
+       down_read(&pci_bus_sem);
+
+       /*
+        * We only deal with devices that are currently present on the bus.
+        * For any hot-added devices the access delay is handled in pciehp
+        * board_added(). In case of ACPI hotplug the firmware is expected
+        * to configure the devices before the OS is notified.
+        */
+       if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
+               up_read(&pci_bus_sem);
+               return;
+       }
+
+       /* Take d3cold_delay requirements into account */
+       delay = pci_bus_max_d3cold_delay(dev->subordinate);
+       if (!delay) {
+               up_read(&pci_bus_sem);
+               return;
+       }
+
+       child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
+                                bus_list);
+       up_read(&pci_bus_sem);
+
+       /*
+        * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
+        * accessing the device after reset (that is, 1000 ms + 100 ms). In
+        * practice this should not be needed because we don't do power
+        * management for them (see pci_bridge_d3_possible()).
+        */
+       if (!pci_is_pcie(dev)) {
+               pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
+               msleep(1000 + delay);
+               return;
+       }
+
+       /*
+        * PCIe downstream and root ports that do not support speeds greater
+        * than 5 GT/s need to wait a minimum of 100 ms. For higher speeds
+        * (gen3) we need to wait first for the data link layer to become
+        * active.
+        *
+        * However, 100 ms is the minimum and the PCIe spec says the
+        * software must allow at least 1 s before it can determine that a
+        * device that did not respond is broken. There is evidence that
+        * 100 ms is not always enough; for example, certain Titan Ridge
+        * xHCI controllers do not always respond to configuration requests
+        * if we only wait for 100 ms (see
+        * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
+        *
+        * Therefore we wait for 100 ms and check for the device presence.
+        * If it is still not present, give it an additional 100 ms.
+        */
+       if (!pcie_downstream_port(dev))
+               return;
+
+       if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+               pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+               msleep(delay);
+       } else {
+               pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+                       delay);
+               if (!pcie_wait_for_link_delay(dev, true, delay)) {
+                       /* Did not train, no need to wait any further */
+                       return;
+               }
+       }
+
+       if (!pci_device_is_present(child)) {
+               pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
+               msleep(delay);
+       }
+}
+
 void pci_reset_secondary_bus(struct pci_dev *dev)
 {
        u16 ctrl;
@@ -6288,8 +6409,13 @@ static int __init pci_setup(char *str)
                                pcie_ecrc_get_policy(str + 5);
                        } else if (!strncmp(str, "hpiosize=", 9)) {
                                pci_hotplug_io_size = memparse(str + 9, &str);
+                       } else if (!strncmp(str, "hpmmiosize=", 11)) {
+                               pci_hotplug_mmio_size = memparse(str + 11, &str);
+                       } else if (!strncmp(str, "hpmmioprefsize=", 15)) {
+                               pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
                        } else if (!strncmp(str, "hpmemsize=", 10)) {
-                               pci_hotplug_mem_size = memparse(str + 10, &str);
+                               pci_hotplug_mmio_size = memparse(str + 10, &str);
+                               pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
                        } else if (!strncmp(str, "hpbussize=", 10)) {
                                pci_hotplug_bus_size =
                                        simple_strtoul(str + 10, &str, 0);
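Roughly how the new options interact, as a simplified userspace sketch (parse_size() stands in for the kernel's memparse() and ignores K/M/G suffixes): hpmemsize= now sets both the MMIO and prefetchable MMIO windows, while the new hpmmiosize= and hpmmioprefsize= set them individually.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long pci_hotplug_mmio_size, pci_hotplug_mmio_pref_size;

/* Simplified stand-in for the kernel's memparse() (no size suffixes) */
static unsigned long parse_size(const char *s)
{
	return strtoul(s, NULL, 0);
}

static void pci_setup_one(const char *str)
{
	if (!strncmp(str, "hpmmiosize=", 11))
		pci_hotplug_mmio_size = parse_size(str + 11);
	else if (!strncmp(str, "hpmmioprefsize=", 15))
		pci_hotplug_mmio_pref_size = parse_size(str + 15);
	else if (!strncmp(str, "hpmemsize=", 10)) {
		pci_hotplug_mmio_size = parse_size(str + 10);
		pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
	}
}

int main(void)
{
	pci_setup_one("hpmemsize=0x200000");
	printf("mmio=%#lx mmio_pref=%#lx\n",
	       pci_hotplug_mmio_size, pci_hotplug_mmio_pref_size);
	return 0;
}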
index 3f6947e..a603b74 100644 (file)
@@ -12,6 +12,7 @@ extern const unsigned char pcie_link_speed[];
 extern bool pci_early_dump;
 
 bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
+bool pcie_cap_has_rtctl(const struct pci_dev *dev);
 
 /* Functions internal to the PCI core code */
 
@@ -85,7 +86,7 @@ struct pci_platform_pm_ops {
 int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
 void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
 void pci_refresh_power_state(struct pci_dev *dev);
-void pci_power_up(struct pci_dev *dev);
+int pci_power_up(struct pci_dev *dev);
 void pci_disable_enabled_device(struct pci_dev *dev);
 int pci_finish_runtime_suspend(struct pci_dev *dev);
 void pcie_clear_root_pme_status(struct pci_dev *dev);
@@ -104,6 +105,7 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev);
 void pci_free_cap_save_buffers(struct pci_dev *dev);
 bool pci_bridge_d3_possible(struct pci_dev *dev);
 void pci_bridge_d3_update(struct pci_dev *dev);
+void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
 
 static inline void pci_wakeup_event(struct pci_dev *dev)
 {
@@ -218,7 +220,8 @@ extern const struct device_type pci_dev_type;
 extern const struct attribute_group *pci_bus_groups[];
 
 extern unsigned long pci_hotplug_io_size;
-extern unsigned long pci_hotplug_mem_size;
+extern unsigned long pci_hotplug_mmio_size;
+extern unsigned long pci_hotplug_mmio_pref_size;
 extern unsigned long pci_hotplug_bus_size;
 
 /**
@@ -456,6 +459,22 @@ static inline void pci_ats_init(struct pci_dev *d) { }
 static inline void pci_restore_ats_state(struct pci_dev *dev) { }
 #endif /* CONFIG_PCI_ATS */
 
+#ifdef CONFIG_PCI_PRI
+void pci_pri_init(struct pci_dev *dev);
+void pci_restore_pri_state(struct pci_dev *pdev);
+#else
+static inline void pci_pri_init(struct pci_dev *dev) { }
+static inline void pci_restore_pri_state(struct pci_dev *pdev) { }
+#endif
+
+#ifdef CONFIG_PCI_PASID
+void pci_pasid_init(struct pci_dev *dev);
+void pci_restore_pasid_state(struct pci_dev *pdev);
+#else
+static inline void pci_pasid_init(struct pci_dev *dev) { }
+static inline void pci_restore_pasid_state(struct pci_dev *pdev) { }
+#endif
+
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
@@ -541,14 +560,6 @@ static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
 static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
 #endif
 
-#ifdef CONFIG_PCIEASPM_DEBUG
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev);
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev);
-#else
-static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) { }
-static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) { }
-#endif
-
 #ifdef CONFIG_PCIE_ECRC
 void pcie_set_ecrc_checking(struct pci_dev *dev);
 void pcie_ecrc_get_policy(char *str);
@@ -667,4 +678,8 @@ static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
 }
 #endif
 
+#ifdef CONFIG_PCIEASPM
+extern const struct attribute_group aspm_ctrl_attr_group;
+#endif
+
 #endif /* DRIVERS_PCI_H */
index 362eb8c..6e3c04b 100644 (file)
@@ -4,7 +4,6 @@
 #
 config PCIEPORTBUS
        bool "PCI Express Port Bus support"
-       depends on PCI
        help
          This enables PCI Express Port Bus support. Users can then enable
          support for Native Hot-Plug, Advanced Error Reporting, Power
@@ -63,7 +62,6 @@ config PCIE_ECRC
 #
 config PCIEASPM
        bool "PCI Express ASPM control" if EXPERT
-       depends on PCI && PCIEPORTBUS
        default y
        help
          This enables OS control over PCI Express ASPM (Active State
@@ -79,13 +77,6 @@ config PCIEASPM
 
          When in doubt, say Y.
 
-config PCIEASPM_DEBUG
-       bool "Debug PCI Express ASPM"
-       depends on PCIEASPM
-       help
-         This enables PCI Express ASPM debug support. It will add per-device
-         interface to control ASPM.
-
 choice
        prompt "Default ASPM policy"
        default PCIEASPM_DEFAULT
@@ -135,7 +126,6 @@ config PCIE_DPC
 
 config PCIE_PTM
        bool "PCI Express Precision Time Measurement support"
-       depends on PCIEPORTBUS
        help
          This enables PCI Express Precision Time Measurement (PTM)
          support.
index b45bc47..1ca86f2 100644 (file)
@@ -15,6 +15,7 @@
 #define pr_fmt(fmt) "AER: " fmt
 #define dev_fmt pr_fmt
 
+#include <linux/bitops.h>
 #include <linux/cper.h>
 #include <linux/pci.h>
 #include <linux/pci-acpi.h>
@@ -36,7 +37,7 @@
 #define AER_ERROR_SOURCES_MAX          128
 
 #define AER_MAX_TYPEOF_COR_ERRS                16      /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS      26      /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS      27      /* as per PCI_ERR_UNCOR_STATUS*/
 
 struct aer_err_source {
        unsigned int status;
@@ -201,6 +202,7 @@ void pcie_set_ecrc_checking(struct pci_dev *dev)
 
 /**
  * pcie_ecrc_get_policy - parse kernel command-line ecrc option
+ * @str: ECRC policy from kernel command line to use
  */
 void pcie_ecrc_get_policy(char *str)
 {
@@ -448,12 +450,70 @@ int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
        return 0;
 }
 
+void pci_save_aer_state(struct pci_dev *dev)
+{
+       struct pci_cap_saved_state *save_state;
+       u32 *cap;
+       int pos;
+
+       pos = dev->aer_cap;
+       if (!pos)
+               return;
+
+       save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+       if (!save_state)
+               return;
+
+       cap = &save_state->cap.data[0];
+       pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, cap++);
+       pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, cap++);
+       pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, cap++);
+       pci_read_config_dword(dev, pos + PCI_ERR_CAP, cap++);
+       if (pcie_cap_has_rtctl(dev))
+               pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, cap++);
+}
+
+void pci_restore_aer_state(struct pci_dev *dev)
+{
+       struct pci_cap_saved_state *save_state;
+       u32 *cap;
+       int pos;
+
+       pos = dev->aer_cap;
+       if (!pos)
+               return;
+
+       save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_ERR);
+       if (!save_state)
+               return;
+
+       cap = &save_state->cap.data[0];
+       pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, *cap++);
+       pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, *cap++);
+       pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, *cap++);
+       pci_write_config_dword(dev, pos + PCI_ERR_CAP, *cap++);
+       if (pcie_cap_has_rtctl(dev))
+               pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, *cap++);
+}
+
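The save and restore helpers above must walk the saved buffer in the same register order, with a fifth dword only when the device has Root Control (Root Ports and RC Event Collectors). A small standalone model of that invariant; the struct and values below are illustrative, not kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative register block; the field names mirror what the helpers copy. */
struct aer_regs {
	uint32_t uncor_mask, uncor_sever, cor_mask, err_cap, root_cmd;
};

static void save_aer(const struct aer_regs *r, uint32_t *cap, int has_rtctl)
{
	*cap++ = r->uncor_mask;
	*cap++ = r->uncor_sever;
	*cap++ = r->cor_mask;
	*cap++ = r->err_cap;
	if (has_rtctl)			/* Root Ports / RCECs only */
		*cap++ = r->root_cmd;
}

static void restore_aer(struct aer_regs *r, const uint32_t *cap, int has_rtctl)
{
	r->uncor_mask = *cap++;
	r->uncor_sever = *cap++;
	r->cor_mask = *cap++;
	r->err_cap = *cap++;
	if (has_rtctl)
		r->root_cmd = *cap++;
}

int main(void)
{
	struct aer_regs before = { 1, 2, 3, 4, 5 }, after = { 0 };
	uint32_t buf[5];

	save_aer(&before, buf, 1);
	restore_aer(&after, buf, 1);
	printf("ROOT_COMMAND round-tripped as %u\n", after.root_cmd);
	return 0;
}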
 void pci_aer_init(struct pci_dev *dev)
 {
+       int n;
+
        dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+       if (!dev->aer_cap)
+               return;
 
-       if (dev->aer_cap)
-               dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+       dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+       /*
+        * We save/restore PCI_ERR_UNCOR_MASK, PCI_ERR_UNCOR_SEVER,
+        * PCI_ERR_COR_MASK, and PCI_ERR_CAP.  Root and Root Complex Event
+        * Collectors also implement PCI_ERR_ROOT_COMMAND (PCIe r5.0, sec
+        * 7.8.4).
+        */
+       n = pcie_cap_has_rtctl(dev) ? 5 : 4;
+       pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_ERR, sizeof(u32) * n);
 
        pci_cleanup_aer_error_status_regs(dev);
 }
@@ -560,6 +620,7 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
        "BlockedTLP",                   /* Bit Position 23      */
        "AtomicOpBlocked",              /* Bit Position 24      */
        "TLPBlockedErr",                /* Bit Position 25      */
+       "PoisonTLPBlocked",             /* Bit Position 26      */
 };
 
 static const char *aer_agent_string[] = {
@@ -657,7 +718,8 @@ const struct attribute_group aer_stats_attr_group = {
 static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
                                   struct aer_err_info *info)
 {
-       int status, i, max = -1;
+       unsigned long status = info->status & ~info->mask;
+       int i, max = -1;
        u64 *counter = NULL;
        struct aer_stats *aer_stats = pdev->aer_stats;
 
@@ -682,10 +744,8 @@ static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
                break;
        }
 
-       status = (info->status & ~info->mask);
-       for (i = 0; i < max; i++)
-               if (status & (1 << i))
-                       counter[i]++;
+       for_each_set_bit(i, &status, max)
+               counter[i]++;
 }
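For readers unfamiliar with for_each_set_bit(): over a single word it behaves roughly like the loop below (the kernel macro additionally handles multi-word bitmaps and skips directly between set bits).

#include <stdio.h>

int main(void)
{
	unsigned long status = 0x00000411;	/* example info->status & ~info->mask */
	int max = 27;				/* e.g. AER_MAX_TYPEOF_UNCOR_ERRS */

	/* Roughly what for_each_set_bit(i, &status, max) visits */
	for (int i = 0; i < max; i++)
		if (status & (1UL << i))
			printf("bit %d set -> counter[%d]++\n", i, i);
	return 0;
}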
 
 static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
@@ -717,14 +777,11 @@ static void __print_tlp_header(struct pci_dev *dev,
 static void __aer_print_error(struct pci_dev *dev,
                              struct aer_err_info *info)
 {
-       int i, status;
+       unsigned long status = info->status & ~info->mask;
        const char *errmsg = NULL;
-       status = (info->status & ~info->mask);
-
-       for (i = 0; i < 32; i++) {
-               if (!(status & (1 << i)))
-                       continue;
+       int i;
 
+       for_each_set_bit(i, &status, 32) {
                if (info->severity == AER_CORRECTABLE)
                        errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
                                aer_correctable_error_string[i] : NULL;
@@ -1204,7 +1261,8 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
 
 /**
  * aer_isr - consume errors detected by root port
- * @work: definition of this work item
+ * @irq: IRQ assigned to Root Port
+ * @context: pointer to Root Port data structure
  *
  * Invoked, as DPC, when root port records new detected error
  */
index 652ef23..0dcd443 100644 (file)
@@ -64,6 +64,7 @@ struct pcie_link_state {
        u32 clkpm_capable:1;            /* Clock PM capable? */
        u32 clkpm_enabled:1;            /* Current Clock PM state */
        u32 clkpm_default:1;            /* Default Clock PM state by BIOS */
+       u32 clkpm_disable:1;            /* Clock PM disabled */
 
        /* Exit latencies */
        struct aspm_latency latency_up; /* Upstream direction exit latency */
@@ -161,8 +162,11 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
 
 static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
 {
-       /* Don't enable Clock PM if the link is not Clock PM capable */
-       if (!link->clkpm_capable)
+       /*
+        * Don't enable Clock PM if the link is not Clock PM capable
+        * or Clock PM is disabled
+        */
+       if (!link->clkpm_capable || link->clkpm_disable)
                enable = 0;
        /* Need nothing if the specified equals to current state */
        if (link->clkpm_enabled == enable)
@@ -192,7 +196,8 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
        }
        link->clkpm_enabled = enabled;
        link->clkpm_default = enabled;
-       link->clkpm_capable = (blacklist) ? 0 : capable;
+       link->clkpm_capable = capable;
+       link->clkpm_disable = blacklist ? 1 : 0;
 }
 
 static bool pcie_retrain_link(struct pcie_link_state *link)
@@ -894,6 +899,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
        return link;
 }
 
+static void pcie_aspm_update_sysfs_visibility(struct pci_dev *pdev)
+{
+       struct pci_dev *child;
+
+       list_for_each_entry(child, &pdev->subordinate->devices, bus_list)
+               sysfs_update_group(&child->dev.kobj, &aspm_ctrl_attr_group);
+}
+
 /*
  * pcie_aspm_init_link_state: Initiate PCI express link state.
  * It is called after the pcie and its children devices are scanned.
@@ -955,6 +968,8 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
                pcie_set_clkpm(link, policy_to_clkpm_state(link));
        }
 
+       pcie_aspm_update_sysfs_visibility(pdev);
+
 unlock:
        mutex_unlock(&aspm_lock);
 out:
@@ -1061,19 +1076,26 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
        up_read(&pci_bus_sem);
 }
 
-static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static struct pcie_link_state *pcie_aspm_get_link(struct pci_dev *pdev)
 {
-       struct pci_dev *parent = pdev->bus->self;
-       struct pcie_link_state *link;
+       struct pci_dev *bridge;
 
        if (!pci_is_pcie(pdev))
-               return 0;
+               return NULL;
 
-       if (pcie_downstream_port(pdev))
-               parent = pdev;
-       if (!parent || !parent->link_state)
-               return -EINVAL;
+       bridge = pci_upstream_bridge(pdev);
+       if (!bridge || !pci_is_pcie(bridge))
+               return NULL;
 
+       return bridge->link_state;
+}
+
+static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+{
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+
+       if (!link)
+               return -EINVAL;
        /*
         * A driver requested that ASPM be disabled on this device, but
         * if we don't have permission to manage ASPM (e.g., on ACPI
@@ -1090,17 +1112,24 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
        if (sem)
                down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
-       link = parent->link_state;
        if (state & PCIE_LINK_STATE_L0S)
                link->aspm_disable |= ASPM_STATE_L0S;
        if (state & PCIE_LINK_STATE_L1)
-               link->aspm_disable |= ASPM_STATE_L1;
+               /* L1 PM substates require L1 */
+               link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+       if (state & PCIE_LINK_STATE_L1_1)
+               link->aspm_disable |= ASPM_STATE_L1_1;
+       if (state & PCIE_LINK_STATE_L1_2)
+               link->aspm_disable |= ASPM_STATE_L1_2;
+       if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+               link->aspm_disable |= ASPM_STATE_L1_1_PCIPM;
+       if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+               link->aspm_disable |= ASPM_STATE_L1_2_PCIPM;
        pcie_config_aspm_link(link, policy_to_aspm_state(link));
 
-       if (state & PCIE_LINK_STATE_CLKPM) {
-               link->clkpm_capable = 0;
-               pcie_set_clkpm(link, 0);
-       }
+       if (state & PCIE_LINK_STATE_CLKPM)
+               link->clkpm_disable = 1;
+       pcie_set_clkpm(link, policy_to_clkpm_state(link));
        mutex_unlock(&aspm_lock);
        if (sem)
                up_read(&pci_bus_sem);
@@ -1172,127 +1201,161 @@ module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
 /**
  * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device.
  * @pdev: Target device.
+ *
+ * Relies on the upstream bridge's link_state being valid.  The link_state
+ * is deallocated only when the last child of the bridge (i.e., @pdev or a
+ * sibling) is removed, and the caller should be holding a reference to
+ * @pdev, so this should be safe.
  */
 bool pcie_aspm_enabled(struct pci_dev *pdev)
 {
-       struct pci_dev *bridge = pci_upstream_bridge(pdev);
-       bool ret;
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
 
-       if (!bridge)
+       if (!link)
                return false;
 
-       mutex_lock(&aspm_lock);
-       ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false;
-       mutex_unlock(&aspm_lock);
-
-       return ret;
+       return link->aspm_enabled;
 }
 EXPORT_SYMBOL_GPL(pcie_aspm_enabled);
 
-#ifdef CONFIG_PCIEASPM_DEBUG
-static ssize_t link_state_show(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+static ssize_t aspm_attr_show_common(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf, u8 state)
 {
-       struct pci_dev *pci_device = to_pci_dev(dev);
-       struct pcie_link_state *link_state = pci_device->link_state;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
 
-       return sprintf(buf, "%d\n", link_state->aspm_enabled);
+       return sprintf(buf, "%d\n", (link->aspm_enabled & state) ? 1 : 0);
 }
 
-static ssize_t link_state_store(struct device *dev,
-               struct device_attribute *attr,
-               const char *buf,
-               size_t n)
+static ssize_t aspm_attr_store_common(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t len, u8 state)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct pcie_link_state *link, *root = pdev->link_state->root;
-       u32 state;
-
-       if (aspm_disabled)
-               return -EPERM;
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+       bool state_enable;
 
-       if (kstrtouint(buf, 10, &state))
-               return -EINVAL;
-       if ((state & ~ASPM_STATE_ALL) != 0)
+       if (strtobool(buf, &state_enable) < 0)
                return -EINVAL;
 
        down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
-       list_for_each_entry(link, &link_list, sibling) {
-               if (link->root != root)
-                       continue;
-               pcie_config_aspm_link(link, state);
+
+       if (state_enable) {
+               link->aspm_disable &= ~state;
+               /* need to enable L1 for substates */
+               if (state & ASPM_STATE_L1SS)
+                       link->aspm_disable &= ~ASPM_STATE_L1;
+       } else {
+               link->aspm_disable |= state;
        }
+
+       pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
        mutex_unlock(&aspm_lock);
        up_read(&pci_bus_sem);
-       return n;
+
+       return len;
 }
 
-static ssize_t clk_ctl_show(struct device *dev,
-               struct device_attribute *attr,
-               char *buf)
+#define ASPM_ATTR(_f, _s)                                              \
+static ssize_t _f##_show(struct device *dev,                           \
+                        struct device_attribute *attr, char *buf)      \
+{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); }     \
+                                                                       \
+static ssize_t _f##_store(struct device *dev,                          \
+                         struct device_attribute *attr,                \
+                         const char *buf, size_t len)                  \
+{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); }
+
+ASPM_ATTR(l0s_aspm, L0S)
+ASPM_ATTR(l1_aspm, L1)
+ASPM_ATTR(l1_1_aspm, L1_1)
+ASPM_ATTR(l1_2_aspm, L1_2)
+ASPM_ATTR(l1_1_pcipm, L1_1_PCIPM)
+ASPM_ATTR(l1_2_pcipm, L1_2_PCIPM)
+
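For reference, ASPM_ATTR(l0s_aspm, L0S) above expands to roughly the following show/store pair (written out here only for illustration of the macro, not as additional code):

static ssize_t l0s_aspm_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_L0S); }

static ssize_t l0s_aspm_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t len)
{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_L0S); }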
+static ssize_t clkpm_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
 {
-       struct pci_dev *pci_device = to_pci_dev(dev);
-       struct pcie_link_state *link_state = pci_device->link_state;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
 
-       return sprintf(buf, "%d\n", link_state->clkpm_enabled);
+       return sprintf(buf, "%d\n", link->clkpm_enabled);
 }
 
-static ssize_t clk_ctl_store(struct device *dev,
-               struct device_attribute *attr,
-               const char *buf,
-               size_t n)
+static ssize_t clkpm_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t len)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       bool state;
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+       bool state_enable;
 
-       if (strtobool(buf, &state))
+       if (strtobool(buf, &state_enable) < 0)
                return -EINVAL;
 
        down_read(&pci_bus_sem);
        mutex_lock(&aspm_lock);
-       pcie_set_clkpm_nocheck(pdev->link_state, state);
+
+       link->clkpm_disable = !state_enable;
+       pcie_set_clkpm(link, policy_to_clkpm_state(link));
+
        mutex_unlock(&aspm_lock);
        up_read(&pci_bus_sem);
 
-       return n;
+       return len;
 }
 
-static DEVICE_ATTR_RW(link_state);
-static DEVICE_ATTR_RW(clk_ctl);
+static DEVICE_ATTR_RW(clkpm);
+static DEVICE_ATTR_RW(l0s_aspm);
+static DEVICE_ATTR_RW(l1_aspm);
+static DEVICE_ATTR_RW(l1_1_aspm);
+static DEVICE_ATTR_RW(l1_2_aspm);
+static DEVICE_ATTR_RW(l1_1_pcipm);
+static DEVICE_ATTR_RW(l1_2_pcipm);
+
+static struct attribute *aspm_ctrl_attrs[] = {
+       &dev_attr_clkpm.attr,
+       &dev_attr_l0s_aspm.attr,
+       &dev_attr_l1_aspm.attr,
+       &dev_attr_l1_1_aspm.attr,
+       &dev_attr_l1_2_aspm.attr,
+       &dev_attr_l1_1_pcipm.attr,
+       &dev_attr_l1_2_pcipm.attr,
+       NULL
+};
 
-static char power_group[] = "power";
-void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
+static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj,
+                                          struct attribute *a, int n)
 {
-       struct pcie_link_state *link_state = pdev->link_state;
-
-       if (!link_state)
-               return;
-
-       if (link_state->aspm_support)
-               sysfs_add_file_to_group(&pdev->dev.kobj,
-                       &dev_attr_link_state.attr, power_group);
-       if (link_state->clkpm_capable)
-               sysfs_add_file_to_group(&pdev->dev.kobj,
-                       &dev_attr_clk_ctl.attr, power_group);
-}
+       struct device *dev = kobj_to_dev(kobj);
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+       static const u8 aspm_state_map[] = {
+               ASPM_STATE_L0S,
+               ASPM_STATE_L1,
+               ASPM_STATE_L1_1,
+               ASPM_STATE_L1_2,
+               ASPM_STATE_L1_1_PCIPM,
+               ASPM_STATE_L1_2_PCIPM,
+       };
 
-void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
-{
-       struct pcie_link_state *link_state = pdev->link_state;
+       if (aspm_disabled || !link)
+               return 0;
 
-       if (!link_state)
-               return;
+       if (n == 0)
+               return link->clkpm_capable ? a->mode : 0;
 
-       if (link_state->aspm_support)
-               sysfs_remove_file_from_group(&pdev->dev.kobj,
-                       &dev_attr_link_state.attr, power_group);
-       if (link_state->clkpm_capable)
-               sysfs_remove_file_from_group(&pdev->dev.kobj,
-                       &dev_attr_clk_ctl.attr, power_group);
+       return link->aspm_capable & aspm_state_map[n - 1] ? a->mode : 0;
 }
-#endif
+
+const struct attribute_group aspm_ctrl_attr_group = {
+       .name = "link",
+       .attrs = aspm_ctrl_attrs,
+       .is_visible = aspm_ctrl_attrs_are_visible,
+};
 
 static int __init pcie_aspm_disable(char *str)
 {
index a32ec34..e06f42f 100644 (file)
@@ -291,7 +291,7 @@ static int dpc_probe(struct pcie_device *dev)
        int status;
        u16 ctl, cap;
 
-       if (pcie_aer_get_firmware_first(pdev))
+       if (pcie_aer_get_firmware_first(pdev) && !pcie_ports_dpc_native)
                return -ENOTSUPP;
 
        dpc = devm_kzalloc(device, sizeof(*dpc), GFP_KERNEL);
index 944827a..1e67361 100644 (file)
@@ -25,6 +25,8 @@
 
 #define PCIE_PORT_DEVICE_MAXSERVICES   5
 
+extern bool pcie_ports_dpc_native;
+
 #ifdef CONFIG_PCIEAER
 int pcie_aer_init(void);
 #else
index 1b33012..5075cb9 100644 (file)
@@ -250,8 +250,13 @@ static int get_port_device_capability(struct pci_dev *dev)
                pcie_pme_interrupt_enable(dev, false);
        }
 
+       /*
+        * With dpc-native, allow Linux to use DPC even if it doesn't have
+        * permission to use AER.
+        */
        if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC) &&
-           pci_aer_available() && services & PCIE_PORT_SERVICE_AER)
+           pci_aer_available() &&
+           (pcie_ports_dpc_native || (services & PCIE_PORT_SERVICE_AER)))
                services |= PCIE_PORT_SERVICE_DPC;
 
        if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
index 0a87091..160d67c 100644 (file)
@@ -29,12 +29,20 @@ bool pcie_ports_disabled;
  */
 bool pcie_ports_native;
 
+/*
+ * If the user specified "pcie_ports=dpc-native", use the Linux DPC PCIe
+ * service even if the platform hasn't given us permission.
+ */
+bool pcie_ports_dpc_native;
+
 static int __init pcie_port_setup(char *str)
 {
        if (!strncmp(str, "compat", 6))
                pcie_ports_disabled = true;
        else if (!strncmp(str, "native", 6))
                pcie_ports_native = true;
+       else if (!strncmp(str, "dpc-native", 10))
+               pcie_ports_dpc_native = true;
 
        return 1;
 }
index 98cfa30..9361f3a 100644 (file)
@@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev)
                snprintf(clock_desc, sizeof(clock_desc), ">254ns");
                break;
        default:
-               snprintf(clock_desc, sizeof(clock_desc), "%udns",
+               snprintf(clock_desc, sizeof(clock_desc), "%uns",
                         dev->ptm_granularity);
                break;
        }
index 3d5271a..3cc1c32 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/msi.h>
 #include <linux/of_device.h>
 #include <linux/of_pci.h>
 #include <linux/pci_hotplug.h>
@@ -572,6 +573,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
                bridge->release_fn(bridge);
 
        pci_free_resource_list(&bridge->windows);
+       pci_free_resource_list(&bridge->dma_ranges);
 }
 
 static void pci_release_host_bridge_dev(struct device *dev)
@@ -897,6 +899,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
        else
                pr_info("PCI host bridge to bus %s\n", name);
 
+       if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
+               dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
+
        /* Add initial resources to the bus */
        resource_list_for_each_entry_safe(window, n, &resources) {
                list_move_tail(&window->node, &bridge->windows);
@@ -1089,14 +1094,15 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
  * @sec: updated with secondary bus number from EA
  * @sub: updated with subordinate bus number from EA
  *
- * If @dev is a bridge with EA capability, update @sec and @sub with
- * fixed bus numbers from the capability and return true.  Otherwise,
- * return false.
+ * If @dev is a bridge with EA capability that specifies valid secondary
+ * and subordinate bus numbers, return true with the bus numbers in @sec
+ * and @sub.  Otherwise return false.
  */
 static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
 {
        int ea, offset;
        u32 dw;
+       u8 ea_sec, ea_sub;
 
        if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
                return false;
@@ -1108,8 +1114,13 @@ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
 
        offset = ea + PCI_EA_FIRST_ENT;
        pci_read_config_dword(dev, offset, &dw);
-       *sec =  dw & PCI_EA_SEC_BUS_MASK;
-       *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+       ea_sec =  dw & PCI_EA_SEC_BUS_MASK;
+       ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
+       if (ea_sec  == 0 || ea_sub < ea_sec)
+               return false;
+
+       *sec = ea_sec;
+       *sub = ea_sub;
        return true;
 }
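An illustrative extraction of the fixed bus numbers from the first EA entry dword, matching the shape of the code above; the PCI_EA_* values below (secondary bus in bits 7:0, subordinate bus in bits 15:8) are assumptions written out for the example, not quotes of the kernel headers.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the first EA entry dword, for illustration only */
#define PCI_EA_SEC_BUS_MASK	0x000000ff
#define PCI_EA_SUB_BUS_MASK	0x0000ff00
#define PCI_EA_SUB_BUS_SHIFT	8

int main(void)
{
	uint32_t dw = 0x00000302;	/* hypothetical dword: sub=3, sec=2 */
	unsigned int ea_sec = dw & PCI_EA_SEC_BUS_MASK;
	unsigned int ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;

	/* The new validity check: reject sec == 0 or an inverted range */
	if (ea_sec == 0 || ea_sub < ea_sec)
		printf("EA bus numbers invalid; ignore them\n");
	else
		printf("fixed bus range %u-%u\n", ea_sec, ea_sub);
	return 0;
}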
 
@@ -2324,6 +2335,12 @@ static void pci_init_capabilities(struct pci_dev *dev)
        /* Address Translation Services */
        pci_ats_init(dev);
 
+       /* Page Request Interface */
+       pci_pri_init(dev);
+
+       /* Process Address Space ID */
+       pci_pasid_init(dev);
+
        /* Enable ACS P2P upstream forwarding */
        pci_enable_acs(dev);
 
index 5495537..6ef74bf 100644 (file)
@@ -258,13 +258,13 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
        }
 
        /* Make sure the caller is mapping a real resource for this device */
-       for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (dev->resource[i].flags & res_bit &&
                    pci_mmap_fits(dev, i, vma,  PCI_MMAP_PROCFS))
                        break;
        }
 
-       if (i >= PCI_ROM_RESOURCE)
+       if (i >= PCI_STD_NUM_BARS)
                return -ENODEV;
 
        if (fpriv->mmap_state == pci_mmap_mem &&
index 320255e..a241a09 100644 (file)
@@ -474,7 +474,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev)
 {
        int i;
 
-       for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *r = &dev->resource[i];
 
                if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
@@ -1809,7 +1809,7 @@ static void quirk_alder_ioapic(struct pci_dev *pdev)
         * The next five BARs all seem to be rubbish, so just clean
         * them out.
         */
-       for (i = 1; i < 6; i++)
+       for (i = 1; i < PCI_STD_NUM_BARS; i++)
                memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL,  PCI_DEVICE_ID_INTEL_EESSC,      quirk_alder_ioapic);
@@ -4081,6 +4081,40 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
 
 /*
+ * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices
+ * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx).
+ *
+ * Similarly to MIC x200, we need to add DMA aliases to allow buffer access
+ * when IOMMU is enabled.  These aliases allow computational unit access to
+ * host memory.  These aliases mark the whole VCA device as one IOMMU
+ * group.
+ *
+ * All possible slot numbers (0x20) are used, since we are unable to tell
+ * what slot is used on the other side.  This quirk is intended for both host
+ * and computational unit sides.  The VCA devices have up to five functions
+ * (four for DMA channels and one additional).
+ */
+static void quirk_pex_vca_alias(struct pci_dev *pdev)
+{
+       const unsigned int num_pci_slots = 0x20;
+       unsigned int slot;
+
+       for (slot = 0; slot < num_pci_slots; slot++) {
+               pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0));
+               pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x1));
+               pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x2));
+               pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x3));
+               pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x4));
+       }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
+
+/*
  * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are
  * associated not at the root bus, but at a bridge below. This quirk avoids
  * generating invalid DMA aliases.
@@ -4263,6 +4297,24 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
                         quirk_chelsio_T5_disable_root_port_attributes);
 
 /*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ *                       by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ *               the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires.  Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+       if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+               return 1;
+       return 0;
+}
+
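A trivial demonstration of the subset test above; the flag values are stand-ins for the real PCI_ACS_* bits.

#include <stdio.h>

static int acs_ctrl_enabled(unsigned short req, unsigned short ena)
{
	return (req & ena) == req;	/* 1 iff every requested control is provided */
}

int main(void)
{
	/* Stand-in flag values, not the real PCI_ACS_* definitions */
	unsigned short SV = 0x1, RR = 0x4, CR = 0x8, UF = 0x10;

	printf("%d\n", acs_ctrl_enabled(SV | RR, SV | RR | CR | UF));	/* 1 */
	printf("%d\n", acs_ctrl_enabled(SV | CR, SV));			/* 0 */
	return 0;
}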
+/*
  * AMD has indicated that the devices below do not support peer-to-peer
  * in any system where they are found in the southbridge with an AMD
  * IOMMU in the system.  Multifunction devices that do not support
@@ -4305,7 +4357,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
        /* Filter out flags not applicable to multifunction */
        acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
 
-       return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+       return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
 #else
        return -ENODEV;
 #endif
@@ -4313,33 +4365,38 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
 
 static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
 {
+       if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
+               return false;
+
+       switch (dev->device) {
        /*
-        * Effectively selects all downstream ports for whole ThunderX 1
-        * family by 0xf800 mask (which represents 8 SoCs), while the lower
-        * bits of device ID are used to indicate which subdevice is used
-        * within the SoC.
+        * Effectively selects all downstream ports for whole ThunderX1
+        * (which represents 8 SoCs).
         */
-       return (pci_is_pcie(dev) &&
-               (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) &&
-               ((dev->device & 0xf800) == 0xa000));
+       case 0xa000 ... 0xa7ff: /* ThunderX1 */
+       case 0xaf84:  /* ThunderX2 */
+       case 0xb884:  /* ThunderX3 */
+               return true;
+       default:
+               return false;
+       }
 }
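A quick standalone check that the old mask test, (device & 0xf800) == 0xa000, and the new 0xa000 ... 0xa7ff case above cover exactly the same ThunderX1 device IDs (the ThunderX2/ThunderX3 IDs are new, explicit additions).

#include <stdio.h>

int main(void)
{
	int mismatches = 0;

	for (unsigned int dev = 0; dev <= 0xffff; dev++) {
		int old_match = (dev & 0xf800) == 0xa000;
		int new_match = dev >= 0xa000 && dev <= 0xa7ff;

		if (old_match != new_match)
			mismatches++;
	}
	printf("mismatches between mask and range: %d\n", mismatches);	/* prints 0 */
	return 0;
}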
 
 static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
 {
+       if (!pci_quirk_cavium_acs_match(dev))
+               return -ENOTTY;
+
        /*
-        * Cavium root ports don't advertise an ACS capability.  However,
+        * Cavium Root Ports don't advertise an ACS capability.  However,
         * the RTL internally implements similar protection as if ACS had
-        * Request Redirection, Completion Redirection, Source Validation,
+        * Source Validation, Request Redirection, Completion Redirection,
         * and Upstream Forwarding features enabled.  Assert that the
         * hardware implements and enables equivalent ACS functionality for
         * these flags.
         */
-       acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
-       if (!pci_quirk_cavium_acs_match(dev))
-               return -ENOTTY;
-
-       return acs_flags ? 0 : 1;
+       return pci_acs_ctrl_enabled(acs_flags,
+               PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 }
 
 static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4349,13 +4406,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
         * transactions with others, allowing masking out these bits as if they
         * were unimplemented in the ACS capability.
         */
-       acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
-       return acs_flags ? 0 : 1;
+       return pci_acs_ctrl_enabled(acs_flags,
+               PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 }
 
 /*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
  * transactions and validate bus numbers in requests, but do not provide an
  * actual PCIe ACS capability.  This is the list of device IDs known to fall
  * into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4403,37 +4459,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
        return false;
 }
 
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
 static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
 {
-       u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
-                   INTEL_PCH_ACS_FLAGS : 0;
-
        if (!pci_quirk_intel_pch_acs_match(dev))
                return -ENOTTY;
 
-       return acs_flags & ~flags ? 0 : 1;
+       if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+               return pci_acs_ctrl_enabled(acs_flags,
+                       PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+       return pci_acs_ctrl_enabled(acs_flags, 0);
 }
 
 /*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
  * transactions and validate bus numbers in requests, but do not provide an
  * actual PCIe ACS capability.  Hardware supports source validation but it
  * will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers.  It is not possible for one root
- * port to pass traffic to another root port.  All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers.  It is not possible for one Root
+ * Port to pass traffic to another Root Port.  All PCIe transactions are
+ * terminated inside the Root Port.
  */
 static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
 {
-       u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
-       int ret = acs_flags & ~flags ? 0 : 1;
-
-       pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
-       return ret;
+       return pci_acs_ctrl_enabled(acs_flags,
+               PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 }
 
 static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4534,7 +4585,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
 
        pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
 
-       return acs_flags & ~ctrl ? 0 : 1;
+       return pci_acs_ctrl_enabled(acs_flags, ctrl);
 }
 
 static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4548,10 +4599,9 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
         * perform peer-to-peer with other functions, allowing us to mask out
         * these bits as if they were unimplemented in the ACS capability.
         */
-       acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
-                      PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
-
-       return acs_flags ? 0 : 1;
+       return pci_acs_ctrl_enabled(acs_flags,
+               PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+               PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
 }
 
 static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4562,9 +4612,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
         * Allow each Root Port to be in a separate IOMMU group by masking
         * SV/RR/CR/UF bits.
         */
-       acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
-       return acs_flags ? 0 : 1;
+       return pci_acs_ctrl_enabled(acs_flags,
+               PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
 }
 
 static const struct pci_dev_acs_enabled {
@@ -4666,6 +4715,17 @@ static const struct pci_dev_acs_enabled {
        { 0 }
 };
 
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev:       PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ *   -ENOTTY:  No quirk applies to this device; we can't tell whether the
+ *             device provides the desired controls
+ *   0:                Device does not provide all the desired controls
+ *   >0:       Device provides all the controls in @acs_flags
+ */
 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
 {
        const struct pci_dev_acs_enabled *i;
@@ -4706,7 +4766,7 @@ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
 #define INTEL_BSPR_REG_BPPD  (1 << 9)
 
 /* Upstream Peer Decode Configuration Register */
-#define INTEL_UPDCR_REG 0x1114
+#define INTEL_UPDCR_REG 0x1014
 /* 5:0 Peer Decode Enable bits */
 #define INTEL_UPDCR_REG_MASK 0x3f
 
index e7dbe21..f279826 100644 (file)
@@ -752,24 +752,32 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
 }
 
 /*
- * Helper function for sizing routines: find first available bus resource
- * of a given type.  Note: we intentionally skip the bus resources which
- * have already been assigned (that is, have non-NULL parent resource).
+ * Helper function for sizing routines.  Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return first unassigned resource of the correct type.  If there is none,
+ * return first assigned resource of the correct type.  If none of the
+ * above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish between already assigned and no resource of the correct type.
  */
-static struct resource *find_free_bus_resource(struct pci_bus *bus,
-                                              unsigned long type_mask,
-                                              unsigned long type)
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+                                                 unsigned long type_mask,
+                                                 unsigned long type)
 {
+       struct resource *r, *r_assigned = NULL;
        int i;
-       struct resource *r;
 
        pci_bus_for_each_resource(bus, r, i) {
                if (r == &ioport_resource || r == &iomem_resource)
                        continue;
                if (r && (r->flags & type_mask) == type && !r->parent)
                        return r;
+               if (r && (r->flags & type_mask) == type && !r_assigned)
+                       r_assigned = r;
        }
-       return NULL;
+       return r_assigned;
 }
 
 static resource_size_t calculate_iosize(resource_size_t size,
@@ -866,8 +874,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
                         struct list_head *realloc_head)
 {
        struct pci_dev *dev;
-       struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO,
-                                                       IORESOURCE_IO);
+       struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
+                                                          IORESOURCE_IO);
        resource_size_t size = 0, size0 = 0, size1 = 0;
        resource_size_t children_add_size = 0;
        resource_size_t min_align, align;
@@ -875,6 +883,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
        if (!b_res)
                return;
 
+       /* If resource is already assigned, nothing more to do */
+       if (b_res->parent)
+               return;
+
        min_align = window_alignment(bus, IORESOURCE_IO);
        list_for_each_entry(dev, &bus->devices, bus_list) {
                int i;
@@ -978,7 +990,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
        resource_size_t min_align, align, size, size0, size1;
        resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */
        int order, max_order;
-       struct resource *b_res = find_free_bus_resource(bus,
+       struct resource *b_res = find_bus_resource_of_type(bus,
                                        mask | IORESOURCE_PREFETCH, type);
        resource_size_t children_add_size = 0;
        resource_size_t children_add_align = 0;
@@ -987,6 +999,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
        if (!b_res)
                return -ENOSPC;
 
+       /* If resource is already assigned, nothing more to do */
+       if (b_res->parent)
+               return 0;
+
        memset(aligns, 0, sizeof(aligns));
        max_order = 0;
        size = 0;
@@ -1178,7 +1194,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
 {
        struct pci_dev *dev;
        unsigned long mask, prefmask, type2 = 0, type3 = 0;
-       resource_size_t additional_mem_size = 0, additional_io_size = 0;
+       resource_size_t additional_io_size = 0, additional_mmio_size = 0,
+                       additional_mmio_pref_size = 0;
        struct resource *b_res;
        int ret;
 
@@ -1212,7 +1229,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
                pci_bridge_check_ranges(bus);
                if (bus->self->is_hotplug_bridge) {
                        additional_io_size  = pci_hotplug_io_size;
-                       additional_mem_size = pci_hotplug_mem_size;
+                       additional_mmio_size = pci_hotplug_mmio_size;
+                       additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
                }
                /* Fall through */
        default:
@@ -1230,9 +1248,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
                if (b_res[2].flags & IORESOURCE_MEM_64) {
                        prefmask |= IORESOURCE_MEM_64;
                        ret = pbus_size_mem(bus, prefmask, prefmask,
-                                 prefmask, prefmask,
-                                 realloc_head ? 0 : additional_mem_size,
-                                 additional_mem_size, realloc_head);
+                               prefmask, prefmask,
+                               realloc_head ? 0 : additional_mmio_pref_size,
+                               additional_mmio_pref_size, realloc_head);
 
                        /*
                         * If successful, all non-prefetchable resources
@@ -1254,9 +1272,9 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
                if (!type2) {
                        prefmask &= ~IORESOURCE_MEM_64;
                        ret = pbus_size_mem(bus, prefmask, prefmask,
-                                        prefmask, prefmask,
-                                        realloc_head ? 0 : additional_mem_size,
-                                        additional_mem_size, realloc_head);
+                               prefmask, prefmask,
+                               realloc_head ? 0 : additional_mmio_pref_size,
+                               additional_mmio_pref_size, realloc_head);
 
                        /*
                         * If successful, only non-prefetchable resources
@@ -1265,7 +1283,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
                        if (ret == 0)
                                mask = prefmask;
                        else
-                               additional_mem_size += additional_mem_size;
+                               additional_mmio_size += additional_mmio_pref_size;
 
                        type2 = type3 = IORESOURCE_MEM;
                }
@@ -1285,8 +1303,8 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
                 * prefetchable resource in a 64-bit prefetchable window.
                 */
                pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
-                               realloc_head ? 0 : additional_mem_size,
-                               additional_mem_size, realloc_head);
+                             realloc_head ? 0 : additional_mmio_size,
+                             additional_mmio_size, realloc_head);
                break;
        }
 }
@@ -2066,6 +2084,8 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
        unsigned int i;
        int ret;
 
+       down_read(&pci_bus_sem);
+
        /* Walk to the root hub, releasing bridge BARs when possible */
        next = bridge;
        do {
@@ -2100,8 +2120,10 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
                next = bridge->bus ? bridge->bus->self : NULL;
        } while (next);
 
-       if (list_empty(&saved))
+       if (list_empty(&saved)) {
+               up_read(&pci_bus_sem);
                return -ENOENT;
+       }
 
        __pci_bus_size_bridges(bridge->subordinate, &added);
        __pci_bridge_assign_resources(bridge, &added, &failed);
@@ -2122,6 +2144,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
        }
 
        free_list(&saved);
+       up_read(&pci_bus_sem);
        return 0;
 
 cleanup:
@@ -2150,6 +2173,7 @@ cleanup:
                pci_setup_bridge(bridge->subordinate);
        }
        free_list(&saved);
+       up_read(&pci_bus_sem);
 
        return ret;
 }
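The find_bus_resource_of_type() rename earlier in this file's hunks changes what the sizing routines get back: an already-assigned window is now returned instead of being skipped, and the callers use b_res->parent to tell the two cases apart. A condensed sketch of that caller-side pattern (pbus_size_example is a made-up name; the real pbus_size_io()/pbus_size_mem() hunks above perform the same checks before sizing):

/* Sketch: distinguish "no window of this type" from "window already assigned". */
static int pbus_size_example(struct pci_bus *bus)
{
	struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
							   IORESOURCE_IO);

	if (!b_res)		/* bridge has no window of this type */
		return -ENOSPC;

	if (b_res->parent)	/* already assigned, nothing more to do */
		return 0;

	/* ... size and fill the unassigned window here ... */
	return 0;
}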
index 8c94cd3..465d6af 100644 (file)
@@ -675,7 +675,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
                return -ENOMEM;
 
        s->global = ioread32(&stdev->mmio_sw_event->global_summary);
-       s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+       s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
        s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
 
        for (i = 0; i < stdev->partition_count; i++) {
index ac322d6..08e3227 100644 (file)
@@ -50,6 +50,8 @@
        #define PHY_R5_PHY_CR_ACK                               BIT(16)
        #define PHY_R5_PHY_BS_OUT                               BIT(17)
 
+#define PCIE_RESET_DELAY                                       500
+
 struct phy_g12a_usb3_pcie_priv {
        struct regmap           *regmap;
        struct regmap           *regmap_cr;
@@ -196,6 +198,10 @@ static int phy_g12a_usb3_init(struct phy *phy)
        struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
        int data, ret;
 
+       ret = reset_control_reset(priv->reset);
+       if (ret)
+               return ret;
+
        /* Switch PHY to USB3 */
        /* TODO figure out how to handle when PCIe was set in the bootloader */
        regmap_update_bits(priv->regmap, PHY_R0,
@@ -272,24 +278,64 @@ static int phy_g12a_usb3_init(struct phy *phy)
        return 0;
 }
 
-static int phy_g12a_usb3_pcie_init(struct phy *phy)
+static int phy_g12a_usb3_pcie_power_on(struct phy *phy)
+{
+       struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+       if (priv->mode == PHY_TYPE_USB3)
+               return 0;
+
+       regmap_update_bits(priv->regmap, PHY_R0,
+                          PHY_R0_PCIE_POWER_STATE,
+                          FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
+
+       return 0;
+}
+
+static int phy_g12a_usb3_pcie_power_off(struct phy *phy)
+{
+       struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
+       if (priv->mode == PHY_TYPE_USB3)
+               return 0;
+
+       regmap_update_bits(priv->regmap, PHY_R0,
+                          PHY_R0_PCIE_POWER_STATE,
+                          FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1d));
+
+       return 0;
+}
+
+static int phy_g12a_usb3_pcie_reset(struct phy *phy)
 {
        struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
        int ret;
 
-       ret = reset_control_reset(priv->reset);
+       if (priv->mode == PHY_TYPE_USB3)
+               return 0;
+
+       ret = reset_control_assert(priv->reset);
        if (ret)
                return ret;
 
+       udelay(PCIE_RESET_DELAY);
+
+       ret = reset_control_deassert(priv->reset);
+       if (ret)
+               return ret;
+
+       udelay(PCIE_RESET_DELAY);
+
+       return 0;
+}
+
+static int phy_g12a_usb3_pcie_init(struct phy *phy)
+{
+       struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
+
        if (priv->mode == PHY_TYPE_USB3)
                return phy_g12a_usb3_init(phy);
 
-       /* Power UP PCIE */
-       /* TODO figure out when the bootloader has set USB3 mode before */
-       regmap_update_bits(priv->regmap, PHY_R0,
-                          PHY_R0_PCIE_POWER_STATE,
-                          FIELD_PREP(PHY_R0_PCIE_POWER_STATE, 0x1c));
-
        return 0;
 }
 
@@ -297,7 +343,10 @@ static int phy_g12a_usb3_pcie_exit(struct phy *phy)
 {
        struct phy_g12a_usb3_pcie_priv *priv = phy_get_drvdata(phy);
 
-       return reset_control_reset(priv->reset);
+       if (priv->mode == PHY_TYPE_USB3)
+               return reset_control_reset(priv->reset);
+
+       return 0;
 }
 
 static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
@@ -326,6 +375,9 @@ static struct phy *phy_g12a_usb3_pcie_xlate(struct device *dev,
 static const struct phy_ops phy_g12a_usb3_pcie_ops = {
        .init           = phy_g12a_usb3_pcie_init,
        .exit           = phy_g12a_usb3_pcie_exit,
+       .power_on       = phy_g12a_usb3_pcie_power_on,
+       .power_off      = phy_g12a_usb3_pcie_power_off,
+       .reset          = phy_g12a_usb3_pcie_reset,
        .owner          = THIS_MODULE,
 };
 
index 86cc2cc..af063f6 100644 (file)
@@ -420,12 +420,6 @@ failed_sensitivity:
 
 static int cmpc_accel_remove_v4(struct acpi_device *acpi)
 {
-       struct input_dev *inputdev;
-       struct cmpc_accel *accel;
-
-       inputdev = dev_get_drvdata(&acpi->dev);
-       accel = dev_get_drvdata(&inputdev->dev);
-
        device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
        device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
        return cmpc_remove_acpi_notify_device(acpi);
@@ -656,12 +650,6 @@ failed_file:
 
 static int cmpc_accel_remove(struct acpi_device *acpi)
 {
-       struct input_dev *inputdev;
-       struct cmpc_accel *accel;
-
-       inputdev = dev_get_drvdata(&acpi->dev);
-       accel = dev_get_drvdata(&inputdev->dev);
-
        device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
        return cmpc_remove_acpi_notify_device(acpi);
 }
index ea68f6e..ffb8d5d 100644 (file)
@@ -108,6 +108,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
                        if (ret < 0) {
                                dev_dbg(dev, "Error requesting irq at index %d: %d\n",
                                        inst_data[i].irq_idx, ret);
+                               goto error;
                        }
                        board_info.irq = ret;
                        break;
index ab7ae19..fa97834 100644 (file)
@@ -293,9 +293,8 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, punit_ipcdev);
 
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq < 0) {
-               punit_ipcdev->irq = 0;
                dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
        } else {
                ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
index 960961f..0517272 100644 (file)
@@ -97,8 +97,8 @@ config PTP_1588_CLOCK_PCH
        help
          This driver adds support for using the PCH EG20T as a PTP
          clock. The hardware supports time stamping of PTP packets
-         when using the end-to-end delay (E2E) mechansim. The peer
-         delay mechansim (P2P) is not supported.
+         when using the end-to-end delay (E2E) mechanism. The peer
+         delay mechanism (P2P) is not supported.
 
          This clock is only useful if your PTP programs are getting
          hardware time stamps on the PTP Ethernet packets using the
index c61f00b..a577218 100644 (file)
@@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
                ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
        }
 
+       spin_lock_init(&ptp_qoriq->lock);
+
        ktime_get_real_ts64(&now);
        ptp_qoriq_settime(&ptp_qoriq->caps, &now);
 
@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
          (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
          (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
 
-       spin_lock_init(&ptp_qoriq->lock);
        spin_lock_irqsave(&ptp_qoriq->lock, flags);
 
        regs = &ptp_qoriq->regs;
index 125a173..4dd31dd 100644 (file)
@@ -2755,7 +2755,7 @@ static int tsi721_probe(struct pci_dev *pdev,
        {
                int i;
 
-               for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+               for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                        tsi_debug(INIT, &pdev->dev, "res%d %pR",
                                  i, &pdev->resource[i]);
                }
index fc53e1e..c94184d 100644 (file)
@@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
        if (rc == 0) {
                memcpy(&private->vsq, vsq, sizeof(*vsq));
        } else {
-               dev_warn(&device->cdev->dev,
-                        "Reading the volume storage information failed with rc=%d\n", rc);
+               DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+                               "Reading the volume storage information failed with rc=%d", rc);
        }
 
        if (useglobal)
@@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
        if (rc == 0) {
                dasd_eckd_cpy_ext_pool_data(device, lcq);
        } else {
-               dev_warn(&device->cdev->dev,
-                        "Reading the logical configuration failed with rc=%d\n", rc);
+               DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+                               "Reading the logical configuration failed with rc=%d", rc);
        }
 
        dasd_sfree_request(cqr, cqr->memdev);
@@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
        dasd_eckd_read_features(device);
 
        /* Read Volume Information */
-       rc = dasd_eckd_read_vol_info(device);
-       if (rc)
-               goto out_err3;
+       dasd_eckd_read_vol_info(device);
 
        /* Read Extent Pool Information */
-       rc = dasd_eckd_read_ext_pool_info(device);
-       if (rc)
-               goto out_err3;
+       dasd_eckd_read_ext_pool_info(device);
 
        /* Read Device Characteristics */
        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
        if (readonly)
                set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 
-       if (dasd_eckd_is_ese(device))
-               dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
-
        dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
                 "with %d cylinders, %d heads, %d sectors%s\n",
                 private->rdc_data.dev_type,
@@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
                return -EINVAL;
 }
 
-static struct dasd_ccw_req *
-dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
-                          struct request *req, sector_t first_trk,
-                          sector_t last_trk)
-{
-       return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
-}
-
 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                                               struct dasd_device *startdev,
                                               struct dasd_block *block,
@@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
        cmdwtd = private->features.feature[12] & 0x40;
        use_prefix = private->features.feature[8] & 0x01;
 
-       if (req_op(req) == REQ_OP_DISCARD)
-               return dasd_eckd_build_cp_discard(startdev, block, req,
-                                                 first_trk, last_trk);
-
        cqr = NULL;
        if (cdlspecial || dasd_page_cache) {
                /* do nothing, just fall through to the cmd mode single case */
@@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
                                                     struct dasd_block *block,
                                                     struct request *req)
 {
-       struct dasd_device *startdev = NULL;
        struct dasd_eckd_private *private;
-       struct dasd_ccw_req *cqr;
+       struct dasd_device *startdev;
        unsigned long flags;
+       struct dasd_ccw_req *cqr;
 
-       /* Discard requests can only be processed on base devices */
-       if (req_op(req) != REQ_OP_DISCARD)
-               startdev = dasd_alias_get_start_dev(base);
+       startdev = dasd_alias_get_start_dev(base);
        if (!startdev)
                startdev = base;
        private = startdev->private;
@@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
        dasd_eckd_read_features(device);
 
        /* Read Volume Information */
-       rc = dasd_eckd_read_vol_info(device);
-       if (rc)
-               goto out_err2;
+       dasd_eckd_read_vol_info(device);
 
        /* Read Extent Pool Information */
-       rc = dasd_eckd_read_ext_pool_info(device);
-       if (rc)
-               goto out_err2;
+       dasd_eckd_read_ext_pool_info(device);
 
        /* Read Device Characteristics */
        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
        unsigned int logical_block_size = block->bp_block;
        struct request_queue *q = block->request_queue;
        struct dasd_device *device = block->base;
-       struct dasd_eckd_private *private;
-       unsigned int max_discard_sectors;
-       unsigned int max_bytes;
-       unsigned int ext_bytes; /* Extent Size in Bytes */
-       int recs_per_trk;
-       int trks_per_cyl;
-       int ext_limit;
-       int ext_size; /* Extent Size in Cylinders */
        int max;
 
-       private = device->private;
-       trks_per_cyl = private->rdc_data.trk_per_cyl;
-       recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
-
        if (device->features & DASD_FEATURE_USERAW) {
                /*
                 * the max_blocks value for raw_track access is 256
@@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
        /* With page sized segments each segment can be translated into one idaw/tidaw */
        blk_queue_max_segment_size(q, PAGE_SIZE);
        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
-       if (dasd_eckd_is_ese(device)) {
-               /*
-                * Depending on the extent size, up to UINT_MAX bytes can be
-                * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
-                * device limits should be exceeded.
-                */
-               ext_size = dasd_eckd_ext_size(device);
-               ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
-               ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
-                       logical_block_size;
-               max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
-               if (max_bytes / ext_bytes > ext_limit)
-                       max_bytes = ext_bytes * ext_limit;
-
-               max_discard_sectors = max_bytes / 512;
-
-               blk_queue_max_discard_sectors(q, max_discard_sectors);
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-               q->limits.discard_granularity = ext_bytes;
-               q->limits.discard_alignment = ext_bytes;
-       }
 }
 
 static struct ccw_driver dasd_eckd_driver = {
index ba7d248..dcdaba6 100644 (file)
@@ -113,6 +113,7 @@ struct subchannel {
        enum sch_todo todo;
        struct work_struct todo_work;
        struct schib_config config;
+       u64 dma_mask;
        char *driver_override; /* Driver name to force a match */
 } __attribute__ ((aligned(8)));
 
index 1fbfb0a..8318504 100644 (file)
@@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
         * belong to a subchannel need to fit 31 bit width (e.g. ccw).
         */
        sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
-       sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
+       /*
+        * But we don't have such restrictions imposed on the stuff that
+        * is handled by the streaming API.
+        */
+       sch->dma_mask = DMA_BIT_MASK(64);
+       sch->dev.dma_mask = &sch->dma_mask;
        return sch;
 
 err:
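The mask split above keeps CCWs and other subchannel-owned structures under the 31-bit limit via the coherent mask, while letting streaming mappings use a full 64-bit mask. As a general illustration of the same split in an ordinary driver (hypothetical probe code, not from this patch, which writes the mask fields directly because it sets up the struct device itself):

/* Hypothetical example; needs <linux/dma-mapping.h>. */
static int example_probe(struct device *dev)
{
	int ret;

	/* Streaming DMA may use the full 64-bit range. */
	ret = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Coherent allocations stay below 31 bits. */
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(31));
}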
index 131430b..0c6245f 100644 (file)
@@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
        if (!cdev->private)
                goto err_priv;
        cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
-       cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+       cdev->dev.dma_mask = sch->dev.dma_mask;
        dma_pool = cio_gp_dma_create(&cdev->dev, 1);
        if (!dma_pool)
                goto err_dma_pool;
index f4ca1d2..cd16488 100644 (file)
@@ -113,7 +113,7 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
-                      QDIO_MAX_BUFFERS_PER_Q);
+                      sizeof(irq_ptr->qib.parm));
 
        if (!input_slib_elements)
                goto output;
index a7868c8..dda2743 100644 (file)
@@ -4715,8 +4715,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "qdioest");
 
-       qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
-                                 GFP_KERNEL);
+       qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
        if (!qib_param_field) {
                rc =  -ENOMEM;
                goto out_free_nothing;
index b8799cd..bd8143e 100644 (file)
@@ -2021,10 +2021,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
 static void qeth_l2_vnicc_init(struct qeth_card *card)
 {
        u32 *timeout = &card->options.vnicc.learning_timeout;
+       bool enable, error = false;
        unsigned int chars_len, i;
        unsigned long chars_tmp;
        u32 sup_cmds, vnicc;
-       bool enable, error;
 
        QETH_CARD_TEXT(card, 2, "vniccini");
        /* reset rx_bcast */
@@ -2045,17 +2045,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
        chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
        for_each_set_bit(i, &chars_tmp, chars_len) {
                vnicc = BIT(i);
-               qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
-               if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
-                   !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+               if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
+                       sup_cmds = 0;
+                       error = true;
+               }
+               if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
+                   (sup_cmds & IPA_VNICC_GET_TIMEOUT))
+                       card->options.vnicc.getset_timeout_sup |= vnicc;
+               else
                        card->options.vnicc.getset_timeout_sup &= ~vnicc;
-               if (!(sup_cmds & IPA_VNICC_ENABLE) ||
-                   !(sup_cmds & IPA_VNICC_DISABLE))
+               if ((sup_cmds & IPA_VNICC_ENABLE) &&
+                   (sup_cmds & IPA_VNICC_DISABLE))
+                       card->options.vnicc.set_char_sup |= vnicc;
+               else
                        card->options.vnicc.set_char_sup &= ~vnicc;
        }
        /* enforce assumed default values and recover settings, if changed  */
-       error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
-                                             timeout);
+       error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+                                              timeout);
        chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
        chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
        chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
index 296bbc3..cf63916 100644 (file)
 
 struct kmem_cache *zfcp_fsf_qtcb_cache;
 
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+                "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
+
 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 {
        struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
@@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
        case FSF_STATUS_READ_SENSE_DATA_AVAIL:
                break;
        case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-               dev_warn(&adapter->ccw_device->dev,
-                        "The error threshold for checksum statistics "
-                        "has been exceeded\n");
                zfcp_dbf_hba_bit_err("fssrh_3", req);
+               if (ber_stop) {
+                       dev_warn(&adapter->ccw_device->dev,
+                                "All paths over this FCP device are disused because of excessive bit errors\n");
+                       zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+               } else {
+                       dev_warn(&adapter->ccw_device->dev,
+                                "The error threshold for checksum statistics has been exceeded\n");
+               }
                break;
        case FSF_STATUS_READ_LINK_DOWN:
                zfcp_fsf_status_read_link_down(req);
index da00ca5..401743e 100644 (file)
@@ -1923,6 +1923,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
        struct fcoe_fcp_rsp_payload *fcp_rsp;
        struct bnx2fc_rport *tgt = io_req->tgt;
        struct scsi_cmnd *sc_cmd;
+       u16 scope = 0, qualifier = 0;
 
        /* scsi_cmd_cmpl is called with tgt lock held */
 
@@ -1990,12 +1991,30 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 
                        if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
                            io_req->cdb_status == SAM_STAT_BUSY) {
-                               /* Set the jiffies + retry_delay_timer * 100ms
-                                  for the rport/tgt */
-                               tgt->retry_delay_timestamp = jiffies +
-                                       fcp_rsp->retry_delay_timer * HZ / 10;
+                               /* Newer array firmware with BUSY or
+                                * TASK_SET_FULL may return a status that needs
+                                * the scope bits masked.
+                                * Or a huge delay timestamp up to 27 minutes
+                                * can result.
+                                */
+                               if (fcp_rsp->retry_delay_timer) {
+                                       /* Upper 2 bits */
+                                       scope = fcp_rsp->retry_delay_timer
+                                               & 0xC000;
+                                       /* Lower 14 bits */
+                                       qualifier = fcp_rsp->retry_delay_timer
+                                               & 0x3FFF;
+                               }
+                               if (scope > 0 && qualifier > 0 &&
+                                       qualifier <= 0x3FEF) {
+                                       /* Set the jiffies +
+                                        * retry_delay_timer * 100ms
+                                        * for the rport/tgt
+                                        */
+                                       tgt->retry_delay_timestamp = jiffies +
+                                               (qualifier * HZ / 10);
+                               }
                        }
-
                }
                if (io_req->fcp_resid)
                        scsi_set_resid(sc_cmd, io_req->fcp_resid);
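The bnx2fc change above stops using the FCP_RSP retry_delay_timer field as a raw delay: its top two bits are a scope field and only the low 14 bits (capped at 0x3FEF) are the delay qualifier, so masking avoids the retry timestamps of up to roughly 27 minutes that the hunk's comment warns about. A standalone sketch of the same decode (names are illustrative; the 100 ms unit follows from the existing "* HZ / 10" arithmetic):

#include <stdbool.h>
#include <stdint.h>

/*
 * Split an FCP_RSP retry delay field into scope (bits 15:14) and
 * qualifier (bits 13:0); report whether a retry delay should be applied
 * and, if so, how long in milliseconds.
 */
static bool decode_retry_delay(uint16_t retry_delay_timer, unsigned int *delay_ms)
{
	uint16_t scope = retry_delay_timer & 0xC000;	 /* upper 2 bits */
	uint16_t qualifier = retry_delay_timer & 0x3FFF; /* lower 14 bits */

	if (scope > 0 && qualifier > 0 && qualifier <= 0x3FEF) {
		*delay_ms = qualifier * 100;	/* one unit = 100 ms */
		return true;
	}

	return false;
}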
index d1513fd..0847e68 100644 (file)
@@ -3683,7 +3683,7 @@ void hisi_sas_debugfs_work_handler(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
 
-void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
 {
        struct device *dev = hisi_hba->dev;
        int i;
@@ -3705,7 +3705,7 @@ void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
                devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
 }
 
-int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
 {
        const struct hisi_sas_hw *hw = hisi_hba->hw;
        struct device *dev = hisi_hba->dev;
@@ -3796,7 +3796,7 @@ fail:
        return -ENOMEM;
 }
 
-void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
 {
        hisi_hba->debugfs_bist_dentry =
                        debugfs_create_dir("bist", hisi_hba->debugfs_dir);
index 45a6604..ff6d4aa 100644 (file)
@@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                 */
                if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
                    pdev->subsystem_device == 0xC000)
-                       return -ENODEV;
+                       goto out_disable_device;
                /* Now check the magic signature byte */
                pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
                if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
-                       return -ENODEV;
+                       goto out_disable_device;
                /* Ok it is probably a megaraid */
        }
 
index 68a8217..1a3661d 100644 (file)
@@ -1186,7 +1186,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
 void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
 {
        s8 bar, logical = 0;
-       for (bar = 0; bar < 6; bar++) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                /*
                ** logical BARs for SPC:
                ** bar 0 and 1 - logical BAR0
index 3374f55..aca9134 100644 (file)
@@ -401,7 +401,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
 
        pdev = pm8001_ha->pdev;
        /* map pci mem (PMC pci base 0-3)*/
-       for (bar = 0; bar < 6; bar++) {
+       for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                /*
                ** logical BARs for SPC:
                ** bar 0 and 1 - logical BAR0
index 1659d35..59ca98f 100644 (file)
@@ -596,7 +596,7 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
                tmp_prio = get->operational.app_prio.fcoe;
                if (qedf_default_prio > -1)
                        qedf->prio = qedf_default_prio;
-               else if (tmp_prio < 0 || tmp_prio > 7) {
+               else if (tmp_prio > 7) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "FIP/FCoE prio %d out of range, setting to %d.\n",
                            tmp_prio, QEDF_DEFAULT_PRIO);
index 8190c2a..30bafd9 100644 (file)
@@ -2920,6 +2920,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        struct qla_hw_data *ha = vha->hw;
        uint16_t id = vha->vp_idx;
 
+       set_bit(VPORT_DELETE, &vha->dpc_flags);
+
        while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
            test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
                msleep(1000);
index 873a6ae..6ffa987 100644 (file)
@@ -2396,6 +2396,7 @@ typedef struct fc_port {
        unsigned int query:1;
        unsigned int id_changed:1;
        unsigned int scan_needed:1;
+       unsigned int n2n_flag:1;
 
        struct completion nvme_del_done;
        uint32_t nvme_prli_service_param;
@@ -2446,7 +2447,6 @@ typedef struct fc_port {
        uint8_t fc4_type;
        uint8_t fc4f_nvme;
        uint8_t scan_state;
-       uint8_t n2n_flag;
 
        unsigned long last_queue_full;
        unsigned long last_ramp_up;
@@ -3036,6 +3036,7 @@ enum scan_flags_t {
 enum fc4type_t {
        FS_FC4TYPE_FCP  = BIT_0,
        FS_FC4TYPE_NVME = BIT_1,
+       FS_FCP_IS_N2N = BIT_7,
 };
 
 struct fab_scan_rp {
@@ -4394,6 +4395,7 @@ typedef struct scsi_qla_host {
 #define IOCB_WORK_ACTIVE       31
 #define SET_ZIO_THRESHOLD_NEEDED 32
 #define ISP_ABORT_TO_ROM       33
+#define VPORT_DELETE           34
 
        unsigned long   pci_flags;
 #define PFLG_DISCONNECTED      0       /* PCI device removed */
index dc0e366..5298ed1 100644 (file)
@@ -3102,7 +3102,8 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
 {
        struct qla_work_evt *e;
 
-       if (test_bit(UNLOADING, &vha->dpc_flags))
+       if (test_bit(UNLOADING, &vha->dpc_flags) ||
+           (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
                return 0;
 
        e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
index 643d232..1d04131 100644 (file)
@@ -746,12 +746,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                        break;
                default:
                        if ((id.b24 != fcport->d_id.b24 &&
-                           fcport->d_id.b24) ||
+                           fcport->d_id.b24 &&
+                           fcport->loop_id != FC_NO_LOOP_ID) ||
                            (fcport->loop_id != FC_NO_LOOP_ID &&
                                fcport->loop_id != loop_id)) {
                                ql_dbg(ql_dbg_disc, vha, 0x20e3,
                                    "%s %d %8phC post del sess\n",
                                    __func__, __LINE__, fcport->port_name);
+                               if (fcport->n2n_flag)
+                                       fcport->d_id.b24 = 0;
                                qlt_schedule_sess_for_deletion(fcport);
                                return;
                        }
@@ -759,6 +762,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                }
 
                fcport->loop_id = loop_id;
+               if (fcport->n2n_flag)
+                       fcport->d_id.b24 = id.b24;
 
                wwn = wwn_to_u64(fcport->port_name);
                qlt_find_sess_invalidate_other(vha, wwn,
@@ -972,7 +977,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
                wwn = wwn_to_u64(e->port_name);
 
                ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
-                   "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
+                   "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
                    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
                    e->port_id[0], e->current_login_state, e->last_login_state,
                    (loop_id & 0x7fff));
@@ -1499,7 +1504,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
             (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
                return 0;
 
-       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
+           !N2N_TOPO(vha->hw)) {
                if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                        return 0;
@@ -1570,8 +1576,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                                qla24xx_post_gpdb_work(vha, fcport, 0);
                        }  else {
                                ql_dbg(ql_dbg_disc, vha, 0x2118,
-                                   "%s %d %8phC post NVMe PRLI\n",
-                                   __func__, __LINE__, fcport->port_name);
+                                   "%s %d %8phC post %s PRLI\n",
+                                   __func__, __LINE__, fcport->port_name,
+                                   fcport->fc4f_nvme ? "NVME" : "FC");
                                qla24xx_post_prli_work(vha, fcport);
                        }
                        break;
@@ -1853,17 +1860,38 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                        break;
                }
 
-               if (ea->fcport->n2n_flag) {
+               if (ea->fcport->fc4f_nvme) {
                        ql_dbg(ql_dbg_disc, vha, 0x2118,
                                "%s %d %8phC post fc4 prli\n",
                                __func__, __LINE__, ea->fcport->port_name);
                        ea->fcport->fc4f_nvme = 0;
-                       ea->fcport->n2n_flag = 0;
                        qla24xx_post_prli_work(vha, ea->fcport);
+                       return;
+               }
+
+               /* at this point both PRLI NVME & PRLI FCP failed */
+               if (N2N_TOPO(vha->hw)) {
+                       if (ea->fcport->n2n_link_reset_cnt < 3) {
+                               ea->fcport->n2n_link_reset_cnt++;
+                               /*
+                                * remote port is not sending Plogi. Reset
+                                * link to kick start his state machine
+                                */
+                               set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+                       } else {
+                               ql_log(ql_log_warn, vha, 0x2119,
+                                   "%s %d %8phC Unable to reconnect\n",
+                                   __func__, __LINE__, ea->fcport->port_name);
+                       }
+               } else {
+                       /*
+                        * switch connect. login failed. Take connection
+                        * down and allow relogin to retrigger
+                        */
+                       ea->fcport->flags &= ~FCF_ASYNC_SENT;
+                       ea->fcport->keep_nport_handle = 0;
+                       qlt_schedule_sess_for_deletion(ea->fcport);
                }
-               ql_dbg(ql_dbg_disc, vha, 0x2119,
-                   "%s %d %8phC unhandle event of %x\n",
-                   __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
                break;
        }
 }
@@ -3190,7 +3218,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 
                for (j = 0; j < 2; j++, fwdt++) {
                        if (!fwdt->template) {
-                               ql_log(ql_log_warn, vha, 0x00ba,
+                               ql_dbg(ql_dbg_init, vha, 0x00ba,
                                    "-> fwdt%u no template\n", j);
                                continue;
                        }
@@ -4986,28 +5014,47 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
        unsigned long flags;
 
        /* Inititae N2N login. */
-       if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
-               /* borrowing */
-               u32 *bp, i, sz;
-
-               memset(ha->init_cb, 0, ha->init_cb_size);
-               sz = min_t(int, sizeof(struct els_plogi_payload),
-                   ha->init_cb_size);
-               rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
-                   (void *)ha->init_cb, sz);
-               if (rval == QLA_SUCCESS) {
-                       bp = (uint32_t *)ha->init_cb;
-                       for (i = 0; i < sz/4 ; i++, bp++)
-                               *bp = cpu_to_be32(*bp);
+       if (N2N_TOPO(ha)) {
+               if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+                       /* borrowing */
+                       u32 *bp, i, sz;
+
+                       memset(ha->init_cb, 0, ha->init_cb_size);
+                       sz = min_t(int, sizeof(struct els_plogi_payload),
+                           ha->init_cb_size);
+                       rval = qla24xx_get_port_login_templ(vha,
+                           ha->init_cb_dma, (void *)ha->init_cb, sz);
+                       if (rval == QLA_SUCCESS) {
+                               bp = (uint32_t *)ha->init_cb;
+                               for (i = 0; i < sz/4 ; i++, bp++)
+                                       *bp = cpu_to_be32(*bp);
 
-                       memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
-                           sizeof(ha->plogi_els_payld.data));
-                       set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-               } else {
-                       ql_dbg(ql_dbg_init, vha, 0x00d1,
-                           "PLOGI ELS param read fail.\n");
+                               memcpy(&ha->plogi_els_payld.data,
+                                   (void *)ha->init_cb,
+                                   sizeof(ha->plogi_els_payld.data));
+                               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+                       } else {
+                               ql_dbg(ql_dbg_init, vha, 0x00d1,
+                                   "PLOGI ELS param read fail.\n");
+                               goto skip_login;
+                       }
+               }
+
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       if (fcport->n2n_flag) {
+                               qla24xx_fcport_handle_login(vha, fcport);
+                               return QLA_SUCCESS;
+                       }
+               }
+skip_login:
+               spin_lock_irqsave(&vha->work_lock, flags);
+               vha->scan.scan_retry++;
+               spin_unlock_irqrestore(&vha->work_lock, flags);
+
+               if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                }
-               return QLA_SUCCESS;
        }
 
        found_devs = 0;
index e92e52a..518eb95 100644 (file)
@@ -2656,9 +2656,10 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
-       els_iocb->s_id[0] = vha->d_id.b.al_pa;
-       els_iocb->s_id[1] = vha->d_id.b.area;
-       els_iocb->s_id[2] = vha->d_id.b.domain;
+       /* For SID the byte order is different than DID */
+       els_iocb->s_id[1] = vha->d_id.b.al_pa;
+       els_iocb->s_id[2] = vha->d_id.b.area;
+       els_iocb->s_id[0] = vha->d_id.b.domain;
 
        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
                els_iocb->control_flags = 0;
index 4c26630..009fd5a 100644 (file)
@@ -2837,8 +2837,6 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
        if (sense_len == 0) {
                rsp->status_srb = NULL;
                sp->done(sp, cp->result);
-       } else {
-               WARN_ON_ONCE(true);
        }
 }
 
index 4c858e2..1cc6913 100644 (file)
@@ -2249,7 +2249,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+       ql_dbg(ql_dbg_disc, vha, 0x105a,
            "Entered %s.\n", __func__);
 
        if (IS_CNA_CAPABLE(vha->hw)) {
@@ -3883,14 +3883,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                case TOPO_N2N:
                        ha->current_topology = ISP_CFG_N;
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+                       list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                               fcport->scan_state = QLA_FCPORT_SCAN;
+                               fcport->n2n_flag = 0;
+                       }
+
                        fcport = qla2x00_find_fcport_by_wwpn(vha,
                            rptid_entry->u.f1.port_name, 1);
                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
                        if (fcport) {
                                fcport->plogi_nack_done_deadline = jiffies + HZ;
-                               fcport->dm_login_expire = jiffies + 3*HZ;
+                               fcport->dm_login_expire = jiffies + 2*HZ;
                                fcport->scan_state = QLA_FCPORT_FOUND;
+                               fcport->n2n_flag = 1;
+                               fcport->keep_nport_handle = 1;
+                               if (vha->flags.nvme_enabled)
+                                       fcport->fc4f_nvme = 1;
+
                                switch (fcport->disc_state) {
                                case DSC_DELETED:
                                        set_bit(RELOGIN_NEEDED,
@@ -3924,7 +3934,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                    rptid_entry->u.f1.port_name,
                                    rptid_entry->u.f1.node_name,
                                    NULL,
-                                   FC4_TYPE_UNKNOWN);
+                                   FS_FCP_IS_N2N);
                        }
 
                        /* if our portname is higher then initiate N2N login */
@@ -4023,6 +4033,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        fcport->scan_state = QLA_FCPORT_SCAN;
+                       fcport->n2n_flag = 0;
                }
 
                fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -4032,6 +4043,14 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        fcport->login_retry = vha->hw->login_retry_count;
                        fcport->plogi_nack_done_deadline = jiffies + HZ;
                        fcport->scan_state = QLA_FCPORT_FOUND;
+                       fcport->keep_nport_handle = 1;
+                       fcport->n2n_flag = 1;
+                       fcport->d_id.b.domain =
+                               rptid_entry->u.f2.remote_nport_id[2];
+                       fcport->d_id.b.area =
+                               rptid_entry->u.f2.remote_nport_id[1];
+                       fcport->d_id.b.al_pa =
+                               rptid_entry->u.f2.remote_nport_id[0];
                }
        }
 }
index 1a9a11a..6afad68 100644 (file)
@@ -66,6 +66,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;
+       u8 i;
 
        mutex_lock(&ha->vport_lock);
        /*
@@ -75,8 +76,9 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
-       wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
-           10*HZ);
+       for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
+               wait_event_timeout(vha->vref_waitq,
+                   atomic_read(&vha->vref_count), HZ);
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
@@ -262,6 +264,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
+                       if (test_bit(VPORT_DELETE, &vha->dpc_flags))
+                               continue;
+
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -300,6 +305,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 int
 qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 {
+       fc_port_t *fcport;
+
+       /*
+        * To exclusively reset vport, we need to log it out first.
+        * Note: This control_vp can fail if ISP reset is already
+        * issued, this is expected, as the vp would be already
+        * logged out due to ISP reset.
+        */
+       if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+               qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+               list_for_each_entry(fcport, &vha->vp_fcports, list)
+                       fcport->logout_on_delete = 0;
+       }
+
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down
@@ -312,16 +331,9 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }
 
-       /*
-        * To exclusively reset vport, we need to log it out first.  Note: this
-        * control_vp can fail if ISP reset is already issued, this is
-        * expected, as the vp would be already logged out due to ISP reset.
-        */
-       if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
-               qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
-
        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
+
        return qla24xx_enable_vp(vha);
 }
 
index 73db01e..3568031 100644 (file)
@@ -1115,9 +1115,15 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
 void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
+       u8 i;
+
        qla2x00_mark_all_devices_lost(vha, 0);
 
-       wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
+       for (i = 0; i < 10; i++)
+               wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
+                   HZ);
+
+       flush_workqueue(vha->hw->wq);
 }
 
 /*
@@ -5036,6 +5042,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 
                        memcpy(fcport->port_name, e->u.new_sess.port_name,
                            WWN_SIZE);
+
+                       if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+                               fcport->n2n_flag = 1;
+
                } else {
                        ql_dbg(ql_dbg_disc, vha, 0xffff,
                                   "%s %8phC mem alloc fail.\n",
@@ -5134,11 +5144,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
                        if (dfcp)
                                qlt_schedule_sess_for_deletion(tfcp);
 
-
-                       if (N2N_TOPO(vha->hw))
-                               fcport->flags &= ~FCF_FABRIC_DEVICE;
-
                        if (N2N_TOPO(vha->hw)) {
+                               fcport->flags &= ~FCF_FABRIC_DEVICE;
+                               fcport->keep_nport_handle = 1;
                                if (vha->flags.nvme_enabled) {
                                        fcport->fc4f_nvme = 1;
                                        fcport->n2n_flag = 1;
index 0ffda61..a06e562 100644 (file)
@@ -953,7 +953,7 @@ void qlt_free_session_done(struct work_struct *work)
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        bool logout_started = false;
-       scsi_qla_host_t *base_vha;
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
        struct qlt_plogi_ack_t *own =
                sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
@@ -1020,6 +1020,7 @@ void qlt_free_session_done(struct work_struct *work)
 
        if (logout_started) {
                bool traced = false;
+               u16 cnt = 0;
 
                while (!READ_ONCE(sess->logout_completed)) {
                        if (!traced) {
@@ -1029,6 +1030,9 @@ void qlt_free_session_done(struct work_struct *work)
                                traced = true;
                        }
                        msleep(100);
+                       cnt++;
+                       if (cnt > 200)
+                               break;
                }
 
                ql_dbg(ql_dbg_disc, vha, 0xf087,
@@ -1101,6 +1105,7 @@ void qlt_free_session_done(struct work_struct *work)
        }
 
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+       sess->free_pending = 0;
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
@@ -1109,17 +1114,9 @@ void qlt_free_session_done(struct work_struct *work)
        if (tgt && (tgt->sess_count == 0))
                wake_up_all(&tgt->waitQ);
 
-       if (vha->fcport_count == 0)
-               wake_up_all(&vha->fcport_waitQ);
-
-       base_vha = pci_get_drvdata(ha->pdev);
-
-       sess->free_pending = 0;
-
-       if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
-               return;
-
-       if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+       if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
+           !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
+           (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
                switch (vha->host->active_mode) {
                case MODE_INITIATOR:
                case MODE_DUAL:
@@ -1132,6 +1129,9 @@ void qlt_free_session_done(struct work_struct *work)
                        break;
                }
        }
+
+       if (vha->fcport_count == 0)
+               wake_up_all(&vha->fcport_waitQ);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -1161,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
        sess->last_login_gen = sess->login_gen;
 
        INIT_WORK(&sess->free_work, qlt_free_session_done);
-       schedule_work(&sess->free_work);
+       queue_work(sess->vha->hw->wq, &sess->free_work);
 }
 EXPORT_SYMBOL(qlt_unreg_sess);
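
A minimal illustrative sketch of the workqueue pattern in the hunks above (hypothetical names, not the qla2xxx structures): the session-free work is queued on the driver's own workqueue instead of the system one, so the flush_workqueue(vha->hw->wq) added to the teardown path is guaranteed to wait for any in-flight frees.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_hw {
	struct workqueue_struct *wq;	/* driver-private workqueue */
};

struct my_sess {
	struct my_hw *hw;
	struct work_struct free_work;
};

static void my_session_free(struct work_struct *work)
{
	struct my_sess *sess = container_of(work, struct my_sess, free_work);

	kfree(sess);			/* release per-session state */
}

static int my_hw_init(struct my_hw *hw)
{
	hw->wq = alloc_workqueue("my_hw_wq", 0, 0);
	return hw->wq ? 0 : -ENOMEM;
}

static void my_unreg_session(struct my_sess *sess)
{
	INIT_WORK(&sess->free_work, my_session_free);
	queue_work(sess->hw->wq, &sess->free_work);	/* not schedule_work() */
}

static void my_hw_teardown(struct my_hw *hw)
{
	flush_workqueue(hw->wq);	/* waits for every queued session free */
	destroy_workqueue(hw->wq);
}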
 
index 1c470e3..ae2fa17 100644 (file)
@@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        ses->data_direction = scmd->sc_data_direction;
        ses->sdb = scmd->sdb;
        ses->result = scmd->result;
+       ses->resid_len = scmd->req.resid_len;
        ses->underflow = scmd->underflow;
        ses->prot_op = scmd->prot_op;
        ses->eh_eflags = scmd->eh_eflags;
@@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        memset(scmd->cmnd, 0, BLK_MAX_CDB);
        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
        scmd->result = 0;
+       scmd->req.resid_len = 0;
 
        if (sense_bytes) {
                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
        scmd->sc_data_direction = ses->data_direction;
        scmd->sdb = ses->sdb;
        scmd->result = ses->result;
+       scmd->req.resid_len = ses->resid_len;
        scmd->underflow = ses->underflow;
        scmd->prot_op = ses->prot_op;
        scmd->eh_eflags = ses->eh_eflags;
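
The scsi_error.c hunks above add req.resid_len to the state captured by scsi_eh_prep_cmnd() and put back by scsi_eh_restore_cmnd(): the error handler borrows the failed command for its own probe I/O, which would otherwise clobber the residual length reported on completion. A stripped-down sketch of that save/reuse/restore shape, with a hypothetical save structure mirroring part of struct scsi_eh_save:

#include <scsi/scsi_cmnd.h>

struct eh_save_min {			/* hypothetical subset of struct scsi_eh_save */
	int result;
	unsigned int resid_len;
};

static void eh_prep_min(struct scsi_cmnd *scmd, struct eh_save_min *ses)
{
	ses->result = scmd->result;		/* capture what the probe will clobber */
	ses->resid_len = scmd->req.resid_len;

	scmd->result = 0;			/* probe command starts from a clean slate */
	scmd->req.resid_len = 0;
}

static void eh_restore_min(struct scsi_cmnd *scmd, const struct eh_save_min *ses)
{
	scmd->result = ses->result;		/* original completion state is intact again */
	scmd->req.resid_len = ses->resid_len;
}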
index dc210b9..5447738 100644 (file)
@@ -1834,6 +1834,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
        .init_request   = scsi_mq_init_request,
        .exit_request   = scsi_mq_exit_request,
        .initialize_rq_fn = scsi_initialize_rq,
+       .cleanup_rq     = scsi_cleanup_rq,
        .busy           = scsi_mq_lld_busy,
        .map_queues     = scsi_map_queues,
 };
@@ -1921,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
 {
        struct scsi_device *sdev = NULL;
 
-       if (q->mq_ops == &scsi_mq_ops)
+       if (q->mq_ops == &scsi_mq_ops_no_commit ||
+           q->mq_ops == &scsi_mq_ops)
                sdev = q->queuedata;
        if (!sdev || !get_device(&sdev->sdev_gendev))
                sdev = NULL;
index 50928bc..03163ac 100644 (file)
@@ -1654,7 +1654,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
                /* we need to evaluate the error return  */
                if (scsi_sense_valid(sshdr) &&
                        (sshdr->asc == 0x3a ||  /* medium not present */
-                        sshdr->asc == 0x20))   /* invalid command */
+                        sshdr->asc == 0x20 ||  /* invalid command */
+                        (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))  /* drive is password locked */
                                /* this is no error here */
                                return 0;
 
index ed8b9ac..542d2ba 100644 (file)
@@ -1837,8 +1837,7 @@ static int storvsc_probe(struct hv_device *device,
        /*
         * Set the number of HW queues we are supporting.
         */
-       if (stor_device->num_sc != 0)
-               host->nr_hw_queues = stor_device->num_sc + 1;
+       host->nr_hw_queues = num_present_cpus();
 
        /*
         * Set the error handler work queue.
index 034dd9c..11a87f5 100644 (file)
@@ -8143,6 +8143,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 {
        int ret = 0;
 
+       if (!hba->is_powered)
+               goto out;
+
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
 
index 290dbfc..ce32dfe 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 config EXFAT_FS
        tristate "exFAT fs support"
        depends on BLOCK
@@ -6,7 +7,7 @@ config EXFAT_FS
          This adds support for the exFAT file system.
 
 config EXFAT_DONT_MOUNT_VFAT
-       bool "Prohibit mounting of fat/vfat filesysems by exFAT"
+       bool "Prohibit mounting of fat/vfat filesystems by exFAT"
        depends on EXFAT_FS
        default y
        help
index 84944df..6c90aec 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-or-later
 
 obj-$(CONFIG_EXFAT_FS) += exfat.o
 
index 6c12f2d..3abab33 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index f086c75..81d20e6 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 1565ce6..e1b0017 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index b3e9cf7..79174e5 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 03cb829..a5c4b68 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 5f6caee..3b2b0ce 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
@@ -7,6 +7,7 @@
 #include <linux/init.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/seq_file.h>
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
@@ -3450,7 +3451,7 @@ static void exfat_free_super(struct exfat_sb_info *sbi)
                kfree(sbi->options.iocharset);
        /* mutex_init is in exfat_fill_super function. only for 3.7+ */
        mutex_destroy(&sbi->s_lock);
-       kfree(sbi);
+       kvfree(sbi);
 }
 
 static void exfat_put_super(struct super_block *sb)
@@ -3845,7 +3846,7 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
         * the filesystem, since we're only just about to mount
         * it and have no inodes etc active!
         */
-       sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+       sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
        mutex_init(&sbi->s_lock);
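
The super.c change above switches the superblock-info allocation from kzalloc()/kfree() to kvzalloc()/kvfree(), hence the new <linux/mm.h> include: the structure does not need to be physically contiguous, and kvzalloc() may fall back to vmalloc() when contiguous pages are scarce. A small sketch of the paired-helper rule, using a hypothetical structure:

#include <linux/mm.h>

struct big_info {			/* hypothetical, possibly larger than a page */
	unsigned long table[2048];
};

static struct big_info *big_info_alloc(void)
{
	/* May come from kmalloc or vmalloc; the caller must not assume
	 * physical contiguity or hand the buffer to DMA. */
	return kvzalloc(sizeof(struct big_info), GFP_KERNEL);
}

static void big_info_free(struct big_info *bi)
{
	kvfree(bi);			/* correct for either backing allocator */
}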
index 366082f..b91a1fa 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 8ec524a..cb61c2a 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 menuconfig FB_TFT
        tristate "Support for small TFT LCD display modules"
-       depends on FB && SPI
+       depends on FB && SPI && OF
        depends on GPIOLIB || COMPILE_TEST
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
@@ -199,13 +199,3 @@ config FB_TFT_WATTEROTT
        depends on FB_TFT
        help
          Generic Framebuffer support for WATTEROTT
-
-config FB_FLEX
-       tristate "Generic FB driver for TFT LCD displays"
-       depends on FB_TFT
-       help
-         Generic Framebuffer support for TFT LCD displays.
-
-config FB_TFT_FBTFT_DEVICE
-       tristate "Module to for adding FBTFT devices"
-       depends on FB_TFT
index 6bc0331..27af43f 100644 (file)
@@ -36,7 +36,3 @@ obj-$(CONFIG_FB_TFT_UC1611)      += fb_uc1611.o
 obj-$(CONFIG_FB_TFT_UC1701)      += fb_uc1701.o
 obj-$(CONFIG_FB_TFT_UPD161704)   += fb_upd161704.o
 obj-$(CONFIG_FB_TFT_WATTEROTT)   += fb_watterott.o
-obj-$(CONFIG_FB_FLEX)            += flexfb.o
-
-# Device modules
-obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o
index cf5700a..a0a67aa 100644 (file)
@@ -714,7 +714,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        if (par->gamma.curves && gamma) {
                if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
                                          strlen(gamma)))
-                       goto alloc_fail;
+                       goto release_framebuf;
        }
 
        /* Transmit buffer */
@@ -731,7 +731,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        if (txbuflen > 0) {
                txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
                if (!txbuf)
-                       goto alloc_fail;
+                       goto release_framebuf;
                par->txbuf.buf = txbuf;
                par->txbuf.len = txbuflen;
        }
@@ -753,6 +753,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 
        return info;
 
+release_framebuf:
+       framebuffer_release(info);
+
 alloc_fail:
        vfree(vmem);
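
The fbtft-core.c hunks above reroute the later failure paths to a new release_framebuf label so the fb_info obtained from framebuffer_alloc() is released before the video memory is freed; previously those paths jumped straight to alloc_fail and leaked it. A compact sketch of the layered-unwind idiom, where some_setup() is a made-up helper standing in for the gamma/txbuf setup steps:

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>

static int some_setup(struct fb_info *info)
{
	return 0;			/* pretend setup step that may fail */
}

static struct fb_info *example_alloc(struct device *dev, size_t vmem_size)
{
	struct fb_info *info;
	void *vmem;

	vmem = vzalloc(vmem_size);
	if (!vmem)
		return NULL;

	info = framebuffer_alloc(0, dev);
	if (!info)
		goto alloc_fail;		/* only vmem exists so far */

	if (some_setup(info))
		goto release_framebuf;		/* undo in reverse order of setup */

	return info;

release_framebuf:
	framebuffer_release(info);
alloc_fail:
	vfree(vmem);
	return NULL;
}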
 
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
deleted file mode 100644 (file)
index 44e1410..0000000
+++ /dev/null
@@ -1,1261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- *
- * Copyright (C) 2013, Noralf Tronnes
- */
-
-#define pr_fmt(fmt) "fbtft_device: " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <video/mipi_display.h>
-
-#include "fbtft.h"
-
-#define MAX_GPIOS 32
-
-static struct spi_device *spi_device;
-static struct platform_device *p_device;
-
-static char *name;
-module_param(name, charp, 0000);
-MODULE_PARM_DESC(name,
-                "Devicename (required). name=list => list all supported devices.");
-
-static unsigned int rotate;
-module_param(rotate, uint, 0000);
-MODULE_PARM_DESC(rotate,
-                "Angle to rotate display counter clockwise: 0, 90, 180, 270");
-
-static unsigned int busnum;
-module_param(busnum, uint, 0000);
-MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
-
-static unsigned int cs;
-module_param(cs, uint, 0000);
-MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
-
-static unsigned int speed;
-module_param(speed, uint, 0000);
-MODULE_PARM_DESC(speed, "SPI speed (override device default)");
-
-static int mode = -1;
-module_param(mode, int, 0000);
-MODULE_PARM_DESC(mode, "SPI mode (override device default)");
-
-static unsigned int fps;
-module_param(fps, uint, 0000);
-MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
-
-static char *gamma;
-module_param(gamma, charp, 0000);
-MODULE_PARM_DESC(gamma,
-                "String representation of Gamma Curve(s). Driver specific.");
-
-static int txbuflen;
-module_param(txbuflen, int, 0000);
-MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
-
-static int bgr = -1;
-module_param(bgr, int, 0000);
-MODULE_PARM_DESC(bgr,
-                "BGR bit (supported by some drivers).");
-
-static unsigned int startbyte;
-module_param(startbyte, uint, 0000);
-MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
-
-static bool custom;
-module_param(custom, bool, 0000);
-MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width, used with the custom argument");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height, used with the custom argument");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-
-static s16 init[FBTFT_MAX_INIT_SEQUENCE];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
-
-static unsigned long debug;
-module_param(debug, ulong, 0000);
-MODULE_PARM_DESC(debug,
-                "level: 0-7 (the remaining 29 bits is for advanced usage)");
-
-static unsigned int verbose = 3;
-module_param(verbose, uint, 0000);
-MODULE_PARM_DESC(verbose,
-                "0 silent, >1 show devices, >2 show devices before (default=3)");
-
-struct fbtft_device_display {
-       char *name;
-       struct spi_board_info *spi;
-       struct platform_device *pdev;
-};
-
-static void fbtft_device_pdev_release(struct device *dev);
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
-                                             int xs, int ys, int xe, int ye);
-
-#define ADAFRUIT18_GAMMA \
-               "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
-               "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10"
-
-#define CBERRY28_GAMMA \
-               "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
-               "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-
-static const s16 cberry28_init_sequence[] = {
-       /* turn off sleep mode */
-       -1, MIPI_DCS_EXIT_SLEEP_MODE,
-       -2, 120,
-
-       /* set pixel format to RGB-565 */
-       -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
-
-       -1, 0xB2, 0x0C, 0x0C, 0x00, 0x33, 0x33,
-
-       /*
-        * VGH = 13.26V
-        * VGL = -10.43V
-        */
-       -1, 0xB7, 0x35,
-
-       /*
-        * VDV and VRH register values come from command write
-        * (instead of NVM)
-        */
-       -1, 0xC2, 0x01, 0xFF,
-
-       /*
-        * VAP =  4.7V + (VCOM + VCOM offset + 0.5 * VDV)
-        * VAN = -4.7V + (VCOM + VCOM offset + 0.5 * VDV)
-        */
-       -1, 0xC3, 0x17,
-
-       /* VDV = 0V */
-       -1, 0xC4, 0x20,
-
-       /* VCOM = 0.675V */
-       -1, 0xBB, 0x17,
-
-       /* VCOM offset = 0V */
-       -1, 0xC5, 0x20,
-
-       /*
-        * AVDD = 6.8V
-        * AVCL = -4.8V
-        * VDS = 2.3V
-        */
-       -1, 0xD0, 0xA4, 0xA1,
-
-       -1, MIPI_DCS_SET_DISPLAY_ON,
-
-       -3,
-};
-
-static const s16 hy28b_init_sequence[] = {
-       -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
-       -1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
-       -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
-       -1, 0x0008, 0x0207, -1, 0x0009, 0x0000,
-       -1, 0x000a, 0x0000, -1, 0x000c, 0x0001,
-       -1, 0x000d, 0x0000, -1, 0x000f, 0x0000,
-       -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
-       -1, 0x0012, 0x0000, -1, 0x0013, 0x0000,
-       -2, 50, -1, 0x0010, 0x1590, -1, 0x0011,
-       0x0227, -2, 50, -1, 0x0012, 0x009c, -2, 50,
-       -1, 0x0013, 0x1900, -1, 0x0029, 0x0023,
-       -1, 0x002b, 0x000e, -2, 50,
-       -1, 0x0020, 0x0000, -1, 0x0021, 0x0000,
-       -2, 50, -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x00ef, -1, 0x0052, 0x0000,
-       -1, 0x0053, 0x013f, -1, 0x0060, 0xa700,
-       -1, 0x0061, 0x0001, -1, 0x006a, 0x0000,
-       -1, 0x0080, 0x0000, -1, 0x0081, 0x0000,
-       -1, 0x0082, 0x0000, -1, 0x0083, 0x0000,
-       -1, 0x0084, 0x0000, -1, 0x0085, 0x0000,
-       -1, 0x0090, 0x0010, -1, 0x0092, 0x0000,
-       -1, 0x0093, 0x0003, -1, 0x0095, 0x0110,
-       -1, 0x0097, 0x0000, -1, 0x0098, 0x0000,
-       -1, 0x0007, 0x0133, -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000, -2, 100, -3 };
-
-#define HY28B_GAMMA \
-       "04 1F 4 7 7 0 7 7 6 0\n" \
-       "0F 00 1 7 4 0 0 0 6 7"
-
-static const s16 pitft_init_sequence[] = {
-       -1, MIPI_DCS_SOFT_RESET,
-       -2, 5,
-       -1, MIPI_DCS_SET_DISPLAY_OFF,
-       -1, 0xEF, 0x03, 0x80, 0x02,
-       -1, 0xCF, 0x00, 0xC1, 0x30,
-       -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-       -1, 0xE8, 0x85, 0x00, 0x78,
-       -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-       -1, 0xF7, 0x20,
-       -1, 0xEA, 0x00, 0x00,
-       -1, 0xC0, 0x23,
-       -1, 0xC1, 0x10,
-       -1, 0xC5, 0x3E, 0x28,
-       -1, 0xC7, 0x86,
-       -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
-       -1, 0xB1, 0x00, 0x18,
-       -1, 0xB6, 0x08, 0x82, 0x27,
-       -1, 0xF2, 0x00,
-       -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-       -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
-               0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
-       -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
-               0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
-       -1, MIPI_DCS_EXIT_SLEEP_MODE,
-       -2, 100,
-       -1, MIPI_DCS_SET_DISPLAY_ON,
-       -2, 20,
-       -3
-};
-
-static const s16 waveshare32b_init_sequence[] = {
-       -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-       -1, 0xCF, 0x00, 0xC1, 0x30,
-       -1, 0xE8, 0x85, 0x00, 0x78,
-       -1, 0xEA, 0x00, 0x00,
-       -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-       -1, 0xF7, 0x20,
-       -1, 0xC0, 0x23,
-       -1, 0xC1, 0x10,
-       -1, 0xC5, 0x3E, 0x28,
-       -1, 0xC7, 0x86,
-       -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28,
-       -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
-       -1, 0xB1, 0x00, 0x18,
-       -1, 0xB6, 0x08, 0x82, 0x27,
-       -1, 0xF2, 0x00,
-       -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-       -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
-               0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
-       -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
-               0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
-       -1, MIPI_DCS_EXIT_SLEEP_MODE,
-       -2, 120,
-       -1, MIPI_DCS_SET_DISPLAY_ON,
-       -1, MIPI_DCS_WRITE_MEMORY_START,
-       -3
-};
-
-#define PIOLED_GAMMA   "0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 " \
-                       "2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 " \
-                       "3 3 3 4 4 4 4 4 4 4 4 4 4 4 4"
-
-/* Supported displays in alphabetical order */
-static struct fbtft_device_display displays[] = {
-       {
-               .name = "adafruit18",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7735r",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .gamma = ADAFRUIT18_GAMMA,
-                       }
-               }
-       }, {
-               .name = "adafruit18_green",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7735r",
-                       .max_speed_hz = 4000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .fbtftops.set_addr_win =
-                                           adafruit18_green_tab_set_addr_win,
-                               },
-                               .bgr = true,
-                               .gamma = ADAFRUIT18_GAMMA,
-                       }
-               }
-       }, {
-               .name = "adafruit22",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_hx8340bn",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 9,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "adafruit22a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9340",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "adafruit28",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "adafruit13m",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1306",
-                       .max_speed_hz = 16000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "admatec_c-berry28",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7789v",
-                       .max_speed_hz = 48000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence = cberry28_init_sequence,
-                               },
-                               .gamma = CBERRY28_GAMMA,
-                       }
-               }
-       }, {
-               .name = "agm1264k-fl",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_agm1264k-fl",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = FBTFT_ONBOARD_BACKLIGHT,
-                               },
-                       },
-                       }
-               }
-       }, {
-               .name = "dogs102",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_uc1701",
-                       .max_speed_hz = 8000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "er_tftm050_2",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ra8875",
-                       .max_speed_hz = 5000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .width = 480,
-                                       .height = 272,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "er_tftm070_5",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ra8875",
-                       .max_speed_hz = 5000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .width = 800,
-                                       .height = 480,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "ew24ha0",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_uc1611",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "ew24ha0_9bit",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_uc1611",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 9,
-                               },
-                       }
-               }
-       }, {
-               .name = "flexfb",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "flexfb",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-               }
-       }, {
-               .name = "flexpfb",
-               .pdev = &(struct platform_device) {
-                       .name = "flexpfb",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       }
-               }
-       }, {
-               .name = "freetronicsoled128",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1351",
-                       .max_speed_hz = 20000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = FBTFT_ONBOARD_BACKLIGHT,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "hx8353d",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_hx8353d",
-                       .max_speed_hz = 16000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                       }
-               }
-       }, {
-               .name = "hy28a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9320",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .startbyte = 0x70,
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "hy28b",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9325",
-                       .max_speed_hz = 48000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence = hy28b_init_sequence,
-                               },
-                               .startbyte = 0x70,
-                               .bgr = true,
-                               .fps = 50,
-                               .gamma = HY28B_GAMMA,
-                       }
-               }
-       }, {
-               .name = "ili9481",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9481",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .regwidth = 16,
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "itdb24",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_s6d1121",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = false,
-                       },
-                       }
-               }
-       }, {
-               .name = "itdb28",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ili9325",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       },
-                       }
-               }
-       }, {
-               .name = "itdb28_spi",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9325",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "mi0283qt-2",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_hx8347d",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .startbyte = 0x70,
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "mi0283qt-9a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 9,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "mi0283qt-v2",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_watterott",
-                       .max_speed_hz = 4000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                       }
-               }
-       }, {
-               .name = "nokia3310",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_pcd8544",
-                       .max_speed_hz = 400000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "nokia3310a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_tls8204",
-                       .max_speed_hz = 1000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "nokia5110",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9163",
-                       .max_speed_hz = 12000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "piscreen",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9486",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .regwidth = 16,
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "pitft",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9340",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .chip_select = 0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence = pitft_init_sequence,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "pioled",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1351",
-                       .max_speed_hz = 20000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                               .bgr = true,
-                               .gamma = PIOLED_GAMMA
-                       }
-               }
-       }, {
-               .name = "rpi-display",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "s6d02a1",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_s6d02a1",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "sainsmart18",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7735r",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "sainsmart32",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ssd1289",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 16,
-                                       .txbuflen = -2, /* disable buffer */
-                                       .backlight = 1,
-                                       .fbtftops.write = write_gpio16_wr_slow,
-                               },
-                               .bgr = true,
-                       },
-               },
-               }
-       }, {
-               .name = "sainsmart32_fast",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ssd1289",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 16,
-                                       .txbuflen = -2, /* disable buffer */
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       },
-               },
-               }
-       }, {
-               .name = "sainsmart32_latched",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ssd1289",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 16,
-                                       .txbuflen = -2, /* disable buffer */
-                                       .backlight = 1,
-                                       .fbtftops.write =
-                                               fbtft_write_gpio16_wr_latched,
-                               },
-                               .bgr = true,
-                       },
-               },
-               }
-       }, {
-               .name = "sainsmart32_spi",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1289",
-                       .max_speed_hz = 16000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "spidev",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "spidev",
-                       .max_speed_hz = 500000,
-                       .bus_num = 0,
-                       .chip_select = 0,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                       }
-               }
-       }, {
-               .name = "ssd1331",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1331",
-                       .max_speed_hz = 20000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "tinylcd35",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_tinylcd",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "tm022hdh26",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "tontec35_9481", /* boards before 02 July 2014 */
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9481",
-                       .max_speed_hz = 128000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "tontec35_9486", /* boards after 02 July 2014 */
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9486",
-                       .max_speed_hz = 128000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "upd161704",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_upd161704",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "waveshare32b",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9340",
-                       .max_speed_hz = 48000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence =
-                                               waveshare32b_init_sequence,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "waveshare22",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_bd663474",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               /* This should be the last item.
-                * Used with the custom argument
-                */
-               .name = "",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "",
-                       .max_speed_hz = 0,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                       }
-               },
-               .pdev = &(struct platform_device) {
-                       .name = "",
-                       .id = 0,
-                       .dev = {
-                               .release = fbtft_device_pdev_release,
-                               .platform_data = &(struct fbtft_platform_data) {
-                               },
-                       },
-               },
-       }
-};
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
-{
-       u16 data;
-       int i;
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
-       static u16 prev_data;
-#endif
-
-       fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
-                         "%s(len=%zu): ", __func__, len);
-
-       while (len) {
-               data = *(u16 *)buf;
-
-               /* Start writing by pulling down /WR */
-               gpiod_set_value(par->gpio.wr, 0);
-
-               /* Set data */
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
-               if (data == prev_data) {
-                       gpiod_set_value(par->gpio.wr, 0); /* used as delay */
-               } else {
-                       for (i = 0; i < 16; i++) {
-                               if ((data & 1) != (prev_data & 1))
-                                       gpiod_set_value(par->gpio.db[i],
-                                                       data & 1);
-                               data >>= 1;
-                               prev_data >>= 1;
-                       }
-               }
-#else
-               for (i = 0; i < 16; i++) {
-                       gpiod_set_value(par->gpio.db[i], data & 1);
-                       data >>= 1;
-               }
-#endif
-
-               /* Pullup /WR */
-               gpiod_set_value(par->gpio.wr, 1);
-
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
-               prev_data = *(u16 *)buf;
-#endif
-               buf += 2;
-               len -= 2;
-       }
-
-       return 0;
-}
-
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
-                                             int xs, int ys, int xe, int ye)
-{
-       write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
-       write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
-       write_reg(par, 0x2C);
-}
-
-static void fbtft_device_pdev_release(struct device *dev)
-{
-/* Needed to silence this message:
- * Device 'xxx' does not have a release() function,
- * it is broken and must be fixed
- */
-}
-
-static int spi_device_found(struct device *dev, void *data)
-{
-       struct spi_device *spi = to_spi_device(dev);
-
-       dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
-                dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
-                spi->mode);
-
-       return 0;
-}
-
-static void pr_spi_devices(void)
-{
-       pr_debug("SPI devices registered:\n");
-       bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found);
-}
-
-static int p_device_found(struct device *dev, void *data)
-{
-       struct platform_device
-       *pdev = to_platform_device(dev);
-
-       if (strstr(pdev->name, "fb"))
-               dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
-                        pdev->dev.platform_data ? "yes" : "no");
-
-       return 0;
-}
-
-static void pr_p_devices(void)
-{
-       pr_debug("'fb' Platform devices registered:\n");
-       bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found);
-}
-
-#ifdef MODULE
-static void fbtft_device_spi_delete(struct spi_master *master, unsigned int cs)
-{
-       struct device *dev;
-       char str[32];
-
-       snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs);
-
-       dev = bus_find_device_by_name(&spi_bus_type, NULL, str);
-       if (dev) {
-               if (verbose)
-                       dev_info(dev, "Deleting %s\n", str);
-               device_del(dev);
-       }
-}
-
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
-       struct spi_master *master;
-
-       master = spi_busnum_to_master(spi->bus_num);
-       if (!master) {
-               pr_err("spi_busnum_to_master(%d) returned NULL\n",
-                      spi->bus_num);
-               return -EINVAL;
-       }
-       /* make sure it's available */
-       fbtft_device_spi_delete(master, spi->chip_select);
-       spi_device = spi_new_device(master, spi);
-       put_device(&master->dev);
-       if (!spi_device) {
-               dev_err(&master->dev, "spi_new_device() returned NULL\n");
-               return -EPERM;
-       }
-       return 0;
-}
-#else
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
-       return spi_register_board_info(spi, 1);
-}
-#endif
-
-static int __init fbtft_device_init(void)
-{
-       struct spi_board_info *spi = NULL;
-       struct fbtft_platform_data *pdata;
-       bool found = false;
-       int i = 0;
-       int ret = 0;
-
-       if (!name) {
-#ifdef MODULE
-               pr_err("missing module parameter: 'name'\n");
-               return -EINVAL;
-#else
-               return 0;
-#endif
-       }
-
-       if (init_num > FBTFT_MAX_INIT_SEQUENCE) {
-               pr_err("init parameter: exceeded max array size: %d\n",
-                      FBTFT_MAX_INIT_SEQUENCE);
-               return -EINVAL;
-       }
-
-       if (verbose > 2) {
-               pr_spi_devices(); /* print list of registered SPI devices */
-               pr_p_devices(); /* print list of 'fb' platform devices */
-       }
-
-       pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
-
-       if (rotate > 0 && rotate < 4) {
-               rotate = (4 - rotate) * 90;
-               pr_warn("argument 'rotate' should be an angle. Values 1-3 is deprecated. Setting it to %d.\n",
-                       rotate);
-       }
-       if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) {
-               pr_warn("argument 'rotate' illegal value: %d. Setting it to 0.\n",
-                       rotate);
-               rotate = 0;
-       }
-
-       /* name=list lists all supported displays */
-       if (strcmp(name, "list") == 0) {
-               pr_info("Supported displays:\n");
-
-               for (i = 0; i < ARRAY_SIZE(displays); i++)
-                       pr_info("%s\n", displays[i].name);
-               return -ECANCELED;
-       }
-
-       if (custom) {
-               i = ARRAY_SIZE(displays) - 1;
-               displays[i].name = name;
-               if (speed == 0) {
-                       displays[i].pdev->name = name;
-                       displays[i].spi = NULL;
-               } else {
-                       size_t len;
-
-                       len = strlcpy(displays[i].spi->modalias, name,
-                                     SPI_NAME_SIZE);
-                       if (len >= SPI_NAME_SIZE)
-                               pr_warn("modalias (name) truncated to: %s\n",
-                                       displays[i].spi->modalias);
-                       displays[i].pdev = NULL;
-               }
-       }
-
-       for (i = 0; i < ARRAY_SIZE(displays); i++) {
-               if (strncmp(name, displays[i].name, SPI_NAME_SIZE) == 0) {
-                       if (displays[i].spi) {
-                               spi = displays[i].spi;
-                               spi->chip_select = cs;
-                               spi->bus_num = busnum;
-                               if (speed)
-                                       spi->max_speed_hz = speed;
-                               if (mode != -1)
-                                       spi->mode = mode;
-                               pdata = (void *)spi->platform_data;
-                       } else if (displays[i].pdev) {
-                               p_device = displays[i].pdev;
-                               pdata = p_device->dev.platform_data;
-                       } else {
-                               pr_err("broken displays array\n");
-                               return -EINVAL;
-                       }
-
-                       pdata->rotate = rotate;
-                       if (bgr == 0)
-                               pdata->bgr = false;
-                       else if (bgr == 1)
-                               pdata->bgr = true;
-                       if (startbyte)
-                               pdata->startbyte = startbyte;
-                       if (gamma)
-                               pdata->gamma = gamma;
-                       pdata->display.debug = debug;
-                       if (fps)
-                               pdata->fps = fps;
-                       if (txbuflen)
-                               pdata->txbuflen = txbuflen;
-                       if (init_num)
-                               pdata->display.init_sequence = init;
-                       if (custom) {
-                               pdata->display.width = width;
-                               pdata->display.height = height;
-                               pdata->display.buswidth = buswidth;
-                               pdata->display.backlight = 1;
-                       }
-
-                       if (displays[i].spi) {
-                               ret = fbtft_device_spi_device_register(spi);
-                               if (ret) {
-                                       pr_err("failed to register SPI device\n");
-                                       return ret;
-                               }
-                       } else {
-                               ret = platform_device_register(p_device);
-                               if (ret < 0) {
-                                       pr_err("platform_device_register() returned %d\n",
-                                              ret);
-                                       return ret;
-                               }
-                       }
-                       found = true;
-                       break;
-               }
-       }
-
-       if (!found) {
-               pr_err("display not supported: '%s'\n", name);
-               return -EINVAL;
-       }
-
-       if (spi_device && (verbose > 1))
-               pr_spi_devices();
-       if (p_device && (verbose > 1))
-               pr_p_devices();
-
-       return 0;
-}
-
-static void __exit fbtft_device_exit(void)
-{
-       if (spi_device) {
-               device_del(&spi_device->dev);
-               kfree(spi_device);
-       }
-
-       if (p_device)
-               platform_device_unregister(p_device);
-}
-
-arch_initcall(fbtft_device_init);
-module_exit(fbtft_device_exit);
-
-MODULE_DESCRIPTION("Add a FBTFT device.");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
deleted file mode 100644 (file)
index 3747321..0000000
+++ /dev/null
@@ -1,851 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Generic FB driver for TFT LCD displays
- *
- * Copyright (C) 2013 Noralf Tronnes
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME            "flexfb"
-
-static char *chip;
-module_param(chip, charp, 0000);
-MODULE_PARM_DESC(chip, "LCD controller");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height");
-
-static s16 init[512];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence");
-
-static unsigned int setaddrwin;
-module_param(setaddrwin, uint, 0000);
-MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)");
-
-static unsigned int regwidth = 8;
-module_param(regwidth, uint, 0000);
-MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)");
-
-static bool nobacklight;
-module_param(nobacklight, bool, 0000);
-MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality.");
-
-static bool latched;
-module_param(latched, bool, 0000);
-MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-
-static const s16 *initp;
-static int initp_num;
-
-/* default init sequences */
-static const s16 st7735r_init[] = {
-       -1, 0x01,
-       -2, 150,
-       -1, 0x11,
-       -2, 500,
-       -1, 0xB1, 0x01, 0x2C, 0x2D,
-       -1, 0xB2, 0x01, 0x2C, 0x2D,
-       -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
-       -1, 0xB4, 0x07,
-       -1, 0xC0, 0xA2, 0x02, 0x84,
-       -1, 0xC1, 0xC5,
-       -1, 0xC2, 0x0A, 0x00,
-       -1, 0xC3, 0x8A, 0x2A,
-       -1, 0xC4, 0x8A, 0xEE,
-       -1, 0xC5, 0x0E,
-       -1, 0x20,
-       -1, 0x36, 0xC0,
-       -1, 0x3A, 0x05,
-       -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
-           0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
-       -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
-           0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
-       -1, 0x29,
-       -2, 100,
-       -1, 0x13,
-       -2, 10,
-       -3
-};
-
-static const s16 ssd1289_init[] = {
-       -1, 0x00, 0x0001,
-       -1, 0x03, 0xA8A4,
-       -1, 0x0C, 0x0000,
-       -1, 0x0D, 0x080C,
-       -1, 0x0E, 0x2B00,
-       -1, 0x1E, 0x00B7,
-       -1, 0x01, 0x2B3F,
-       -1, 0x02, 0x0600,
-       -1, 0x10, 0x0000,
-       -1, 0x11, 0x6070,
-       -1, 0x05, 0x0000,
-       -1, 0x06, 0x0000,
-       -1, 0x16, 0xEF1C,
-       -1, 0x17, 0x0003,
-       -1, 0x07, 0x0233,
-       -1, 0x0B, 0x0000,
-       -1, 0x0F, 0x0000,
-       -1, 0x41, 0x0000,
-       -1, 0x42, 0x0000,
-       -1, 0x48, 0x0000,
-       -1, 0x49, 0x013F,
-       -1, 0x4A, 0x0000,
-       -1, 0x4B, 0x0000,
-       -1, 0x44, 0xEF00,
-       -1, 0x45, 0x0000,
-       -1, 0x46, 0x013F,
-       -1, 0x30, 0x0707,
-       -1, 0x31, 0x0204,
-       -1, 0x32, 0x0204,
-       -1, 0x33, 0x0502,
-       -1, 0x34, 0x0507,
-       -1, 0x35, 0x0204,
-       -1, 0x36, 0x0204,
-       -1, 0x37, 0x0502,
-       -1, 0x3A, 0x0302,
-       -1, 0x3B, 0x0302,
-       -1, 0x23, 0x0000,
-       -1, 0x24, 0x0000,
-       -1, 0x25, 0x8000,
-       -1, 0x4f, 0x0000,
-       -1, 0x4e, 0x0000,
-       -1, 0x22,
-       -3
-};
-
-static const s16 hx8340bn_init[] = {
-       -1, 0xC1, 0xFF, 0x83, 0x40,
-       -1, 0x11,
-       -2, 150,
-       -1, 0xCA, 0x70, 0x00, 0xD9,
-       -1, 0xB0, 0x01, 0x11,
-       -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
-       -2, 20,
-       -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
-       -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
-       -2, 10,
-       -1, 0xB5, 0x35, 0x20, 0x45,
-       -1, 0xB4, 0x33, 0x25, 0x4C,
-       -2, 10,
-       -1, 0x3A, 0x05,
-       -1, 0x29,
-       -2, 10,
-       -3
-};
-
-static const s16 ili9225_init[] = {
-       -1, 0x0001, 0x011C,
-       -1, 0x0002, 0x0100,
-       -1, 0x0003, 0x1030,
-       -1, 0x0008, 0x0808,
-       -1, 0x000C, 0x0000,
-       -1, 0x000F, 0x0A01,
-       -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000,
-       -2, 50,
-       -1, 0x0010, 0x0A00,
-       -1, 0x0011, 0x1038,
-       -2, 50,
-       -1, 0x0012, 0x1121,
-       -1, 0x0013, 0x004E,
-       -1, 0x0014, 0x676F,
-       -1, 0x0030, 0x0000,
-       -1, 0x0031, 0x00DB,
-       -1, 0x0032, 0x0000,
-       -1, 0x0033, 0x0000,
-       -1, 0x0034, 0x00DB,
-       -1, 0x0035, 0x0000,
-       -1, 0x0036, 0x00AF,
-       -1, 0x0037, 0x0000,
-       -1, 0x0038, 0x00DB,
-       -1, 0x0039, 0x0000,
-       -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x060A,
-       -1, 0x0052, 0x0D0A,
-       -1, 0x0053, 0x0303,
-       -1, 0x0054, 0x0A0D,
-       -1, 0x0055, 0x0A06,
-       -1, 0x0056, 0x0000,
-       -1, 0x0057, 0x0303,
-       -1, 0x0058, 0x0000,
-       -1, 0x0059, 0x0000,
-       -2, 50,
-       -1, 0x0007, 0x1017,
-       -2, 50,
-       -3
-};
-
-static const s16 ili9320_init[] = {
-       -1, 0x00E5, 0x8000,
-       -1, 0x0000, 0x0001,
-       -1, 0x0001, 0x0100,
-       -1, 0x0002, 0x0700,
-       -1, 0x0003, 0x1030,
-       -1, 0x0004, 0x0000,
-       -1, 0x0008, 0x0202,
-       -1, 0x0009, 0x0000,
-       -1, 0x000A, 0x0000,
-       -1, 0x000C, 0x0000,
-       -1, 0x000D, 0x0000,
-       -1, 0x000F, 0x0000,
-       -1, 0x0010, 0x0000,
-       -1, 0x0011, 0x0007,
-       -1, 0x0012, 0x0000,
-       -1, 0x0013, 0x0000,
-       -2, 200,
-       -1, 0x0010, 0x17B0,
-       -1, 0x0011, 0x0031,
-       -2, 50,
-       -1, 0x0012, 0x0138,
-       -2, 50,
-       -1, 0x0013, 0x1800,
-       -1, 0x0029, 0x0008,
-       -2, 50,
-       -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000,
-       -1, 0x0030, 0x0000,
-       -1, 0x0031, 0x0505,
-       -1, 0x0032, 0x0004,
-       -1, 0x0035, 0x0006,
-       -1, 0x0036, 0x0707,
-       -1, 0x0037, 0x0105,
-       -1, 0x0038, 0x0002,
-       -1, 0x0039, 0x0707,
-       -1, 0x003C, 0x0704,
-       -1, 0x003D, 0x0807,
-       -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x00EF,
-       -1, 0x0052, 0x0000,
-       -1, 0x0053, 0x013F,
-       -1, 0x0060, 0x2700,
-       -1, 0x0061, 0x0001,
-       -1, 0x006A, 0x0000,
-       -1, 0x0080, 0x0000,
-       -1, 0x0081, 0x0000,
-       -1, 0x0082, 0x0000,
-       -1, 0x0083, 0x0000,
-       -1, 0x0084, 0x0000,
-       -1, 0x0085, 0x0000,
-       -1, 0x0090, 0x0010,
-       -1, 0x0092, 0x0000,
-       -1, 0x0093, 0x0003,
-       -1, 0x0095, 0x0110,
-       -1, 0x0097, 0x0000,
-       -1, 0x0098, 0x0000,
-       -1, 0x0007, 0x0173,
-       -3
-};
-
-static const s16 ili9325_init[] = {
-       -1, 0x00E3, 0x3008,
-       -1, 0x00E7, 0x0012,
-       -1, 0x00EF, 0x1231,
-       -1, 0x0001, 0x0100,
-       -1, 0x0002, 0x0700,
-       -1, 0x0003, 0x1030,
-       -1, 0x0004, 0x0000,
-       -1, 0x0008, 0x0207,
-       -1, 0x0009, 0x0000,
-       -1, 0x000A, 0x0000,
-       -1, 0x000C, 0x0000,
-       -1, 0x000D, 0x0000,
-       -1, 0x000F, 0x0000,
-       -1, 0x0010, 0x0000,
-       -1, 0x0011, 0x0007,
-       -1, 0x0012, 0x0000,
-       -1, 0x0013, 0x0000,
-       -2, 200,
-       -1, 0x0010, 0x1690,
-       -1, 0x0011, 0x0223,
-       -2, 50,
-       -1, 0x0012, 0x000D,
-       -2, 50,
-       -1, 0x0013, 0x1200,
-       -1, 0x0029, 0x000A,
-       -1, 0x002B, 0x000C,
-       -2, 50,
-       -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000,
-       -1, 0x0030, 0x0000,
-       -1, 0x0031, 0x0506,
-       -1, 0x0032, 0x0104,
-       -1, 0x0035, 0x0207,
-       -1, 0x0036, 0x000F,
-       -1, 0x0037, 0x0306,
-       -1, 0x0038, 0x0102,
-       -1, 0x0039, 0x0707,
-       -1, 0x003C, 0x0702,
-       -1, 0x003D, 0x1604,
-       -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x00EF,
-       -1, 0x0052, 0x0000,
-       -1, 0x0053, 0x013F,
-       -1, 0x0060, 0xA700,
-       -1, 0x0061, 0x0001,
-       -1, 0x006A, 0x0000,
-       -1, 0x0080, 0x0000,
-       -1, 0x0081, 0x0000,
-       -1, 0x0082, 0x0000,
-       -1, 0x0083, 0x0000,
-       -1, 0x0084, 0x0000,
-       -1, 0x0085, 0x0000,
-       -1, 0x0090, 0x0010,
-       -1, 0x0092, 0x0600,
-       -1, 0x0007, 0x0133,
-       -3
-};
-
-static const s16 ili9341_init[] = {
-       -1, 0x28,
-       -2, 20,
-       -1, 0xCF, 0x00, 0x83, 0x30,
-       -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-       -1, 0xE8, 0x85, 0x01, 0x79,
-       -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
-       -1, 0xF7, 0x20,
-       -1, 0xEA, 0x00, 0x00,
-       -1, 0xC0, 0x26,
-       -1, 0xC1, 0x11,
-       -1, 0xC5, 0x35, 0x3E,
-       -1, 0xC7, 0xBE,
-       -1, 0xB1, 0x00, 0x1B,
-       -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
-       -1, 0xB7, 0x07,
-       -1, 0x3A, 0x55,
-       -1, 0x36, 0x48,
-       -1, 0x11,
-       -2, 120,
-       -1, 0x29,
-       -2, 20,
-       -3
-};
-
-static const s16 ssd1351_init[] = {
-       -1, 0xfd, 0x12,
-       -1, 0xfd, 0xb1,
-       -1, 0xae,
-       -1, 0xb3, 0xf1,
-       -1, 0xca, 0x7f,
-       -1, 0xa0, 0x74,
-       -1, 0x15, 0x00, 0x7f,
-       -1, 0x75, 0x00, 0x7f,
-       -1, 0xa1, 0x00,
-       -1, 0xa2, 0x00,
-       -1, 0xb5, 0x00,
-       -1, 0xab, 0x01,
-       -1, 0xb1, 0x32,
-       -1, 0xb4, 0xa0, 0xb5, 0x55,
-       -1, 0xbb, 0x17,
-       -1, 0xbe, 0x05,
-       -1, 0xc1, 0xc8, 0x80, 0xc8,
-       -1, 0xc7, 0x0f,
-       -1, 0xb6, 0x01,
-       -1, 0xa6,
-       -1, 0xaf,
-       -3
-};
-
-/**
- * struct flexfb_lcd_controller - Describes the LCD controller properties
- * @name: Model name of the chip
- * @width: Width of display in pixels
- * @height: Height of display in pixels
- * @setaddrwin: Which set_addr_win() implementation to use
- * @regwidth: LCD Controller Register width in bits
- * @init_seq: LCD initialization sequence
- * @init_seq_sz: Size of LCD initialization sequence
- */
-struct flexfb_lcd_controller {
-       const char *name;
-       unsigned int width;
-       unsigned int height;
-       unsigned int setaddrwin;
-       unsigned int regwidth;
-       const s16 *init_seq;
-       int init_seq_sz;
-};
-
-static const struct flexfb_lcd_controller flexfb_chip_table[] = {
-       {
-               .name = "st7735r",
-               .width = 120,
-               .height = 160,
-               .init_seq = st7735r_init,
-               .init_seq_sz = ARRAY_SIZE(st7735r_init),
-       },
-       {
-               .name = "hx8340bn",
-               .width = 176,
-               .height = 220,
-               .init_seq = hx8340bn_init,
-               .init_seq_sz = ARRAY_SIZE(hx8340bn_init),
-       },
-       {
-               .name = "ili9225",
-               .width = 176,
-               .height = 220,
-               .regwidth = 16,
-               .init_seq = ili9225_init,
-               .init_seq_sz = ARRAY_SIZE(ili9225_init),
-       },
-       {
-               .name = "ili9320",
-               .width = 240,
-               .height = 320,
-               .setaddrwin = 1,
-               .regwidth = 16,
-               .init_seq = ili9320_init,
-               .init_seq_sz = ARRAY_SIZE(ili9320_init),
-       },
-       {
-               .name = "ili9325",
-               .width = 240,
-               .height = 320,
-               .setaddrwin = 1,
-               .regwidth = 16,
-               .init_seq = ili9325_init,
-               .init_seq_sz = ARRAY_SIZE(ili9325_init),
-       },
-       {
-               .name = "ili9341",
-               .width = 240,
-               .height = 320,
-               .init_seq = ili9341_init,
-               .init_seq_sz = ARRAY_SIZE(ili9341_init),
-       },
-       {
-               .name = "ssd1289",
-               .width = 240,
-               .height = 320,
-               .setaddrwin = 2,
-               .regwidth = 16,
-               .init_seq = ssd1289_init,
-               .init_seq_sz = ARRAY_SIZE(ssd1289_init),
-       },
-       {
-               .name = "ssd1351",
-               .width = 128,
-               .height = 128,
-               .setaddrwin = 3,
-               .init_seq = ssd1351_init,
-               .init_seq_sz = ARRAY_SIZE(ssd1351_init),
-       },
-};
-
-/* ili9320, ili9325 */
-static void flexfb_set_addr_win_1(struct fbtft_par *par,
-                                 int xs, int ys, int xe, int ye)
-{
-       switch (par->info->var.rotate) {
-       /* R20h = Horizontal GRAM Start Address */
-       /* R21h = Vertical GRAM Start Address */
-       case 0:
-               write_reg(par, 0x0020, xs);
-               write_reg(par, 0x0021, ys);
-               break;
-       case 180:
-               write_reg(par, 0x0020, width - 1 - xs);
-               write_reg(par, 0x0021, height - 1 - ys);
-               break;
-       case 270:
-               write_reg(par, 0x0020, width - 1 - ys);
-               write_reg(par, 0x0021, xs);
-               break;
-       case 90:
-               write_reg(par, 0x0020, ys);
-               write_reg(par, 0x0021, height - 1 - xs);
-               break;
-       }
-       write_reg(par, 0x0022); /* Write Data to GRAM */
-}
-
-/* ssd1289 */
-static void flexfb_set_addr_win_2(struct fbtft_par *par,
-                                 int xs, int ys, int xe, int ye)
-{
-       switch (par->info->var.rotate) {
-       /* R4Eh - Set GDDRAM X address counter */
-       /* R4Fh - Set GDDRAM Y address counter */
-       case 0:
-               write_reg(par, 0x4e, xs);
-               write_reg(par, 0x4f, ys);
-               break;
-       case 180:
-               write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
-               write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
-               break;
-       case 270:
-               write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
-               write_reg(par, 0x4f, xs);
-               break;
-       case 90:
-               write_reg(par, 0x4e, ys);
-               write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
-               break;
-       }
-
-       /* R22h - RAM data write */
-       write_reg(par, 0x22, 0);
-}
-
-/* ssd1351 */
-static void set_addr_win_3(struct fbtft_par *par,
-                          int xs, int ys, int xe, int ye)
-{
-       write_reg(par, 0x15, xs, xe);
-       write_reg(par, 0x75, ys, ye);
-       write_reg(par, 0x5C);
-}
-
-static int flexfb_verify_gpios_dc(struct fbtft_par *par)
-{
-       fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
-       if (!par->gpio.dc) {
-               dev_err(par->info->device,
-                       "Missing info about 'dc' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int flexfb_verify_gpios_db(struct fbtft_par *par)
-{
-       int i;
-       int num_db = buswidth;
-
-       fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
-       if (!par->gpio.dc) {
-               dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-       if (!par->gpio.wr) {
-               dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-       if (latched && !par->gpio.latch) {
-               dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-       if (latched)
-               num_db = buswidth / 2;
-       for (i = 0; i < num_db; i++) {
-               if (!par->gpio.db[i]) {
-                       dev_err(par->info->device,
-                               "Missing info about 'db%02d' gpio. Aborting.\n",
-                               i);
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
-{
-       if (!width)
-               width = chip->width;
-       if (!height)
-               height = chip->height;
-       setaddrwin = chip->setaddrwin;
-       if (chip->regwidth)
-               regwidth = chip->regwidth;
-       if (!init_num) {
-               initp = chip->init_seq;
-               initp_num = chip->init_seq_sz;
-       }
-}
-
-static struct fbtft_display flex_display = { };
-
-static int flexfb_chip_init(const struct device *dev)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
-               if (!strcmp(chip, flexfb_chip_table[i].name)) {
-                       flexfb_chip_load_param(&flexfb_chip_table[i]);
-                       return 0;
-               }
-
-       dev_err(dev, "chip=%s is not supported\n", chip);
-
-       return -EINVAL;
-}
-
-static int flexfb_probe_common(struct spi_device *sdev,
-                              struct platform_device *pdev)
-{
-       struct device *dev;
-       struct fb_info *info;
-       struct fbtft_par *par;
-       int ret;
-
-       initp = init;
-       initp_num = init_num;
-
-       if (sdev)
-               dev = &sdev->dev;
-       else
-               dev = &pdev->dev;
-
-       fbtft_init_dbg(dev, "%s(%s)\n", __func__,
-                      sdev ? "'SPI device'" : "'Platform device'");
-
-       if (chip) {
-               ret = flexfb_chip_init(dev);
-               if (ret)
-                       return ret;
-       }
-
-       if (width == 0 || height == 0) {
-               dev_err(dev, "argument(s) missing: width and height has to be set.\n");
-               return -EINVAL;
-       }
-       flex_display.width = width;
-       flex_display.height = height;
-       fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height);
-       fbtft_init_dbg(dev, "chip = %s\n", chip ? chip : "not set");
-       fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin);
-       fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
-       fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
-
-       info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
-       if (!info)
-               return -ENOMEM;
-
-       par = info->par;
-       if (sdev)
-               par->spi = sdev;
-       else
-               par->pdev = pdev;
-       if (!par->init_sequence)
-               par->init_sequence = initp;
-       par->fbtftops.init_display = fbtft_init_display;
-
-       /* registerwrite functions */
-       switch (regwidth) {
-       case 8:
-               par->fbtftops.write_register = fbtft_write_reg8_bus8;
-               break;
-       case 16:
-               par->fbtftops.write_register = fbtft_write_reg16_bus8;
-               break;
-       default:
-               dev_err(dev,
-                       "argument 'regwidth': %d is not supported.\n",
-                       regwidth);
-               return -EINVAL;
-       }
-
-       /* bus functions */
-       if (sdev) {
-               par->fbtftops.write = fbtft_write_spi;
-               switch (buswidth) {
-               case 8:
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
-                       if (!par->startbyte)
-                               par->fbtftops.verify_gpios = flexfb_verify_gpios_dc;
-                       break;
-               case 9:
-                       if (regwidth == 16) {
-                               dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n",
-                                       regwidth, buswidth);
-                               return -EINVAL;
-                       }
-                       par->fbtftops.write_register = fbtft_write_reg8_bus9;
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
-                       if (par->spi->master->bits_per_word_mask
-                           & SPI_BPW_MASK(9)) {
-                               par->spi->bits_per_word = 9;
-                               break;
-                       }
-
-                       dev_warn(dev,
-                                "9-bit SPI not available, emulating using 8-bit.\n");
-                       /* allocate buffer with room for dc bits */
-                       par->extra = devm_kzalloc(par->info->device,
-                                                 par->txbuf.len
-                                                 + (par->txbuf.len / 8) + 8,
-                                                 GFP_KERNEL);
-                       if (!par->extra) {
-                               ret = -ENOMEM;
-                               goto out_release;
-                       }
-                       par->fbtftops.write = fbtft_write_spi_emulate_9;
-
-                       break;
-               default:
-                       dev_err(dev,
-                               "argument 'buswidth': %d is not supported with SPI.\n",
-                               buswidth);
-                       return -EINVAL;
-               }
-       } else {
-               par->fbtftops.verify_gpios = flexfb_verify_gpios_db;
-               switch (buswidth) {
-               case 8:
-                       par->fbtftops.write = fbtft_write_gpio8_wr;
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
-                       break;
-               case 16:
-                       par->fbtftops.write_register = fbtft_write_reg16_bus16;
-                       if (latched)
-                               par->fbtftops.write = fbtft_write_gpio16_wr_latched;
-                       else
-                               par->fbtftops.write = fbtft_write_gpio16_wr;
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
-                       break;
-               default:
-                       dev_err(dev,
-                               "argument 'buswidth': %d is not supported with parallel.\n",
-                               buswidth);
-                       return -EINVAL;
-               }
-       }
-
-       /* set_addr_win function */
-       switch (setaddrwin) {
-       case 0:
-               /* use default */
-               break;
-       case 1:
-               par->fbtftops.set_addr_win = flexfb_set_addr_win_1;
-               break;
-       case 2:
-               par->fbtftops.set_addr_win = flexfb_set_addr_win_2;
-               break;
-       case 3:
-               par->fbtftops.set_addr_win = set_addr_win_3;
-               break;
-       default:
-               dev_err(dev, "argument 'setaddrwin': unknown value %d.\n",
-                       setaddrwin);
-               return -EINVAL;
-       }
-
-       if (!nobacklight)
-               par->fbtftops.register_backlight = fbtft_register_backlight;
-
-       ret = fbtft_register_framebuffer(info);
-       if (ret < 0)
-               goto out_release;
-
-       return 0;
-
-out_release:
-       fbtft_framebuffer_release(info);
-
-       return ret;
-}
-
-static int flexfb_remove_common(struct device *dev, struct fb_info *info)
-{
-       struct fbtft_par *par;
-
-       if (!info)
-               return -EINVAL;
-       par = info->par;
-       if (par)
-               fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
-                             __func__);
-       fbtft_unregister_framebuffer(info);
-       fbtft_framebuffer_release(info);
-
-       return 0;
-}
-
-static int flexfb_probe_spi(struct spi_device *spi)
-{
-       return flexfb_probe_common(spi, NULL);
-}
-
-static int flexfb_remove_spi(struct spi_device *spi)
-{
-       struct fb_info *info = spi_get_drvdata(spi);
-
-       return flexfb_remove_common(&spi->dev, info);
-}
-
-static int flexfb_probe_pdev(struct platform_device *pdev)
-{
-       return flexfb_probe_common(NULL, pdev);
-}
-
-static int flexfb_remove_pdev(struct platform_device *pdev)
-{
-       struct fb_info *info = platform_get_drvdata(pdev);
-
-       return flexfb_remove_common(&pdev->dev, info);
-}
-
-static struct spi_driver flexfb_spi_driver = {
-       .driver = {
-               .name   = DRVNAME,
-       },
-       .probe  = flexfb_probe_spi,
-       .remove = flexfb_remove_spi,
-};
-
-static const struct platform_device_id flexfb_platform_ids[] = {
-       { "flexpfb", 0 },
-       { },
-};
-MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
-
-static struct platform_driver flexfb_platform_driver = {
-       .driver = {
-               .name   = DRVNAME,
-       },
-       .id_table = flexfb_platform_ids,
-       .probe  = flexfb_probe_pdev,
-       .remove = flexfb_remove_pdev,
-};
-
-static int __init flexfb_init(void)
-{
-       int ret, ret2;
-
-       ret = spi_register_driver(&flexfb_spi_driver);
-       ret2 = platform_driver_register(&flexfb_platform_driver);
-       if (ret < 0)
-               return ret;
-       return ret2;
-}
-
-static void __exit flexfb_exit(void)
-{
-       spi_unregister_driver(&flexfb_spi_driver);
-       platform_driver_unregister(&flexfb_platform_driver);
-}
-
-/* ------------------------------------------------------------------------- */
-
-module_init(flexfb_init);
-module_exit(flexfb_exit);
-
-MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
index 50d87c7..9ea9c88 100644 (file)
@@ -13,9 +13,6 @@
 /* The maximum devices per each type. */
 #define GASKET_DEV_MAX 256
 
-/* The number of supported (and possible) PCI BARs. */
-#define GASKET_NUM_BARS 6
-
 /* The number of supported Gasket page tables per device. */
 #define GASKET_MAX_NUM_PAGE_TABLES 1
 
index 13179f0..cd8be80 100644 (file)
@@ -371,7 +371,7 @@ static int gasket_setup_pci(struct pci_dev *pci_dev,
 {
        int i, mapped_bars, ret;
 
-       for (i = 0; i < GASKET_NUM_BARS; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                ret = gasket_map_pci_bar(gasket_dev, i);
                if (ret) {
                        mapped_bars = i;
@@ -393,7 +393,7 @@ static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
 {
        int i;
 
-       for (i = 0; i < GASKET_NUM_BARS; i++)
+       for (i = 0; i < PCI_STD_NUM_BARS; i++)
                gasket_unmap_pci_bar(gasket_dev, i);
 }
 
@@ -493,7 +493,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
                (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
        switch (sysfs_type) {
        case ATTR_BAR_OFFSETS:
-               for (i = 0; i < GASKET_NUM_BARS; i++) {
+               for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                        bar_desc = &driver_desc->bar_descriptions[i];
                        if (bar_desc->size == 0)
                                continue;
@@ -505,7 +505,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
                }
                break;
        case ATTR_BAR_SIZES:
-               for (i = 0; i < GASKET_NUM_BARS; i++) {
+               for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                        bar_desc = &driver_desc->bar_descriptions[i];
                        if (bar_desc->size == 0)
                                continue;
@@ -556,7 +556,7 @@ static ssize_t gasket_sysfs_data_show(struct device *device,
                ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
                break;
        case ATTR_USER_MEM_RANGES:
-               for (i = 0; i < GASKET_NUM_BARS; ++i) {
+               for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
                        current_written =
                                gasket_write_mappable_regions(buf, driver_desc,
                                                              i);
@@ -736,7 +736,7 @@ static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
        const struct gasket_driver_desc *driver_desc;
 
        driver_desc = gasket_dev->internal_desc->driver_desc;
-       for (i = 0; i < GASKET_NUM_BARS; ++i) {
+       for (i = 0; i < PCI_STD_NUM_BARS; ++i) {
                struct gasket_bar_desc bar_desc =
                        driver_desc->bar_descriptions[i];
 
index be44ac1..c417aca 100644 (file)
@@ -268,7 +268,7 @@ struct gasket_dev {
        char kobj_name[GASKET_NAME_MAX];
 
        /* Virtual address of mapped BAR memory range. */
-       struct gasket_bar_data bar_data[GASKET_NUM_BARS];
+       struct gasket_bar_data bar_data[PCI_STD_NUM_BARS];
 
        /* Coherent buffer. */
        struct gasket_coherent_buffer coherent_buffer;
@@ -369,7 +369,7 @@ struct gasket_driver_desc {
        /* Set of 6 bar descriptions that describe all PCIe bars.
         * Note that BUS/AXI devices (i.e. non PCI devices) use those.
         */
-       struct gasket_bar_desc bar_descriptions[GASKET_NUM_BARS];
+       struct gasket_bar_desc bar_descriptions[PCI_STD_NUM_BARS];
 
        /*
         * Coherent buffer description.
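
The gasket hunks above all make the same substitution: the driver-local GASKET_NUM_BARS define (6) is replaced by PCI_STD_NUM_BARS from <linux/pci.h>, which names the same architectural limit of six standard BARs. A minimal sketch of what a BAR-iteration loop looks like after the conversion (demo_map_bars() and its body are illustrative only, not part of the patch):

#include <linux/pci.h>

/* Illustrative only: walk the six standard BARs using the generic
 * constant instead of a driver-local define, skipping BARs that are
 * not memory resources or have zero length. */
static int demo_map_bars(struct pci_dev *pdev)
{
        int i;

        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
                        continue;
                if (!pci_resource_len(pdev, i))
                        continue;
                /* map the BAR here, e.g. with pci_iomap(pdev, i, 0) */
        }
        return 0;
}
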
index c64728f..8346906 100644 (file)
@@ -261,11 +261,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Build the PKO buffer pointer */
        hw_buffer.u64 = 0;
        if (skb_shinfo(skb)->nr_frags == 0) {
-               hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+               hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb->len;
        } else {
-               hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+               hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb_headlen(skb);
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
@@ -273,11 +273,12 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_frag_t *fs = skb_shinfo(skb)->frags + i;
 
                        hw_buffer.s.addr =
-                               XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
+                               XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
                        hw_buffer.s.size = skb_frag_size(fs);
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
-               hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+               hw_buffer.s.addr =
+                       XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
                hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.gather = 1;
@@ -349,10 +350,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        dst_release(skb_dst(skb));
        skb_dst_set(skb, NULL);
-#ifdef CONFIG_XFRM
-       secpath_reset(skb);
-#endif
-       nf_reset(skb);
+       skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
index a4ac3bf..b78ce9e 100644 (file)
@@ -1202,7 +1202,7 @@ static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
 
 static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
 {
-       return (void *)(physical_address);
+       return (void *)(uintptr_t)(physical_address);
 }
 
 static inline uint64_t cvmx_ptr_to_phys(void *ptr)
index 9ddd516..5792f49 100644 (file)
@@ -409,7 +409,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
                pRaInfo->PTModeSS = 3;
        else if (pRaInfo->HighestRate > 0x0b)
                pRaInfo->PTModeSS = 2;
-       else if (pRaInfo->HighestRate > 0x0b)
+       else if (pRaInfo->HighestRate > 0x03)
                pRaInfo->PTModeSS = 1;
        else
                pRaInfo->PTModeSS = 0;
index 664d93a..4fac9dc 100644 (file)
@@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
        }
 
        padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
-       if (!padapter->HalData)
-               DBG_88E("cant not alloc memory for HAL DATA\n");
+       if (!padapter->HalData) {
+               DBG_88E("Failed to allocate memory for HAL data\n");
+               goto free_adapter;
+       }
 
        /* step read_chip_version */
        rtw_hal_read_chip_version(padapter);
diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup
new file mode 100644 (file)
index 0000000..be3f5d6
--- /dev/null
@@ -0,0 +1,369 @@
+What:          /sys/accessibility/speakup/attrib_bleep
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Beeps the PC speaker when there is an attribute change such as
+               foreground or background color when using speakup review
+               commands. One = on, zero = off.
+
+What:          /sys/accessibility/speakup/bell_pos
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This works much like a typewriter bell. If for example 72 is
+               echoed to bell_pos, it will beep the PC speaker when typing on
+               a line past character 72.
+
+What:          /sys/accessibility/speakup/bleeps
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls whether one hears beeps through the PC speaker
+               when using speakup's review commands.
+               TODO: what values does it accept?
+
+What:          /sys/accessibility/speakup/bleep_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls the duration of the PC speaker beeps speakup
+               produces.
+               TODO: What are the units? Jiffies?
+
+What:          /sys/accessibility/speakup/cursor_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls the cursor delay when using arrow keys. On a very
+               slow connection, with the default setting, moving with the
+               arrows or backspacing can make speakup say the wrong
+               characters. Set this to a higher value to compensate for the
+               delay and keep the cursor position and speech in sync.
+
+What:          /sys/accessibility/speakup/delimiters
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Delimit a word from speakup.
+               TODO: add more info
+
+What:          /sys/accessibility/speakup/ex_num
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/key_echo
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls if speakup speaks keys when they are typed. One = on,
+               zero = off or don't echo keys.
+
+What:          /sys/accessibility/speakup/keymap
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Speakup keymap remaps keys to Speakup functions. It uses a
+               binary format. A special program called genmap is needed to
+               compile a textual keymap into the binary format, which is then
+               loaded into /sys/accessibility/speakup/keymap.
+
+What:          /sys/accessibility/speakup/no_interrupt
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls whether typing interrupts output from speakup. With
+               no_interrupt set to zero, typing on the keyboard will interrupt
+               speakup if, for example, the say screen command is used before
+               the entire screen is read. With no_interrupt set to one, if the
+               say screen command is used and one then types on the keyboard,
+               speakup will continue to say the whole screen until it
+               finishes.
+
+What:          /sys/accessibility/speakup/punc_all
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is a list of all the punctuation speakup should speak when
+               punc_level is set to four.
+
+What:          /sys/accessibility/speakup/punc_level
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls the level of punctuation spoken as the screen is
+               displayed, not reviewed. Levels range from zero (no
+               punctuation) to four (all punctuation). One corresponds to
+               punc_some, two corresponds to punc_most, and three and four
+               both correspond to punc_all. Some hardware synthesizers may
+               have distinct levels for three and four. Also note that if
+               punc_level is set to zero and key_echo is set to one, typed
+               punctuation is still spoken as it is typed.
+
+What:          /sys/accessibility/speakup/punc_most
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is a list of all the punctuation speakup should speak when
+               punc_level is set to two.
+
+What:          /sys/accessibility/speakup/punc_some
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is a list of all the punctuation speakup should speak when
+               punc_level is set to one.
+
+What:          /sys/accessibility/speakup/reading_punc
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Almost the same as punc_level, the differences being that
+               reading_punc controls the level of punctuation when reviewing
+               the screen with speakup's screen review commands. The other
+               difference is that reading_punc set to three speaks punc_all,
+               and reading_punc set to four speaks all punctuation, including
+               spaces.
+
+What:          /sys/accessibility/speakup/repeats
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   A list of characters speakup repeats. Normally, when there are
+               more than three characters in a row, speakup just reads three
+               of those characters. For example, "......" would be read as
+               dot, dot, dot. If a . is added to the list of characters in
+               repeats, "......" would be read as dot, dot, dot, times six.
+
+What:          /sys/accessibility/speakup/say_control
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   If set to one, speakup speaks shift, alt and control when those
+               keys are pressed. If say_control is set to zero, shift, ctrl,
+               and alt are not spoken when they are pressed.
+
+What:          /sys/accessibility/speakup/say_word_ctl
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/silent
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/spell_delay
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls how fast a word is spelled when speakup's say
+               word review command is pressed twice quickly to speak the
+               current word being reviewed. Zero just speaks the letters one
+               after another, while values one through four seem to introduce
+               more of a pause between the spelling of each letter by
+               speakup.
+
+What:          /sys/accessibility/speakup/synth
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the synthesizer driver currently in use. Reading
+               synth returns the synthesizer driver currently in use. Writing
+               synth switches to the given synthesizer driver, provided it is
+               either built into the kernel, or already loaded as a module.
+
+What:          /sys/accessibility/speakup/synth_direct
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Sends whatever is written to synth_direct directly to the
+               speech synthesizer in use, bypassing speakup. This could be
+               used to make the synthesizer speak a string, or to send
+               control sequences to the synthesizer to change how the
+               synthesizer behaves.
+
+What:          /sys/accessibility/speakup/version
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Reading version returns the version of speakup, and the version
+               of the synthesizer driver currently in use.
+
+What:          /sys/accessibility/speakup/i18n/announcements
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This file contains various general announcements, most of which
+               cannot be categorized.  You will find messages such as "You
+               killed Speakup", "I'm alive", "leaving help", "parked",
+               "unparked", and others. You will also find the names of the
+               screen edges and cursor tracking modes here.
+
+What:          /sys/accessibility/speakup/i18n/chartab
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO
+
+What:          /sys/accessibility/speakup/i18n/ctl_keys
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Here, you will find names of control keys.  These are used with
+               Speakup's say_control feature.
+
+What:          /sys/accessibility/speakup/i18n/function_names
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Here, you will find a list of names for Speakup functions.
+               These are used by the help system. For example, suppose that
+               you have activated help mode and pressed keypad 3. Speakup
+               says: "keypad 3 is character, say next." The message
+               "character, say next" names a Speakup function, and it comes
+               from this function_names file.
+
+What:          /sys/accessibility/speakup/i18n/states
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This file contains names for key states.
+               Again, these are part of the help system.  For instance, if you
+               had pressed speakup + keypad 3, you would hear:
+               "speakup keypad 3 is go to bottom edge."
+               The speakup key is depressed, so the name of the key state is
+               speakup.
+               This part of the message comes from the states collection.
+
+What:          /sys/accessibility/speakup/i18n/characters
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Through this sys entry, Speakup gives you the ability to change
+               how Speakup pronounces a given character. You could, for
+               example, change how some punctuation characters are spoken. You
+               can even change how Speakup will pronounce certain letters. For
+               further details see '12.  Changing the Pronunciation of
+               Characters' in Speakup User's Guide (file spkguide.txt in
+               source).
+
+What:          /sys/accessibility/speakup/i18n/colors
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   When you use the "say attributes" function, Speakup says the
+               name of the foreground and background colors.  These names come
+               from the i18n/colors file.
+
+What:          /sys/accessibility/speakup/i18n/formatted
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This group of messages contains embedded formatting codes, to
+               specify the type and width of displayed data.  If you change
+               these, you must preserve all of the formatting codes, and they
+               must appear in the order used by the default messages.
+
+What:          /sys/accessibility/speakup/i18n/key_names
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Again, key_names is used by Speakup's help system.  In the
+               previous example, Speakup said that you pressed "keypad 3."
+               This name came from the key_names file.
+
+What:          /sys/accessibility/speakup/<synth-name>/
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   In `/sys/accessibility/speakup` is a directory corresponding to
+               the synthesizer driver currently in use, e.g. `soft` for the
+               soft driver. This directory contains files which control the
+               speech synthesizer itself, as opposed to controlling the
+               speakup screen reader. The parameters in this directory have
+               the same names and functions across all supported
+               synthesizers. The range of values for freq, pitch, rate, and
+               vol is the same for all supported synthesizers, with the given
+               range being internally mapped by the driver to more or less
+               fit the range of values supported for a given parameter by the
+               individual synthesizer. Below is a description of the values
+               and parameters for the soft synthesizer, which is currently
+               the most commonly used.
+
+What:          /sys/accessibility/speakup/soft/caps_start
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is the string that is sent to the synthesizer to cause it
+               to start speaking uppercase letters. For the soft synthesizer
+               and most others, this causes the pitch of the voice to rise
+               above the currently set pitch.
+
+What:          /sys/accessibility/speakup/soft/caps_stop
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is the string sent to the synthesizer to cause it to stop
+               speaking uppercase letters. In the case of the soft synthesizer
+               and most others, this returns the pitch of the voice down to
+               the currently set pitch.
+
+What:          /sys/accessibility/speakup/soft/delay_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/soft/direct
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls whether punctuation is spoken by speakup or by the
+               synthesizer. For example, speakup speaks ">" as "greater",
+               while the espeak synthesizer used by the soft driver speaks
+               "greater than". Zero lets speakup speak the punctuation. One
+               lets the synthesizer itself speak punctuation.
+
+What:          /sys/accessibility/speakup/soft/freq
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the frequency of the speech synthesizer. Range is
+               0-9.
+
+What:          /sys/accessibility/speakup/soft/full_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/soft/jiffy_delta
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls how many jiffies the kernel gives to the
+               synthesizer. Setting this too high can make a system unstable,
+               or even crash it.
+
+What:          /sys/accessibility/speakup/soft/pitch
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the pitch of the synthesizer. The range is 0-9.
+
+What:          /sys/accessibility/speakup/soft/punct
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the amount of punctuation spoken by the
+               synthesizer. The range for the soft driver seems to be 0-2.
+               TODO: How is this related to speakup's punc_level, or
+               reading_punc.
+
+What:          /sys/accessibility/speakup/soft/rate
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the rate of the synthesizer. Range is from zero
+               slowest, to nine fastest.
+
+What:          /sys/accessibility/speakup/soft/tone
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the tone of the speech synthesizer. The range for
+               the soft driver seems to be 0-2. This seems to make no
+               difference if using espeak and the espeakup connector.
+               TODO: does espeakup support different tonalities?
+
+What:          /sys/accessibility/speakup/soft/trigger_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/soft/voice
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the voice used by the synthesizer if the
+               synthesizer can speak in more than one voice. The range for the
+               soft driver is 0-7. Note that while espeak supports multiple
+               voices, this parameter will not set the voice when the espeakup
+               connector is used between speakup and espeak.
+
+What:          /sys/accessibility/speakup/soft/vol
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the volume of the speech synthesizer. Range is 0-9,
+               with zero being the softest, and nine being the loudest.
+
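
Most of the attributes documented above are plain text files that accept a single value. A small user-space sketch of driving them from C (the paths come from the ABI text above; the value 5 is just an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[64];
        ssize_t n;
        int fd;

        /* Example: set the soft synthesizer rate (range 0-9 per the ABI). */
        fd = open("/sys/accessibility/speakup/soft/rate", O_WRONLY);
        if (fd >= 0) {
                if (write(fd, "5\n", 2) < 0)
                        perror("write rate");
                close(fd);
        }

        /* Read back which synthesizer driver is currently in use. */
        fd = open("/sys/accessibility/speakup/synth", O_RDONLY);
        if (fd >= 0) {
                n = read(fd, buf, sizeof(buf) - 1);
                if (n > 0) {
                        buf[n] = '\0';
                        printf("synth: %s", buf);
                }
                close(fd);
        }
        return 0;
}
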
index bc1eaa3..826016c 100644 (file)
@@ -12,7 +12,7 @@
 static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
        .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
                 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
-                SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+                SNDRV_PCM_INFO_SYNC_APPLPTR),
        .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
        .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
        .rate_min = 8000,
@@ -29,7 +29,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
 static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
        .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
                 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
-                SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+                SNDRV_PCM_INFO_SYNC_APPLPTR),
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
        .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
        SNDRV_PCM_RATE_48000,
index 23fba01..c6f9cf1 100644 (file)
@@ -289,6 +289,7 @@ int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
                                         VC_AUDIO_MSG_TYPE_STOP, false);
 }
 
+/* FIXME: this doesn't seem to work as expected for "draining" */
 int bcm2835_audio_drain(struct bcm2835_alsa_stream *alsa_stream)
 {
        struct vc_audio_msg m = {
index c6bb4aa..0823029 100644 (file)
@@ -1748,8 +1748,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
 
        priv->hw->max_signal = 100;
 
-       if (vnt_init(priv))
+       if (vnt_init(priv)) {
+               device_free_info(priv);
                return -ENODEV;
+       }
 
        device_print_info(priv);
        pci_set_drvdata(pcid, priv);
index e55c79e..98361ac 100644 (file)
@@ -968,6 +968,11 @@ static int __init n_hdlc_init(void)
        
 }      /* end of init_module() */
 
+#ifdef CONFIG_SPARC
+#undef __exitdata
+#define __exitdata
+#endif
+
 static const char hdlc_unregister_ok[] __exitdata =
        KERN_INFO "N_HDLC: line discipline unregistered\n";
 static const char hdlc_unregister_fail[] __exitdata =
index c68e2b3..836e736 100644 (file)
@@ -141,7 +141,7 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        serial8250_do_set_mctrl(port, mctrl);
 
-       if (!up->gpios) {
+       if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) {
                /*
                 * Turn off autoRTS if RTS is lowered and restore autoRTS
                 * setting if RTS is raised
@@ -456,7 +456,8 @@ static void omap_8250_set_termios(struct uart_port *port,
        up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
 
        if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW &&
-           !up->gpios) {
+           !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) &&
+           !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) {
                /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
                up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
                priv->efr |= UART_EFR_CTS;
index 6adbadd..0b8784c 100644 (file)
@@ -48,8 +48,6 @@ struct f815xxa_data {
        int idx;
 };
 
-#define PCI_NUM_BAR_RESOURCES  6
-
 struct serial_private {
        struct pci_dev          *dev;
        unsigned int            nr;
@@ -89,7 +87,7 @@ setup_port(struct serial_private *priv, struct uart_8250_port *port,
 {
        struct pci_dev *dev = priv->dev;
 
-       if (bar >= PCI_NUM_BAR_RESOURCES)
+       if (bar >= PCI_STD_NUM_BARS)
                return -EINVAL;
 
        if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
@@ -4060,7 +4058,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
                return -ENODEV;
 
        num_iomem = num_port = 0;
-       for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
                        num_port++;
                        if (first_port == -1)
@@ -4088,7 +4086,7 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
         */
        first_port = -1;
        num_port = 0;
-       for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (pci_resource_flags(dev, i) & IORESOURCE_IO &&
                    pci_resource_len(dev, i) == 8 &&
                    (first_port == -1 || (first_port + num_port) == i)) {
index 4789b5d..67a9eb3 100644 (file)
@@ -1032,6 +1032,7 @@ config SERIAL_SIFIVE_CONSOLE
        bool "Console on SiFive UART"
        depends on SERIAL_SIFIVE=y
        select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
        help
          Select this option if you would like to use a SiFive UART as the
          system console.
index 68d74f2..a32f0d2 100644 (file)
@@ -3,7 +3,7 @@
  * Freescale linflexuart serial port driver
  *
  * Copyright 2012-2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
  */
 
 #if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
@@ -246,12 +246,14 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
        struct tty_port *port = &sport->state->port;
        unsigned long flags, status;
        unsigned char rx;
+       bool brk;
 
        spin_lock_irqsave(&sport->lock, flags);
 
        status = readl(sport->membase + UARTSR);
        while (status & LINFLEXD_UARTSR_RMB) {
                rx = readb(sport->membase + BDRM);
+               brk = false;
                flg = TTY_NORMAL;
                sport->icount.rx++;
 
@@ -261,8 +263,11 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
                                status |= LINFLEXD_UARTSR_SZF;
                        if (status & LINFLEXD_UARTSR_BOF)
                                status |= LINFLEXD_UARTSR_BOF;
-                       if (status & LINFLEXD_UARTSR_FEF)
+                       if (status & LINFLEXD_UARTSR_FEF) {
+                               if (!rx)
+                                       brk = true;
                                status |= LINFLEXD_UARTSR_FEF;
+                       }
                        if (status & LINFLEXD_UARTSR_PE)
                                status |=  LINFLEXD_UARTSR_PE;
                }
@@ -271,13 +276,15 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
                       sport->membase + UARTSR);
                status = readl(sport->membase + UARTSR);
 
-               if (uart_handle_sysrq_char(sport, (unsigned char)rx))
-                       continue;
-
+               if (brk) {
+                       uart_handle_break(sport);
+               } else {
 #ifdef SUPPORT_SYSRQ
-                       sport->sysrq = 0;
+                       if (uart_handle_sysrq_char(sport, (unsigned char)rx))
+                               continue;
 #endif
-               tty_insert_flip_char(port, rx, flg);
+                       tty_insert_flip_char(port, rx, flg);
+               }
        }
 
        spin_unlock_irqrestore(&sport->lock, flags);
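The fsl_linflexuart hunks above start treating a framing error on an all-zero data byte as a line break and route it to uart_handle_break() instead of the sysrq/flip-buffer path. A hedged sketch of that decision for a generic receive handler (frame_error, ch and port are illustrative locals, not this driver's):

    /* Sketch: break vs. sysrq vs. normal character in an rx handler. */
    if (frame_error && ch == 0) {
            port->icount.brk++;
            uart_handle_break(port);        /* may arm the sysrq break state */
    } else if (uart_handle_sysrq_char(port, ch)) {
            /* consumed by the sysrq handler, nothing to push to the tty */
    } else {
            tty_insert_flip_char(&port->state->port, ch, TTY_NORMAL);
    }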
index 3e17bb8..537896c 100644 (file)
@@ -548,7 +548,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
                val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
                lpuart32_write(&sport->port, val, UARTFIFO);
        } else {
-               val = readb(sport->port.membase + UARTPFIFO);
+               val = readb(sport->port.membase + UARTCFIFO);
                val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
                writeb(val, sport->port.membase + UARTCFIFO);
        }
index 87c58f9..5e08f26 100644 (file)
@@ -2222,8 +2222,8 @@ static int imx_uart_probe(struct platform_device *pdev)
                return PTR_ERR(base);
 
        rxirq = platform_get_irq(pdev, 0);
-       txirq = platform_get_irq(pdev, 1);
-       rtsirq = platform_get_irq(pdev, 2);
+       txirq = platform_get_irq_optional(pdev, 1);
+       rtsirq = platform_get_irq_optional(pdev, 2);
 
        sport->port.dev = &pdev->dev;
        sport->port.mapbase = res->start;
index 03963af..d2d8b34 100644 (file)
@@ -740,7 +740,7 @@ static int __init owl_uart_init(void)
        return ret;
 }
 
-static void __init owl_uart_exit(void)
+static void __exit owl_uart_exit(void)
 {
        platform_driver_unregister(&owl_uart_platform_driver);
        uart_unregister_driver(&owl_uart_driver);
index c1b0d76..ff9a27d 100644 (file)
@@ -815,7 +815,7 @@ static int __init rda_uart_init(void)
        return ret;
 }
 
-static void __init rda_uart_exit(void)
+static void __exit rda_uart_exit(void)
 {
        platform_driver_unregister(&rda_uart_platform_driver);
        uart_unregister_driver(&rda_uart_driver);
index 6e713be..c4a414a 100644 (file)
@@ -1964,8 +1964,10 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
  *        console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
  *
  *     The optional form
+ *
  *        earlycon=<name>,0x<addr>,<options>
  *        console=<name>,0x<addr>,<options>
+ *
  *     is also accepted; the returned @iotype will be UPIO_MEM.
  *
  *     Returns 0 on success or -EINVAL on failure
index d907430..fb47812 100644 (file)
@@ -66,6 +66,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
 struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
                                      enum mctrl_gpio_idx gidx)
 {
+       if (gpios == NULL)
+               return NULL;
+
        return gpios->gpio[gidx];
 }
 EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
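With the NULL guard added above, mctrl_gpio_to_gpiod() tolerates a NULL mctrl_gpios table, which is what the omap-serial hunk at the top of this section relies on when no modem-control GPIOs are described for a port. A hedged caller-side sketch:

    /* Sketch: drive RTS through its GPIO only if one was actually mapped. */
    struct gpio_desc *rts = mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS);

    if (rts)        /* NULL when the GPIO, or the whole table, is absent */
            gpiod_set_value(rts, 1);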
index 4e754a4..22e5d4e 100644 (file)
@@ -2894,8 +2894,12 @@ static int sci_init_single(struct platform_device *dev,
        port->mapbase = res->start;
        sci_port->reg_size = resource_size(res);
 
-       for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
-               sci_port->irqs[i] = platform_get_irq(dev, i);
+       for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) {
+               if (i)
+                       sci_port->irqs[i] = platform_get_irq_optional(dev, i);
+               else
+                       sci_port->irqs[i] = platform_get_irq(dev, i);
+       }
 
        /* The SCI generates several interrupts. They can be muxed together or
         * connected to different interrupt lines. In the muxed case only one
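In the sh-sci hunk above only the first interrupt remains mandatory; the remaining slots are looked up with platform_get_irq_optional(), so a missing (muxed) line no longer triggers the core's "IRQ not found" error message. A small sketch of the distinction in a hypothetical probe:

    /* Sketch: mandatory vs. optional IRQ lookup. */
    int irq_main = platform_get_irq(pdev, 0);               /* logs if missing */
    int irq_aux  = platform_get_irq_optional(pdev, 1);      /* silent if missing */

    if (irq_main < 0)
            return irq_main;        /* -EPROBE_DEFER, -ENXIO, ... */
    if (irq_aux < 0)
            irq_aux = irq_main;     /* assumed fallback: reuse the muxed line */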
index b8b912b..06e79c1 100644 (file)
@@ -897,7 +897,8 @@ static int __init ulite_init(void)
 static void __exit ulite_exit(void)
 {
        platform_driver_unregister(&ulite_platform_driver);
-       uart_unregister_driver(&ulite_uart_driver);
+       if (ulite_uart_driver.state)
+               uart_unregister_driver(&ulite_uart_driver);
 }
 
 module_init(ulite_init);
index da4563a..4e55bc3 100644 (file)
@@ -1550,7 +1550,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
                goto err_out_id;
        }
 
-       uartps_major = cdns_uart_uart_driver->tty_driver->major;
        cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
 
        /*
@@ -1680,6 +1679,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
                console_port = NULL;
 #endif
 
+       uartps_major = cdns_uart_uart_driver->tty_driver->major;
        cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
                                                             "cts-override");
        return 0;
@@ -1741,6 +1741,12 @@ static int cdns_uart_remove(struct platform_device *pdev)
                console_port = NULL;
 #endif
 
+       /* If this is the last instance, reset the major number */
+       mutex_lock(&bitmap_lock);
+       if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
+               uartps_major = 0;
+       mutex_unlock(&bitmap_lock);
+
        uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
        return rc;
 }
index c41ddb6..b0a29ef 100644 (file)
@@ -159,8 +159,9 @@ static int cdns3_pci_probe(struct pci_dev *pdev,
                wrap->plat_dev = platform_device_register_full(&plat_info);
                if (IS_ERR(wrap->plat_dev)) {
                        pci_disable_device(pdev);
+                       err = PTR_ERR(wrap->plat_dev);
                        kfree(wrap);
-                       return PTR_ERR(wrap->plat_dev);
+                       return err;
                }
        }
 
index 06f1e10..1109dc5 100644 (file)
@@ -160,10 +160,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
        if (ret)
                goto err;
 
-       if (cdns->dr_mode != USB_DR_MODE_OTG) {
+       /* Initialize idle role to start with */
+       ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+       if (ret)
+               goto err;
+
+       switch (cdns->dr_mode) {
+       case USB_DR_MODE_UNKNOWN:
+       case USB_DR_MODE_OTG:
                ret = cdns3_hw_role_switch(cdns);
                if (ret)
                        goto err;
+               break;
+       case USB_DR_MODE_PERIPHERAL:
+               ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+               if (ret)
+                       goto err;
+               break;
+       case USB_DR_MODE_HOST:
+               ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+               if (ret)
+                       goto err;
+               break;
        }
 
        return ret;
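The cdns3 core hunk above first parks the controller in USB_ROLE_NONE and then derives the initial role from dr_mode instead of special-casing only OTG. Drivers typically obtain dr_mode from firmware via usb_get_dr_mode(); a hedged sketch of the mapping:

    /* Sketch: choose an initial role from the DT/ACPI dr_mode property. */
    enum usb_role role;

    switch (usb_get_dr_mode(dev)) {
    case USB_DR_MODE_HOST:
            role = USB_ROLE_HOST;
            break;
    case USB_DR_MODE_PERIPHERAL:
            role = USB_ROLE_DEVICE;
            break;
    default:                        /* OTG or unknown: let the hardware decide */
            role = USB_ROLE_NONE;
            break;
    }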
index 44f652e..e71240b 100644 (file)
@@ -234,9 +234,11 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
 static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
                                    struct usb_ctrlrequest *ctrl)
 {
+       struct cdns3_endpoint *priv_ep;
        __le16 *response_pkt;
        u16 usb_status = 0;
        u32 recip;
+       u8 index;
 
        recip = ctrl->bRequestType & USB_RECIP_MASK;
 
@@ -262,9 +264,13 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
        case USB_RECIP_INTERFACE:
                return cdns3_ep0_delegate_req(priv_dev, ctrl);
        case USB_RECIP_ENDPOINT:
-               /* check if endpoint is stalled */
+               index = cdns3_ep_addr_to_index(ctrl->wIndex);
+               priv_ep = priv_dev->eps[index];
+
+               /* check if endpoint is stalled or stall is pending */
                cdns3_select_ep(priv_dev, ctrl->wIndex);
-               if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)))
+               if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
+                   (priv_ep->flags & EP_STALL_PENDING))
                        usb_status =  BIT(USB_ENDPOINT_HALT);
                break;
        default:
@@ -332,7 +338,7 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
                         * for sending status stage.
                         * This time should be less then 3ms.
                         */
-                       usleep_range(1000, 2000);
+                       mdelay(1);
                        cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
                                               USB_CMD_STMODE |
                                               USB_STS_TMODE_SEL(tmode - 1));
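The usleep_range() to mdelay() switch above is the usual fix when a delay turns out to run in atomic context (for instance under a spinlock), where sleeping is not allowed; mdelay() busy-waits instead. The rule of thumb from Documentation/timers/timers-howto.rst, as a comment sketch:

    /*
     * Sketch of delay selection:
     *   - atomic context (spinlock held, hard IRQ):  udelay()/mdelay()
     *   - process context, below ~10 us:             udelay()
     *   - process context, ~10 us to ~20 ms:         usleep_range(min, max)
     *   - process context, longer delays:            msleep()
     */
    mdelay(1);      /* busy-wait ~1 ms; safe even where sleeping is forbidden */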
index 228cdc4..2ca280f 100644 (file)
@@ -2571,6 +2571,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
        switch (max_speed) {
        case USB_SPEED_FULL:
                writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
                break;
        case USB_SPEED_HIGH:
                writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
@@ -2662,6 +2663,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
 {
        int ret = 0;
 
+       /* Ensure 32-bit DMA Mask in case we switched back from Host mode */
+       ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
+       if (ret) {
+               dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
+               return ret;
+       }
+
        cdns3_drd_switch_gadget(cdns, 1);
        pm_runtime_get_sync(cdns->dev);
 
index 7fea499..fb8bd60 100644 (file)
@@ -461,10 +461,12 @@ static int usblp_release(struct inode *inode, struct file *file)
 
        mutex_lock(&usblp_mutex);
        usblp->used = 0;
-       if (usblp->present) {
+       if (usblp->present)
                usblp_unlink_urbs(usblp);
-               usb_autopm_put_interface(usblp->intf);
-       } else          /* finish cleanup from disconnect */
+
+       usb_autopm_put_interface(usblp->intf);
+
+       if (!usblp->present)            /* finish cleanup from disconnect */
                usblp_cleanup(usblp);
        mutex_unlock(&usblp_mutex);
        return 0;
index 9e26b01..9ae2a7a 100644 (file)
@@ -234,7 +234,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
                /* UHCI */
                int     region;
 
-               for (region = 0; region < PCI_ROM_RESOURCE; region++) {
+               for (region = 0; region < PCI_STD_NUM_BARS; region++) {
                        if (!(pci_resource_flags(dev, region) &
                                        IORESOURCE_IO))
                                continue;
index 726100d..c946d64 100644 (file)
@@ -139,14 +139,14 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
        struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
        int irq;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "otg");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "otg");
        if (irq > 0)
                goto out;
 
        if (irq == -EPROBE_DEFER)
                goto out;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
        if (irq > 0)
                goto out;
 
@@ -157,9 +157,6 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
        if (irq > 0)
                goto out;
 
-       if (irq != -EPROBE_DEFER)
-               dev_err(dwc->dev, "missing OTG IRQ\n");
-
        if (!irq)
                irq = -EINVAL;
 
index 8adb59f..86dc1db 100644 (file)
@@ -3264,14 +3264,14 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
        struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
        int irq;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
        if (irq > 0)
                goto out;
 
        if (irq == -EPROBE_DEFER)
                goto out;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
        if (irq > 0)
                goto out;
 
@@ -3282,9 +3282,6 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
        if (irq > 0)
                goto out;
 
-       if (irq != -EPROBE_DEFER)
-               dev_err(dwc->dev, "missing peripheral IRQ\n");
-
        if (!irq)
                irq = -EINVAL;
 
index 8deea8c..5567ed2 100644 (file)
@@ -16,14 +16,14 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
        struct platform_device  *dwc3_pdev = to_platform_device(dwc->dev);
        int irq;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "host");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
        if (irq > 0)
                goto out;
 
        if (irq == -EPROBE_DEFER)
                goto out;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
        if (irq > 0)
                goto out;
 
@@ -34,9 +34,6 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
        if (irq > 0)
                goto out;
 
-       if (irq != -EPROBE_DEFER)
-               dev_err(dwc->dev, "missing host IRQ\n");
-
        if (!irq)
                irq = -EINVAL;
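All three dwc3 IRQ-lookup hunks (otg, gadget, host) move the named lookups to platform_get_irq_byname_optional() and drop the driver's own "missing ... IRQ" message, since an absent name is expected and index 0 is still tried last. A hedged sketch of the pattern:

    /* Sketch: try a named IRQ quietly, fall back to index 0. */
    irq = platform_get_irq_byname_optional(pdev, "host");
    if (irq == -EPROBE_DEFER)
            return irq;
    if (irq <= 0)
            irq = platform_get_irq(pdev, 0);        /* last resort; may log */
    if (irq <= 0)
            return irq ? irq : -EINVAL;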
 
index d7e6116..d354036 100644 (file)
@@ -45,7 +45,7 @@ config USB_AT91
 
 config USB_LPC32XX
        tristate "LPC32XX USB Peripheral Controller"
-       depends on ARCH_LPC32XX
+       depends on ARCH_LPC32XX || COMPILE_TEST
        depends on I2C
        select USB_ISP1301
        help
index 8414fac..3d499d9 100644 (file)
@@ -48,6 +48,7 @@
 #define DRIVER_VERSION "02 May 2005"
 
 #define POWER_BUDGET   500     /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900     /* in mA */
 
 static const char      driver_name[] = "dummy_hcd";
 static const char      driver_desc[] = "USB Host+Gadget Emulator";
@@ -2432,7 +2433,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
        dum_hcd->stream_en_ep = 0;
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
-       dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+       dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
        dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
        dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
 #ifdef CONFIG_USB_OTG
index b3e073f..2b1f3cc 100644 (file)
@@ -1151,7 +1151,7 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
        u32 *p32, tmp, cbytes;
 
        /* Use optimal data transfer method based on source address and size */
-       switch (((u32) data) & 0x3) {
+       switch (((uintptr_t) data) & 0x3) {
        case 0: /* 32-bit aligned */
                p32 = (u32 *) data;
                cbytes = (bytes & ~0x3);
@@ -1252,7 +1252,7 @@ static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
        u32 *p32, tmp, cbytes;
 
        /* Use optimal data transfer method based on source address and size */
-       switch (((u32) data) & 0x3) {
+       switch (((uintptr_t) data) & 0x3) {
        case 0: /* 32-bit aligned */
                p32 = (u32 *) data;
                cbytes = (bytes & ~0x3);
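Casting the buffer pointer through uintptr_t rather than u32 keeps the alignment test correct, and warning-free, on 64-bit builds; only the low two bits matter. Sketch of the idiom (copy_words()/copy_bytes() are placeholders; kernel code gets uintptr_t from <linux/types.h>):

    /* Sketch: pick a copy strategy from the buffer's alignment. */
    switch ((uintptr_t)data & 0x3) {
    case 0:                         /* 32-bit aligned: move whole words */
            copy_words(data, bytes);
            break;
    default:                        /* unaligned: fall back to byte copies */
            copy_bytes(data, bytes);
            break;
    }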
index f6d0449..6c7f0a8 100644 (file)
@@ -728,7 +728,7 @@ static void quirk_usb_handoff_uhci(struct pci_dev *pdev)
        if (!pio_enabled(pdev))
                return;
 
-       for (i = 0; i < PCI_ROM_RESOURCE; i++)
+       for (i = 0; i < PCI_STD_NUM_BARS; i++)
                if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
                        base = pci_resource_start(pdev, i);
                        break;
index f498160..3351d07 100644 (file)
@@ -57,6 +57,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
                ret = platform_device_add_properties(pdev, role_switch_props);
                if (ret) {
                        dev_err(dev, "failed to register device properties\n");
+                       platform_device_put(pdev);
                        return ret;
                }
        }
index 9741cde..85ceb43 100644 (file)
@@ -3202,10 +3202,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
        if (usb_urb_dir_out(urb)) {
                len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
                                   seg->bounce_buf, new_buff_len, enqd_len);
-               if (len != seg->bounce_len)
+               if (len != new_buff_len)
                        xhci_warn(xhci,
                                "WARN Wrong bounce buffer write length: %zu != %d\n",
-                               len, seg->bounce_len);
+                               len, new_buff_len);
                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
                                                 max_pkt, DMA_TO_DEVICE);
        } else {
index 5008659..517ec32 100644 (file)
@@ -1032,7 +1032,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        writel(command, &xhci->op_regs->command);
        xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
-                               STS_SAVE, 0, 10 * 1000)) {
+                               STS_SAVE, 0, 20 * 1000)) {
        /*
         * AMD SNPS xHC 3.0 occasionally does not clear the
         * SSS bit of USBSTS and when driver tries to poll
@@ -1108,6 +1108,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                hibernated = true;
 
        if (!hibernated) {
+               /*
+                * Some controllers might lose power during suspend, so wait
+                * for the controller-not-ready bit to clear, just as in xHC init.
+                */
+               retval = xhci_handshake(&xhci->op_regs->status,
+                                       STS_CNR, 0, 10 * 1000 * 1000);
+               if (retval) {
+                       xhci_warn(xhci, "Controller not ready at resume %d\n",
+                                 retval);
+                       spin_unlock_irq(&xhci->lock);
+                       return retval;
+               }
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
@@ -3083,6 +3095,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        unsigned int ep_index;
        unsigned long flags;
        u32 ep_flag;
+       int err;
 
        xhci = hcd_to_xhci(hcd);
        if (!host_ep->hcpriv)
@@ -3142,7 +3155,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                xhci_free_command(xhci, cfg_cmd);
                goto cleanup;
        }
-       xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+       err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+                                       ep_index, 0);
+       if (err < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
+               xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+                               __func__, err);
+               goto cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -3156,8 +3179,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                                           ctrl_ctx, ep_flag, ep_flag);
        xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
 
-       xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+       err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
                                      udev->slot_id, false);
+       if (err < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
+               xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+                               __func__, err);
+               goto cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -4674,12 +4705,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
        alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
                desc, state, timeout);
 
-       /* If we found we can't enable hub-initiated LPM, or
+       /* If we found we can't enable hub-initiated LPM, and
         * the U1 or U2 exit latency was too high to allow
-        * device-initiated LPM as well, just stop searching.
+        * device-initiated LPM as well, then we will disable LPM
+        * for this device, so stop searching any further.
         */
-       if (alt_timeout == USB3_LPM_DISABLED ||
-                       alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+       if (alt_timeout == USB3_LPM_DISABLED) {
                *timeout = alt_timeout;
                return -E2BIG;
        }
@@ -4790,10 +4821,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
                if (intf->dev.driver) {
                        driver = to_usb_driver(intf->dev.driver);
                        if (driver && driver->disable_hub_initiated_lpm) {
-                               dev_dbg(&udev->dev, "Hub-initiated %s disabled "
-                                               "at request of driver %s\n",
-                                               state_name, driver->name);
-                               return xhci_get_timeout_no_hub_lpm(udev, state);
+                               dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+                                       state_name, driver->name);
+                               timeout = xhci_get_timeout_no_hub_lpm(udev,
+                                                                     state);
+                               if (timeout == USB3_LPM_DISABLED)
+                                       return timeout;
                        }
                }
 
@@ -5077,11 +5110,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                hcd->has_tt = 1;
        } else {
                /*
-                * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
-                * minor revision instead of sbrn. Minor revision is a two digit
-                * BCD containing minor and sub-minor numbers, only show minor.
+                * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+                * should return 0x31 for sbrn, or that the minor revision
+                * is a two-digit BCD containing minor and sub-minor numbers.
+                * This was later clarified in xHCI 1.2.
+                *
+                * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+                * minor revision set to 0x1 instead of 0x10.
                 */
-               minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+               if (xhci->usb3_rhub.min_rev == 0x1)
+                       minor_rev = 1;
+               else
+                       minor_rev = xhci->usb3_rhub.min_rev / 0x10;
 
                switch (minor_rev) {
                case 2:
@@ -5198,8 +5238,16 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
        unsigned int ep_index;
        unsigned long flags;
 
+       /*
+        * udev might be NULL if tt buffer is cleared during a failed device
+        * enumeration due to a halted control endpoint. The USB core might
+        * have allocated a new udev for the next enumeration attempt.
+        */
+
        xhci = hcd_to_xhci(hcd);
        udev = (struct usb_device *)ep->hcpriv;
+       if (!udev)
+               return;
        slot_id = udev->slot_id;
        ep_index = xhci_get_endpoint_index(&ep->desc);
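The resume hunk above polls USBSTS until the CNR ("controller not ready") bit clears, with the same 10 s budget xHC initialisation uses, before restoring registers. Outside xhci's own xhci_handshake() helper, the generic way to spin on a register bit is readl_poll_timeout(); a hedged sketch where status_reg is an assumed __iomem pointer and STS_CNR an assumed bit mask:

    #include <linux/iopoll.h>

    /* Sketch: wait up to 10 s for a 'not ready' bit to clear. */
    u32 val;
    int ret = readl_poll_timeout(status_reg, val, !(val & STS_CNR),
                                 1000, 10 * 1000 * 1000);
    if (ret)
            dev_warn(dev, "controller not ready after resume (%d)\n", ret);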
 
index 0a57c2c..7a6b122 100644 (file)
@@ -716,6 +716,10 @@ static int mts_usb_probe(struct usb_interface *intf,
 
        }
 
+       if (ep_in_current != &ep_in_set[2]) {
+               MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+               return -ENODEV;
+       }
 
        if ( ep_out == -1 ) {
                MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
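The microtek hunk above refuses to bind unless the two expected bulk-in endpoints were actually found, instead of dereferencing uninitialised entries later. Newer drivers usually let the USB core do this bookkeeping; a hedged sketch with usb_find_common_endpoints():

    /* Sketch: have the core locate one bulk-in and one bulk-out endpoint. */
    struct usb_endpoint_descriptor *bulk_in, *bulk_out;
    int err;

    err = usb_find_common_endpoints(intf->cur_altsetting,
                                    &bulk_in, &bulk_out, NULL, NULL);
    if (err) {
            dev_err(&intf->dev, "required endpoints not found\n");
            return -ENODEV;
    }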
index bdae62b..9bce583 100644 (file)
@@ -47,16 +47,6 @@ config USB_SEVSEG
          To compile this driver as a module, choose M here: the
          module will be called usbsevseg.
 
-config USB_RIO500
-       tristate "USB Diamond Rio500 support"
-       help
-         Say Y here if you want to connect a USB Rio500 mp3 player to your
-         computer's USB port. Please read <file:Documentation/usb/rio.rst>
-         for more information.
-
-         To compile this driver as a module, choose M here: the
-         module will be called rio500.
-
 config USB_LEGOTOWER
        tristate "USB Lego Infrared Tower support"
        help
index 109f54f..0d416eb 100644 (file)
@@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW)            += isight_firmware.o
 obj-$(CONFIG_USB_LCD)                  += usblcd.o
 obj-$(CONFIG_USB_LD)                   += ldusb.o
 obj-$(CONFIG_USB_LEGOTOWER)            += legousbtower.o
-obj-$(CONFIG_USB_RIO500)               += rio500.o
 obj-$(CONFIG_USB_TEST)                 += usbtest.o
 obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)    += ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)       += trancevibrator.o
index 344d523..6f5edb9 100644 (file)
@@ -75,6 +75,7 @@ struct adu_device {
        char                    serial_number[8];
 
        int                     open_count; /* number of times this port has been opened */
+       unsigned long           disconnected:1;
 
        char            *read_buffer_primary;
        int                     read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
 {
        unsigned long flags;
 
-       if (dev->udev == NULL)
+       if (dev->disconnected)
                return;
 
        /* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
        kfree(dev->read_buffer_secondary);
        kfree(dev->interrupt_in_buffer);
        kfree(dev->interrupt_out_buffer);
+       usb_put_dev(dev->udev);
        kfree(dev);
 }
 
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
        }
 
        dev = usb_get_intfdata(interface);
-       if (!dev || !dev->udev) {
+       if (!dev) {
                retval = -ENODEV;
                goto exit_no_device;
        }
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
        }
 
        adu_release_internal(dev);
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
                if (!dev->open_count)   /* ... and we're the last user */
                        adu_delete(dev);
@@ -354,7 +356,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                return -ERESTARTSYS;
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto exit;
@@ -518,7 +520,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
                goto exit_nolock;
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto exit;
@@ -663,7 +665,7 @@ static int adu_probe(struct usb_interface *interface,
 
        mutex_init(&dev->mtx);
        spin_lock_init(&dev->buflock);
-       dev->udev = udev;
+       dev->udev = usb_get_dev(udev);
        init_waitqueue_head(&dev->read_wait);
        init_waitqueue_head(&dev->write_wait);
 
@@ -762,14 +764,18 @@ static void adu_disconnect(struct usb_interface *interface)
 
        dev = usb_get_intfdata(interface);
 
-       mutex_lock(&dev->mtx);  /* not interruptible */
-       dev->udev = NULL;       /* poison */
        usb_deregister_dev(interface, &adu_class);
-       mutex_unlock(&dev->mtx);
+
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
 
        mutex_lock(&adutux_mutex);
        usb_set_intfdata(interface, NULL);
 
+       mutex_lock(&dev->mtx);  /* not interruptible */
+       dev->disconnected = 1;
+       mutex_unlock(&dev->mtx);
+
        /* if the device is not opened, then we clean up right now */
        if (!dev->open_count)
                adu_delete(dev);
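The adutux changes follow a common char-driver teardown recipe: take a reference on the usb_device at probe, poison the URBs at disconnect, mark the device with a disconnected flag instead of clearing dev->udev, and drop the reference only in the final delete. A hedged sketch of the disconnect side (the foo_* names and the simplified locking are illustrative):

    static void foo_disconnect(struct usb_interface *intf)
    {
            struct foo_dev *dev = usb_get_intfdata(intf);

            usb_deregister_dev(intf, &foo_class);   /* no new open() after this */

            usb_poison_urb(dev->in_urb);            /* outstanding I/O now fails */
            usb_poison_urb(dev->out_urb);

            mutex_lock(&dev->lock);
            dev->disconnected = 1;                  /* readers and writers check this */
            mutex_unlock(&dev->lock);

            if (!dev->open_count)                   /* otherwise last close() frees */
                    foo_delete(dev);
    }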
index cf5828c..34e6cd6 100644 (file)
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
                usb_free_urb(dev->urb);
                kfree(dev->name);
                kfree(dev->buf);
+               usb_put_intf(dev->interface);
                kfree(dev);
        }
 }
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
        if (dev == NULL)
                goto out;
 
+       dev->interface = usb_get_intf(interface);
+
        dev->buf = kmalloc(size, GFP_KERNEL);
 
        if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
                        goto out;
        }
 
-       dev->interface = interface;
-
        dev->in_ep = in_ep;
 
        if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
index f5bed9f..dce44fb 100644 (file)
@@ -54,11 +54,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-/* Module parameters */
-static DEFINE_MUTEX(iowarrior_mutex);
-
 static struct usb_driver iowarrior_driver;
-static DEFINE_MUTEX(iowarrior_open_disc_lock);
 
 /*--------------*/
 /*     data     */
@@ -87,6 +83,7 @@ struct iowarrior {
        char chip_serial[9];            /* the serial number string of the chip connected */
        int report_size;                /* number of bytes in a report */
        u16 product_id;
+       struct usb_anchor submitted;
 };
 
 /*--------------*/
@@ -243,6 +240,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
        kfree(dev->int_in_buffer);
        usb_free_urb(dev->int_in_urb);
        kfree(dev->read_queue);
+       usb_put_intf(dev->interface);
        kfree(dev);
 }
 
@@ -424,11 +422,13 @@ static ssize_t iowarrior_write(struct file *file,
                        retval = -EFAULT;
                        goto error;
                }
+               usb_anchor_urb(int_out_urb, &dev->submitted);
                retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
                if (retval) {
                        dev_dbg(&dev->interface->dev,
                                "submit error %d for urb nr.%d\n",
                                retval, atomic_read(&dev->write_busy));
+                       usb_unanchor_urb(int_out_urb);
                        goto error;
                }
                /* submit was ok */
@@ -477,8 +477,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
        if (!buffer)
                return -ENOMEM;
 
-       /* lock this object */
-       mutex_lock(&iowarrior_mutex);
        mutex_lock(&dev->mutex);
 
        /* verify that the device wasn't unplugged */
@@ -571,7 +569,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
 error_out:
        /* unlock the device */
        mutex_unlock(&dev->mutex);
-       mutex_unlock(&iowarrior_mutex);
        kfree(buffer);
        return retval;
 }
@@ -586,27 +583,20 @@ static int iowarrior_open(struct inode *inode, struct file *file)
        int subminor;
        int retval = 0;
 
-       mutex_lock(&iowarrior_mutex);
        subminor = iminor(inode);
 
        interface = usb_find_interface(&iowarrior_driver, subminor);
        if (!interface) {
-               mutex_unlock(&iowarrior_mutex);
-               printk(KERN_ERR "%s - error, can't find device for minor %d\n",
+               pr_err("%s - error, can't find device for minor %d\n",
                       __func__, subminor);
                return -ENODEV;
        }
 
-       mutex_lock(&iowarrior_open_disc_lock);
        dev = usb_get_intfdata(interface);
-       if (!dev) {
-               mutex_unlock(&iowarrior_open_disc_lock);
-               mutex_unlock(&iowarrior_mutex);
+       if (!dev)
                return -ENODEV;
-       }
 
        mutex_lock(&dev->mutex);
-       mutex_unlock(&iowarrior_open_disc_lock);
 
        /* Only one process can open each device, no sharing. */
        if (dev->opened) {
@@ -628,7 +618,6 @@ static int iowarrior_open(struct inode *inode, struct file *file)
 
 out:
        mutex_unlock(&dev->mutex);
-       mutex_unlock(&iowarrior_mutex);
        return retval;
 }
 
@@ -764,11 +753,13 @@ static int iowarrior_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->write_wait);
 
        dev->udev = udev;
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        iface_desc = interface->cur_altsetting;
        dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
+       init_usb_anchor(&dev->submitted);
+
        res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
        if (res) {
                dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -836,7 +827,6 @@ static int iowarrior_probe(struct usb_interface *interface,
        if (retval) {
                /* something prevented us from registering this driver */
                dev_err(&interface->dev, "Not able to get a minor for this device.\n");
-               usb_set_intfdata(interface, NULL);
                goto error;
        }
 
@@ -860,26 +850,15 @@ error:
  */
 static void iowarrior_disconnect(struct usb_interface *interface)
 {
-       struct iowarrior *dev;
-       int minor;
-
-       dev = usb_get_intfdata(interface);
-       mutex_lock(&iowarrior_open_disc_lock);
-       usb_set_intfdata(interface, NULL);
-       /* prevent device read, write and ioctl */
-       dev->present = 0;
-
-       minor = dev->minor;
-       mutex_unlock(&iowarrior_open_disc_lock);
-       /* give back our minor - this will call close() locks need to be dropped at this point*/
+       struct iowarrior *dev = usb_get_intfdata(interface);
+       int minor = dev->minor;
 
        usb_deregister_dev(interface, &iowarrior_class);
 
        mutex_lock(&dev->mutex);
 
        /* prevent device read, write and ioctl */
-
-       mutex_unlock(&dev->mutex);
+       dev->present = 0;
 
        if (dev->opened) {
                /* There is a process that holds a filedescriptor to the device ,
@@ -887,10 +866,13 @@ static void iowarrior_disconnect(struct usb_interface *interface)
                   Deleting the device is postponed until close() was called.
                 */
                usb_kill_urb(dev->int_in_urb);
+               usb_kill_anchored_urbs(&dev->submitted);
                wake_up_interruptible(&dev->read_wait);
                wake_up_interruptible(&dev->write_wait);
+               mutex_unlock(&dev->mutex);
        } else {
                /* no process is using the device, cleanup now */
+               mutex_unlock(&dev->mutex);
                iowarrior_delete(dev);
        }
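iowarrior now collects its write URBs on a usb_anchor so that disconnect can cancel everything still in flight with a single call. Minimal usage sketch of the anchor API:

    /* Sketch: anchor-based URB lifetime management. */
    init_usb_anchor(&dev->submitted);               /* once, at probe */

    usb_anchor_urb(urb, &dev->submitted);           /* before each submission */
    ret = usb_submit_urb(urb, GFP_KERNEL);
    if (ret)
            usb_unanchor_urb(urb);                  /* submission failed */

    usb_kill_anchored_urbs(&dev->submitted);        /* at disconnect: cancel all */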
 
index 6581774..f3108d8 100644 (file)
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
 struct ld_usb {
        struct mutex            mutex;          /* locks this structure */
        struct usb_interface    *intf;          /* save off the usb interface pointer */
+       unsigned long           disconnected:1;
 
        int                     open_count;     /* number of times this port has been opened */
 
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
        /* shutdown transfer */
        if (dev->interrupt_in_running) {
                dev->interrupt_in_running = 0;
-               if (dev->intf)
-                       usb_kill_urb(dev->interrupt_in_urb);
+               usb_kill_urb(dev->interrupt_in_urb);
        }
        if (dev->interrupt_out_busy)
-               if (dev->intf)
-                       usb_kill_urb(dev->interrupt_out_urb);
+               usb_kill_urb(dev->interrupt_out_urb);
 }
 
 /**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
  */
 static void ld_usb_delete(struct ld_usb *dev)
 {
-       ld_usb_abort_transfers(dev);
-
        /* free data structures */
        usb_free_urb(dev->interrupt_in_urb);
        usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
 
 resubmit:
        /* resubmit if we're still running */
-       if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+       if (dev->interrupt_in_running && !dev->buffer_overflow) {
                retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
                if (retval) {
                        dev_err(&dev->intf->dev,
@@ -392,7 +389,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
                retval = -ENODEV;
                goto unlock_exit;
        }
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
                mutex_unlock(&dev->mutex);
                /* unlock here as ld_usb_delete frees dev */
@@ -423,7 +420,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
 
        dev = file->private_data;
 
-       if (!dev->intf)
+       if (dev->disconnected)
                return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +459,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -542,7 +539,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -764,6 +761,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
        /* give back our minor */
        usb_deregister_dev(intf, &ld_usb_class);
 
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
+
        mutex_lock(&dev->mutex);
 
        /* if the device is not opened, then we clean up right now */
@@ -771,7 +771,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
                mutex_unlock(&dev->mutex);
                ld_usb_delete(dev);
        } else {
-               dev->intf = NULL;
+               dev->disconnected = 1;
                /* wake up pollers */
                wake_up_interruptible_all(&dev->read_wait);
                wake_up_interruptible_all(&dev->write_wait);
index 006cf13..9d4c52a 100644 (file)
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
 };
 
 MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
 
 #define LEGO_USB_TOWER_MINOR_BASE      160
 
@@ -191,6 +190,7 @@ struct lego_usb_tower {
        unsigned char           minor;          /* the starting minor number for this device */
 
        int                     open_count;     /* number of times this port has been opened */
+       unsigned long           disconnected:1;
 
        char*                   read_buffer;
        size_t                  read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
  */
 static inline void tower_delete (struct lego_usb_tower *dev)
 {
-       tower_abort_transfers (dev);
-
        /* free data structures */
        usb_free_urb(dev->interrupt_in_urb);
        usb_free_urb(dev->interrupt_out_urb);
        kfree (dev->read_buffer);
        kfree (dev->interrupt_in_buffer);
        kfree (dev->interrupt_out_buffer);
+       usb_put_dev(dev->udev);
        kfree (dev);
 }
 
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
                goto exit;
        }
 
-       mutex_lock(&open_disc_mutex);
        dev = usb_get_intfdata(interface);
-
        if (!dev) {
-               mutex_unlock(&open_disc_mutex);
                retval = -ENODEV;
                goto exit;
        }
 
        /* lock this device */
        if (mutex_lock_interruptible(&dev->lock)) {
-               mutex_unlock(&open_disc_mutex);
                retval = -ERESTARTSYS;
                goto exit;
        }
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
 
        /* allow opening only once */
        if (dev->open_count) {
-               mutex_unlock(&open_disc_mutex);
                retval = -EBUSY;
                goto unlock_exit;
        }
-       dev->open_count = 1;
-       mutex_unlock(&open_disc_mutex);
 
        /* reset the tower */
        result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
                dev_err(&dev->udev->dev,
                        "Couldn't submit interrupt_in_urb %d\n", retval);
                dev->interrupt_in_running = 0;
-               dev->open_count = 0;
                goto unlock_exit;
        }
 
        /* save device in the file's private structure */
        file->private_data = dev;
 
+       dev->open_count = 1;
+
 unlock_exit:
        mutex_unlock(&dev->lock);
 
@@ -423,10 +416,9 @@ static int tower_release (struct inode *inode, struct file *file)
 
        if (dev == NULL) {
                retval = -ENODEV;
-               goto exit_nolock;
+               goto exit;
        }
 
-       mutex_lock(&open_disc_mutex);
        if (mutex_lock_interruptible(&dev->lock)) {
                retval = -ERESTARTSYS;
                goto exit;
@@ -438,7 +430,8 @@ static int tower_release (struct inode *inode, struct file *file)
                retval = -ENODEV;
                goto unlock_exit;
        }
-       if (dev->udev == NULL) {
+
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
 
                /* unlock here as tower_delete frees dev */
@@ -456,10 +449,7 @@ static int tower_release (struct inode *inode, struct file *file)
 
 unlock_exit:
        mutex_unlock(&dev->lock);
-
 exit:
-       mutex_unlock(&open_disc_mutex);
-exit_nolock:
        return retval;
 }
 
@@ -477,10 +467,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
        if (dev->interrupt_in_running) {
                dev->interrupt_in_running = 0;
                mb();
-               if (dev->udev)
-                       usb_kill_urb (dev->interrupt_in_urb);
+               usb_kill_urb(dev->interrupt_in_urb);
        }
-       if (dev->interrupt_out_busy && dev->udev)
+       if (dev->interrupt_out_busy)
                usb_kill_urb(dev->interrupt_out_urb);
 }
 
@@ -516,7 +505,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
 
        dev = file->private_data;
 
-       if (!dev->udev)
+       if (dev->disconnected)
                return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +552,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -649,7 +638,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -759,7 +748,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
 
 resubmit:
        /* resubmit if we're still running */
-       if (dev->interrupt_in_running && dev->udev) {
+       if (dev->interrupt_in_running) {
                retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
                if (retval)
                        dev_err(&dev->udev->dev,
@@ -822,8 +811,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 
        mutex_init(&dev->lock);
 
-       dev->udev = udev;
+       dev->udev = usb_get_dev(udev);
        dev->open_count = 0;
+       dev->disconnected = 0;
 
        dev->read_buffer = NULL;
        dev->read_buffer_length = 0;
@@ -891,8 +881,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
                                  get_version_reply,
                                  sizeof(*get_version_reply),
                                  1000);
-       if (result < 0) {
-               dev_err(idev, "LEGO USB Tower get version control request failed\n");
+       if (result < sizeof(*get_version_reply)) {
+               if (result >= 0)
+                       result = -EIO;
+               dev_err(idev, "get version request failed: %d\n", result);
                retval = result;
                goto error;
        }
@@ -910,7 +902,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
        if (retval) {
                /* something prevented us from registering this driver */
                dev_err(idev, "Not able to get a minor for this device.\n");
-               usb_set_intfdata (interface, NULL);
                goto error;
        }
        dev->minor = interface->minor;
@@ -942,23 +933,24 @@ static void tower_disconnect (struct usb_interface *interface)
        int minor;
 
        dev = usb_get_intfdata (interface);
-       mutex_lock(&open_disc_mutex);
-       usb_set_intfdata (interface, NULL);
 
        minor = dev->minor;
 
-       /* give back our minor */
+       /* give back our minor and prevent further open() */
        usb_deregister_dev (interface, &tower_class);
 
+       /* stop I/O */
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
+
        mutex_lock(&dev->lock);
-       mutex_unlock(&open_disc_mutex);
 
        /* if the device is not opened, then we clean up right now */
        if (!dev->open_count) {
                mutex_unlock(&dev->lock);
                tower_delete (dev);
        } else {
-               dev->udev = NULL;
+               dev->disconnected = 1;
                /* wake up pollers */
                wake_up_interruptible_all(&dev->read_wait);
                wake_up_interruptible_all(&dev->write_wait);
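In the legousbtower get-version hunk above, the return value of usb_control_msg() (the number of bytes transferred on success) is checked against the expected reply size, so a short read is now reported as -EIO rather than silently accepted. Sketch of the pattern (MY_GET_VERSION_REQ is a placeholder request code):

    /* Sketch: treat a short control transfer as an error. */
    result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                             MY_GET_VERSION_REQ,
                             USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
                             0, 0, reply, sizeof(*reply), 1000);
    if (result < 0)
            return result;                  /* transfer error */
    if (result < sizeof(*reply))
            return -EIO;                    /* short read */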
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644 (file)
index 30cae5e..0000000
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/* 
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- * 
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003  replaced lock/unlock kernel with up/down
- *             Daniele Bellucci  bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR      64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
-        struct usb_device *rio_dev;     /* init: probe_rio */
-        unsigned int ifnum;             /* Interface number of the USB device */
-        int isopen;                     /* nz if open */
-        int present;                    /* Device is present on the bus */
-        char *obuf, *ibuf;              /* transfer buffers */
-        char bulk_in_ep, bulk_out_ep;   /* Endpoint assignments */
-        wait_queue_head_t wait_q;       /* for timeouts */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
-       struct rio_usb_data *rio = &rio_instance;
-
-       /* against disconnect() */
-       mutex_lock(&rio500_mutex);
-
-       if (rio->isopen || !rio->present) {
-               mutex_unlock(&rio500_mutex);
-               return -EBUSY;
-       }
-       rio->isopen = 1;
-
-       init_waitqueue_head(&rio->wait_q);
-
-
-       dev_info(&rio->rio_dev->dev, "Rio opened.\n");
-       mutex_unlock(&rio500_mutex);
-
-       return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
-       struct rio_usb_data *rio = &rio_instance;
-
-       /* against disconnect() */
-       mutex_lock(&rio500_mutex);
-
-       rio->isopen = 0;
-       if (!rio->present) {
-               /* cleanup has been delayed */
-               kfree(rio->ibuf);
-               kfree(rio->obuf);
-               rio->ibuf = NULL;
-               rio->obuf = NULL;
-       } else {
-               dev_info(&rio->rio_dev->dev, "Rio closed.\n");
-       }
-       mutex_unlock(&rio500_mutex);
-       return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct RioCommand rio_cmd;
-       struct rio_usb_data *rio = &rio_instance;
-       void __user *data;
-       unsigned char *buffer;
-       int result, requesttype;
-       int retries;
-       int retval=0;
-
-       mutex_lock(&rio500_mutex);
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               retval = -ENODEV;
-               goto err_out;
-       }
-
-       switch (cmd) {
-       case RIO_RECV_COMMAND:
-               data = (void __user *) arg;
-               if (data == NULL)
-                       break;
-               if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-               if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-                       retval = -EINVAL;
-                       goto err_out;
-               }
-               buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (buffer == NULL) {
-                       retval = -ENOMEM;
-                       goto err_out;
-               }
-               if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-                       retval = -EFAULT;
-                       free_page((unsigned long) buffer);
-                       goto err_out;
-               }
-
-               requesttype = rio_cmd.requesttype | USB_DIR_IN |
-                   USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-               dev_dbg(&rio->rio_dev->dev,
-                       "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-                       requesttype, rio_cmd.request, rio_cmd.value,
-                       rio_cmd.index, rio_cmd.length);
-               /* Send rio control message */
-               retries = 3;
-               while (retries) {
-                       result = usb_control_msg(rio->rio_dev,
-                                                usb_rcvctrlpipe(rio-> rio_dev, 0),
-                                                rio_cmd.request,
-                                                requesttype,
-                                                rio_cmd.value,
-                                                rio_cmd.index, buffer,
-                                                rio_cmd.length,
-                                                jiffies_to_msecs(rio_cmd.timeout));
-                       if (result == -ETIMEDOUT)
-                               retries--;
-                       else if (result < 0) {
-                               dev_err(&rio->rio_dev->dev,
-                                       "Error executing ioctrl. code = %d\n",
-                                       result);
-                               retries = 0;
-                       } else {
-                               dev_dbg(&rio->rio_dev->dev,
-                                       "Executed ioctl. Result = %d (data=%02x)\n",
-                                       result, buffer[0]);
-                               if (copy_to_user(rio_cmd.buffer, buffer,
-                                                rio_cmd.length)) {
-                                       free_page((unsigned long) buffer);
-                                       retval = -EFAULT;
-                                       goto err_out;
-                               }
-                               retries = 0;
-                       }
-
-                       /* rio_cmd.buffer contains a raw stream of single byte
-                          data which has been returned from rio.  Data is
-                          interpreted at application level.  For data that
-                          will be cast to data types longer than 1 byte, data
-                          will be little_endian and will potentially need to
-                          be swapped at the app level */
-
-               }
-               free_page((unsigned long) buffer);
-               break;
-
-       case RIO_SEND_COMMAND:
-               data = (void __user *) arg;
-               if (data == NULL)
-                       break;
-               if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-               if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-                       retval = -EINVAL;
-                       goto err_out;
-               }
-               buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (buffer == NULL) {
-                       retval = -ENOMEM;
-                       goto err_out;
-               }
-               if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-                       free_page((unsigned long)buffer);
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-
-               requesttype = rio_cmd.requesttype | USB_DIR_OUT |
-                   USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-               dev_dbg(&rio->rio_dev->dev,
-                       "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-                       requesttype, rio_cmd.request, rio_cmd.value,
-                       rio_cmd.index, rio_cmd.length);
-               /* Send rio control message */
-               retries = 3;
-               while (retries) {
-                       result = usb_control_msg(rio->rio_dev,
-                                                usb_sndctrlpipe(rio-> rio_dev, 0),
-                                                rio_cmd.request,
-                                                requesttype,
-                                                rio_cmd.value,
-                                                rio_cmd.index, buffer,
-                                                rio_cmd.length,
-                                                jiffies_to_msecs(rio_cmd.timeout));
-                       if (result == -ETIMEDOUT)
-                               retries--;
-                       else if (result < 0) {
-                               dev_err(&rio->rio_dev->dev,
-                                       "Error executing ioctrl. code = %d\n",
-                                       result);
-                               retries = 0;
-                       } else {
-                               dev_dbg(&rio->rio_dev->dev,
-                                       "Executed ioctl. Result = %d\n", result);
-                               retries = 0;
-
-                       }
-
-               }
-               free_page((unsigned long) buffer);
-               break;
-
-       default:
-               retval = -ENOTTY;
-               break;
-       }
-
-
-err_out:
-       mutex_unlock(&rio500_mutex);
-       return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
-         size_t count, loff_t * ppos)
-{
-       DEFINE_WAIT(wait);
-       struct rio_usb_data *rio = &rio_instance;
-
-       unsigned long copy_size;
-       unsigned long bytes_written = 0;
-       unsigned int partial;
-
-       int result = 0;
-       int maxretry;
-       int errn = 0;
-       int intr;
-
-       intr = mutex_lock_interruptible(&rio500_mutex);
-       if (intr)
-               return -EINTR;
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               mutex_unlock(&rio500_mutex);
-               return -ENODEV;
-       }
-
-
-
-       do {
-               unsigned long thistime;
-               char *obuf = rio->obuf;
-
-               thistime = copy_size =
-                   (count >= OBUF_SIZE) ? OBUF_SIZE : count;
-               if (copy_from_user(rio->obuf, buffer, copy_size)) {
-                       errn = -EFAULT;
-                       goto error;
-               }
-               maxretry = 5;
-               while (thistime) {
-                       if (!rio->rio_dev) {
-                               errn = -ENODEV;
-                               goto error;
-                       }
-                       if (signal_pending(current)) {
-                               mutex_unlock(&rio500_mutex);
-                               return bytes_written ? bytes_written : -EINTR;
-                       }
-
-                       result = usb_bulk_msg(rio->rio_dev,
-                                        usb_sndbulkpipe(rio->rio_dev, 2),
-                                        obuf, thistime, &partial, 5000);
-
-                       dev_dbg(&rio->rio_dev->dev,
-                               "write stats: result:%d thistime:%lu partial:%u\n",
-                               result, thistime, partial);
-
-                       if (result == -ETIMEDOUT) {     /* NAK - so hold for a while */
-                               if (!maxretry--) {
-                                       errn = -ETIME;
-                                       goto error;
-                               }
-                               prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-                               schedule_timeout(NAK_TIMEOUT);
-                               finish_wait(&rio->wait_q, &wait);
-                               continue;
-                       } else if (!result && partial) {
-                               obuf += partial;
-                               thistime -= partial;
-                       } else
-                               break;
-               }
-               if (result) {
-                       dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
-                               result);
-                       errn = -EIO;
-                       goto error;
-               }
-               bytes_written += copy_size;
-               count -= copy_size;
-               buffer += copy_size;
-       } while (count > 0);
-
-       mutex_unlock(&rio500_mutex);
-
-       return bytes_written ? bytes_written : -EIO;
-
-error:
-       mutex_unlock(&rio500_mutex);
-       return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
-       DEFINE_WAIT(wait);
-       struct rio_usb_data *rio = &rio_instance;
-       ssize_t read_count;
-       unsigned int partial;
-       int this_read;
-       int result;
-       int maxretry = 10;
-       char *ibuf;
-       int intr;
-
-       intr = mutex_lock_interruptible(&rio500_mutex);
-       if (intr)
-               return -EINTR;
-       /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               mutex_unlock(&rio500_mutex);
-               return -ENODEV;
-       }
-
-       ibuf = rio->ibuf;
-
-       read_count = 0;
-
-
-       while (count > 0) {
-               if (signal_pending(current)) {
-                       mutex_unlock(&rio500_mutex);
-                       return read_count ? read_count : -EINTR;
-               }
-               if (!rio->rio_dev) {
-                       mutex_unlock(&rio500_mutex);
-                       return -ENODEV;
-               }
-               this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
-               result = usb_bulk_msg(rio->rio_dev,
-                                     usb_rcvbulkpipe(rio->rio_dev, 1),
-                                     ibuf, this_read, &partial,
-                                     8000);
-
-               dev_dbg(&rio->rio_dev->dev,
-                       "read stats: result:%d this_read:%u partial:%u\n",
-                       result, this_read, partial);
-
-               if (partial) {
-                       count = this_read = partial;
-               } else if (result == -ETIMEDOUT || result == 15) {      /* FIXME: 15 ??? */
-                       if (!maxretry--) {
-                               mutex_unlock(&rio500_mutex);
-                               dev_err(&rio->rio_dev->dev,
-                                       "read_rio: maxretry timeout\n");
-                               return -ETIME;
-                       }
-                       prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-                       schedule_timeout(NAK_TIMEOUT);
-                       finish_wait(&rio->wait_q, &wait);
-                       continue;
-               } else if (result != -EREMOTEIO) {
-                       mutex_unlock(&rio500_mutex);
-                       dev_err(&rio->rio_dev->dev,
-                               "Read Whoops - result:%d partial:%u this_read:%u\n",
-                               result, partial, this_read);
-                       return -EIO;
-               } else {
-                       mutex_unlock(&rio500_mutex);
-                       return (0);
-               }
-
-               if (this_read) {
-                       if (copy_to_user(buffer, ibuf, this_read)) {
-                               mutex_unlock(&rio500_mutex);
-                               return -EFAULT;
-                       }
-                       count -= this_read;
-                       read_count += this_read;
-                       buffer += this_read;
-               }
-       }
-       mutex_unlock(&rio500_mutex);
-       return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
-       .owner =        THIS_MODULE,
-       .read =         read_rio,
-       .write =        write_rio,
-       .unlocked_ioctl = ioctl_rio,
-       .open =         open_rio,
-       .release =      close_rio,
-       .llseek =       noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
-       .name =         "rio500%d",
-       .fops =         &usb_rio_fops,
-       .minor_base =   RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
-                    const struct usb_device_id *id)
-{
-       struct usb_device *dev = interface_to_usbdev(intf);
-       struct rio_usb_data *rio = &rio_instance;
-       int retval = -ENOMEM;
-       char *ibuf, *obuf;
-
-       if (rio->present) {
-               dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
-               return -EBUSY;
-       }
-       dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-
-       obuf = kmalloc(OBUF_SIZE, GFP_KERNEL);
-       if (!obuf) {
-               dev_err(&dev->dev,
-                       "probe_rio: Not enough memory for the output buffer\n");
-               goto err_obuf;
-       }
-       dev_dbg(&intf->dev, "obuf address: %p\n", obuf);
-
-       ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL);
-       if (!ibuf) {
-               dev_err(&dev->dev,
-                       "probe_rio: Not enough memory for the input buffer\n");
-               goto err_ibuf;
-       }
-       dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf);
-
-       mutex_lock(&rio500_mutex);
-       rio->rio_dev = dev;
-       rio->ibuf = ibuf;
-       rio->obuf = obuf;
-       rio->present = 1;
-       mutex_unlock(&rio500_mutex);
-
-       retval = usb_register_dev(intf, &usb_rio_class);
-       if (retval) {
-               dev_err(&dev->dev,
-                       "Not able to get a minor for this device.\n");
-               goto err_register;
-       }
-
-       usb_set_intfdata(intf, rio);
-       return retval;
-
- err_register:
-       mutex_lock(&rio500_mutex);
-       rio->present = 0;
-       mutex_unlock(&rio500_mutex);
- err_ibuf:
-       kfree(obuf);
- err_obuf:
-       return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
-       struct rio_usb_data *rio = usb_get_intfdata (intf);
-
-       usb_set_intfdata (intf, NULL);
-       if (rio) {
-               usb_deregister_dev(intf, &usb_rio_class);
-
-               mutex_lock(&rio500_mutex);
-               if (rio->isopen) {
-                       rio->isopen = 0;
-                       /* better let it finish - the release will do whats needed */
-                       rio->rio_dev = NULL;
-                       mutex_unlock(&rio500_mutex);
-                       return;
-               }
-               kfree(rio->ibuf);
-               kfree(rio->obuf);
-
-               dev_info(&intf->dev, "USB Rio disconnected.\n");
-
-               rio->present = 0;
-               mutex_unlock(&rio500_mutex);
-       }
-}
-
-static const struct usb_device_id rio_table[] = {
-       { USB_DEVICE(0x0841, 1) },              /* Rio 500 */
-       { }                                     /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
-       .name =         "rio500",
-       .probe =        probe_rio,
-       .disconnect =   disconnect_rio,
-       .id_table =     rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644 (file)
index 6db7a58..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*  ----------------------------------------------------------------------
-    Copyright (C) 2000  Cesar Miquel  (miquel@df.uba.ar)
-    ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND                       0x1
-#define RIO_RECV_COMMAND                       0x2
-
-#define RIO_DIR_OUT                            0x0
-#define RIO_DIR_IN                             0x1
-
-struct RioCommand {
-       short length;
-       int request;
-       int requesttype;
-       int value;
-       int index;
-       void __user *buffer;
-       int timeout;
-};
index 9ba4a4e..61e9e98 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 
 #define IOCTL_GET_DRV_VERSION  2
 
 
-static DEFINE_MUTEX(lcd_mutex);
 static const struct usb_device_id id_table[] = {
        { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
 
-static DEFINE_MUTEX(open_disc_mutex);
-
-
 struct usb_lcd {
        struct usb_device       *udev;                  /* init: probe_lcd */
        struct usb_interface    *interface;             /* the interface for
@@ -57,6 +54,8 @@ struct usb_lcd {
                                                           using up all RAM */
        struct usb_anchor       submitted;              /* URBs to wait for
                                                           before suspend */
+       struct rw_semaphore     io_rwsem;
+       unsigned long           disconnected:1;
 };
 #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
 
@@ -81,40 +80,29 @@ static int lcd_open(struct inode *inode, struct file *file)
        struct usb_interface *interface;
        int subminor, r;
 
-       mutex_lock(&lcd_mutex);
        subminor = iminor(inode);
 
        interface = usb_find_interface(&lcd_driver, subminor);
        if (!interface) {
-               mutex_unlock(&lcd_mutex);
-               printk(KERN_ERR "USBLCD: %s - error, can't find device for minor %d\n",
+               pr_err("USBLCD: %s - error, can't find device for minor %d\n",
                       __func__, subminor);
                return -ENODEV;
        }
 
-       mutex_lock(&open_disc_mutex);
        dev = usb_get_intfdata(interface);
-       if (!dev) {
-               mutex_unlock(&open_disc_mutex);
-               mutex_unlock(&lcd_mutex);
-               return -ENODEV;
-       }
 
        /* increment our usage count for the device */
        kref_get(&dev->kref);
-       mutex_unlock(&open_disc_mutex);
 
        /* grab a power reference */
        r = usb_autopm_get_interface(interface);
        if (r < 0) {
                kref_put(&dev->kref, lcd_delete);
-               mutex_unlock(&lcd_mutex);
                return r;
        }
 
        /* save our object in the file's private structure */
        file->private_data = dev;
-       mutex_unlock(&lcd_mutex);
 
        return 0;
 }
@@ -142,6 +130,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 
        dev = file->private_data;
 
+       down_read(&dev->io_rwsem);
+
+       if (dev->disconnected) {
+               retval = -ENODEV;
+               goto out_up_io;
+       }
+
        /* do a blocking bulk read to get data from the device */
        retval = usb_bulk_msg(dev->udev,
                              usb_rcvbulkpipe(dev->udev,
@@ -158,6 +153,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
                        retval = bytes_read;
        }
 
+out_up_io:
+       up_read(&dev->io_rwsem);
+
        return retval;
 }
 
@@ -173,14 +171,12 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        switch (cmd) {
        case IOCTL_GET_HARD_VERSION:
-               mutex_lock(&lcd_mutex);
                bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
                sprintf(buf, "%1d%1d.%1d%1d",
                        (bcdDevice & 0xF000)>>12,
                        (bcdDevice & 0xF00)>>8,
                        (bcdDevice & 0xF0)>>4,
                        (bcdDevice & 0xF));
-               mutex_unlock(&lcd_mutex);
                if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
                        return -EFAULT;
                break;
@@ -237,11 +233,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
        if (r < 0)
                return -EINTR;
 
+       down_read(&dev->io_rwsem);
+
+       if (dev->disconnected) {
+               retval = -ENODEV;
+               goto err_up_io;
+       }
+
        /* create a urb, and a buffer for it, and copy the data to the urb */
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
                retval = -ENOMEM;
-               goto err_no_buf;
+               goto err_up_io;
        }
 
        buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +281,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
           the USB core will eventually free it entirely */
        usb_free_urb(urb);
 
+       up_read(&dev->io_rwsem);
 exit:
        return count;
 error_unanchor:
@@ -285,7 +289,8 @@ error_unanchor:
 error:
        usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
        usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+       up_read(&dev->io_rwsem);
        up(&dev->limit_sem);
        return retval;
 }
@@ -325,6 +330,7 @@ static int lcd_probe(struct usb_interface *interface,
 
        kref_init(&dev->kref);
        sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+       init_rwsem(&dev->io_rwsem);
        init_usb_anchor(&dev->submitted);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -365,7 +371,6 @@ static int lcd_probe(struct usb_interface *interface,
                /* something prevented us from registering this driver */
                dev_err(&interface->dev,
                        "Not able to get a minor for this device.\n");
-               usb_set_intfdata(interface, NULL);
                goto error;
        }
 
@@ -411,17 +416,18 @@ static int lcd_resume(struct usb_interface *intf)
 
 static void lcd_disconnect(struct usb_interface *interface)
 {
-       struct usb_lcd *dev;
+       struct usb_lcd *dev = usb_get_intfdata(interface);
        int minor = interface->minor;
 
-       mutex_lock(&open_disc_mutex);
-       dev = usb_get_intfdata(interface);
-       usb_set_intfdata(interface, NULL);
-       mutex_unlock(&open_disc_mutex);
-
        /* give back our minor */
        usb_deregister_dev(interface, &lcd_class);
 
+       down_write(&dev->io_rwsem);
+       dev->disconnected = 1;
+       up_write(&dev->io_rwsem);
+
+       usb_kill_anchored_urbs(&dev->submitted);
+
        /* decrement our usage count */
        kref_put(&dev->kref, lcd_delete);
 
index 6715a12..be0505b 100644 (file)
@@ -60,6 +60,7 @@ struct usb_yurex {
 
        struct kref             kref;
        struct mutex            io_mutex;
+       unsigned long           disconnected:1;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       waitq;
 
@@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref)
                                dev->int_buffer, dev->urb->transfer_dma);
                usb_free_urb(dev->urb);
        }
+       usb_put_intf(dev->interface);
        usb_put_dev(dev->udev);
        kfree(dev);
 }
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
        switch (status) {
        case 0: /*success*/
                break;
+       /* The device is terminated or messed up, give up */
        case -EOVERFLOW:
                dev_err(&dev->interface->dev,
                        "%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
        case -ENOENT:
        case -ESHUTDOWN:
        case -EILSEQ:
-               /* The device is terminated, clean up */
+       case -EPROTO:
+       case -ETIME:
                return;
        default:
                dev_err(&dev->interface->dev,
                        "%s - unknown status received: %d\n", __func__, status);
-               goto exit;
+               return;
        }
 
        /* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
                break;
        }
 
-exit:
        retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
        if (retval) {
                dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
        init_waitqueue_head(&dev->waitq);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        /* set up the endpoint information */
        iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
 
        /* prevent more I/O from starting */
        usb_poison_urb(dev->urb);
+       usb_poison_urb(dev->cntl_urb);
        mutex_lock(&dev->io_mutex);
-       dev->interface = NULL;
+       dev->disconnected = 1;
        mutex_unlock(&dev->io_mutex);
 
        /* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        dev = file->private_data;
 
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* already disconnected */
+       if (dev->disconnected) {                /* already disconnected */
                mutex_unlock(&dev->io_mutex);
                return -ENODEV;
        }
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                goto error;
 
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* already disconnected */
+       if (dev->disconnected) {                /* already disconnected */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
index d1a0a35..0824099 100644 (file)
@@ -211,6 +211,7 @@ struct usbhs_priv;
 /* DCPCTR */
 #define BSTS           (1 << 15)       /* Buffer Status */
 #define SUREQ          (1 << 14)       /* Sending SETUP Token */
+#define INBUFM         (1 << 14)       /* (PIPEnCTR) Transfer Buffer Monitor */
 #define CSSTS          (1 << 12)       /* CSSTS Status */
 #define        ACLRM           (1 << 9)        /* Buffer Auto-Clear Mode */
 #define SQCLR          (1 << 8)        /* Toggle Bit Clear */
index 2a01ceb..86637cd 100644 (file)
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
        list_del_init(&pkt->node);
 }
 
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
        return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
index 88d1816..c3d3cc3 100644 (file)
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
                    void *buf, int len, int zero, int sequence);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
 void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
 
 #endif /* RENESAS_USB_FIFO_H */
index 4d571a5..e5ef569 100644 (file)
@@ -722,8 +722,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
        struct device *dev = usbhsg_gpriv_to_dev(gpriv);
        unsigned long flags;
-
-       usbhsg_pipe_disable(uep);
+       int ret = 0;
 
        dev_dbg(dev, "set halt %d (pipe %d)\n",
                halt, usbhs_pipe_number(pipe));
@@ -731,6 +730,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        /********************  spin lock ********************/
        usbhs_lock(priv, flags);
 
+       /*
+        * According to usb_ep_set_halt()'s description, this function should
+        * return -EAGAIN if the IN endpoint has any queue or data. Note
+        * that the usbhs_pipe_is_dir_in() returns false if the pipe is an
+        * IN endpoint in the gadget mode.
+        */
+       if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+           usbhs_pipe_contains_transmittable_data(pipe))) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        if (halt)
                usbhs_pipe_stall(pipe);
        else
@@ -741,10 +752,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        else
                usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
 
+out:
        usbhs_unlock(priv, flags);
        /********************  spin unlock ******************/
 
-       return 0;
+       return ret;
 }
 
 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
index c4922b9..9e5afdd 100644 (file)
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
        return -EBUSY;
 }
 
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+       u16 val;
+
+       /* Do not support for DCP pipe */
+       if (usbhs_pipe_is_dcp(pipe))
+               return false;
+
+       val = usbhsp_pipectrl_get(pipe);
+       if (val & INBUFM)
+               return true;
+
+       return false;
+}
+
 /*
  *             PID ctrl
  */
index 3080423..3b13052 100644 (file)
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
 void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
                                       int needs_bfre, int bfre_enable);
 int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
 void usbhs_pipe_enable(struct usbhs_pipe *pipe);
 void usbhs_pipe_disable(struct usbhs_pipe *pipe);
 void usbhs_pipe_stall(struct usbhs_pipe *pipe);
index f0688c4..25e81fa 100644 (file)
@@ -1030,6 +1030,9 @@ static const struct usb_device_id id_table_combined[] = {
        /* EZPrototypes devices */
        { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
        { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+       /* Sienna devices */
+       { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+       { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
        { }                                     /* Terminating entry */
 };
 
index f12d806..22d6621 100644 (file)
@@ -39,6 +39,9 @@
 
 #define FTDI_LUMEL_PD12_PID    0x6002
 
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID                0x8348
+
 /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
 #define CYBER_CORTEX_AV_PID    0x8698
 
 #define BANDB_ZZ_PROG1_USB_PID 0xBA02
 
 /*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID            0x0920
+#define ECHELON_U20_PID                0x7500
+
+/*
  * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
  */
 #define INTREPID_VID           0x093C
index d34779f..e66a59e 100644 (file)
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
 
        ep_desc = find_ep(serial, endpoint);
        if (!ep_desc) {
-               /* leak the urb, something's wrong and the callers don't care */
-               return urb;
+               usb_free_urb(urb);
+               return NULL;
        }
        if (usb_endpoint_xfer_int(ep_desc)) {
                ep_type_name = "INT";
index 38e920a..06ab016 100644 (file)
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_PH8_AUDIO            0x0083
 #define CINTERION_PRODUCT_AHXX_2RMNET          0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO           0x0085
+#define CINTERION_PRODUCT_CLS8                 0x00b0
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -1154,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
          .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff),    /* Telit FN980 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff),    /* Telit FN980 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff),    /* Telit FN980 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),    /* Telit FN980 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1847,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+         .driver_info = RSVD(0) | RSVD(4) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
index a3179fe..8f066bb 100644 (file)
@@ -314,10 +314,7 @@ static void serial_cleanup(struct tty_struct *tty)
        serial = port->serial;
        owner = serial->type->driver.owner;
 
-       mutex_lock(&serial->disc_mutex);
-       if (!serial->disconnected)
-               usb_autopm_put_interface(serial->interface);
-       mutex_unlock(&serial->disc_mutex);
+       usb_autopm_put_interface(serial->interface);
 
        usb_serial_put(serial);
        module_put(owner);
index 9656274..5f61d99 100644 (file)
@@ -4409,18 +4409,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
        /* USB data support is optional */
        ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
        if (ret == 0) {
-               port->typec_caps.data = typec_find_port_data_role(cap_str);
-               if (port->typec_caps.data < 0)
-                       return -EINVAL;
+               ret = typec_find_port_data_role(cap_str);
+               if (ret < 0)
+                       return ret;
+               port->typec_caps.data = ret;
        }
 
        ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
        if (ret < 0)
                return ret;
 
-       port->typec_caps.type = typec_find_port_power_role(cap_str);
-       if (port->typec_caps.type < 0)
-               return -EINVAL;
+       ret = typec_find_port_power_role(cap_str);
+       if (ret < 0)
+               return ret;
+       port->typec_caps.type = ret;
        port->port_type = port->typec_caps.type;
 
        if (port->port_type == TYPEC_PORT_SNK)
index 6c10369..d99700c 100644 (file)
@@ -75,6 +75,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
 
        if (cur != 0xff) {
                mutex_unlock(&dp->con->lock);
+               if (dp->con->port_altmode[cur] == alt)
+                       return 0;
                return -EBUSY;
        }
 
index 907e20e..d772fce 100644 (file)
@@ -195,7 +195,6 @@ struct ucsi_ccg {
 
        /* fw build with vendor information */
        u16 fw_build;
-       bool run_isr; /* flag to call ISR routine during resume */
        struct work_struct pm_work;
 };
 
@@ -224,18 +223,6 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
        if (quirks && quirks->max_read_len)
                max_read_len = quirks->max_read_len;
 
-       if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
-           uc->fw_version <= CCG_OLD_FW_VERSION) {
-               mutex_lock(&uc->lock);
-               /*
-                * Do not schedule pm_work to run ISR in
-                * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
-                * since we are already in ISR path.
-                */
-               uc->run_isr = false;
-               mutex_unlock(&uc->lock);
-       }
-
        pm_runtime_get_sync(uc->dev);
        while (rem_len > 0) {
                msgs[1].buf = &data[len - rem_len];
@@ -278,18 +265,6 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
        msgs[0].len = len + sizeof(rab);
        msgs[0].buf = buf;
 
-       if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
-           uc->fw_version <= CCG_OLD_FW_VERSION) {
-               mutex_lock(&uc->lock);
-               /*
-                * Do not schedule pm_work to run ISR in
-                * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
-                * since we are already in ISR path.
-                */
-               uc->run_isr = false;
-               mutex_unlock(&uc->lock);
-       }
-
        pm_runtime_get_sync(uc->dev);
        status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
        if (status < 0) {
@@ -1130,7 +1105,6 @@ static int ucsi_ccg_probe(struct i2c_client *client,
        uc->ppm.sync = ucsi_ccg_sync;
        uc->dev = dev;
        uc->client = client;
-       uc->run_isr = true;
        mutex_init(&uc->lock);
        INIT_WORK(&uc->work, ccg_update_firmware);
        INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1188,6 +1162,8 @@ static int ucsi_ccg_probe(struct i2c_client *client,
 
        pm_runtime_set_active(uc->dev);
        pm_runtime_enable(uc->dev);
+       pm_runtime_use_autosuspend(uc->dev);
+       pm_runtime_set_autosuspend_delay(uc->dev, 5000);
        pm_runtime_idle(uc->dev);
 
        return 0;
@@ -1229,7 +1205,6 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct ucsi_ccg *uc = i2c_get_clientdata(client);
-       bool schedule = true;
 
        /*
         * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
@@ -1237,17 +1212,8 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
         * Schedule a work to call ISR as a workaround.
         */
        if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
-           uc->fw_version <= CCG_OLD_FW_VERSION) {
-               mutex_lock(&uc->lock);
-               if (!uc->run_isr) {
-                       uc->run_isr = true;
-                       schedule = false;
-               }
-               mutex_unlock(&uc->lock);
-
-               if (schedule)
-                       schedule_work(&uc->pm_work);
-       }
+           uc->fw_version <= CCG_OLD_FW_VERSION)
+               schedule_work(&uc->pm_work);
 
        return 0;
 }
index c31d17d..2dc5876 100644 (file)
@@ -61,6 +61,7 @@ struct usb_skel {
        spinlock_t              err_lock;               /* lock for errors */
        struct kref             kref;
        struct mutex            io_mutex;               /* synchronize I/O with disconnect */
+       unsigned long           disconnected:1;
        wait_queue_head_t       bulk_in_wait;           /* to wait for an ongoing read */
 };
 #define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -73,6 +74,7 @@ static void skel_delete(struct kref *kref)
        struct usb_skel *dev = to_skel_dev(kref);
 
        usb_free_urb(dev->bulk_in_urb);
+       usb_put_intf(dev->interface);
        usb_put_dev(dev->udev);
        kfree(dev->bulk_in_buffer);
        kfree(dev);
@@ -124,10 +126,7 @@ static int skel_release(struct inode *inode, struct file *file)
                return -ENODEV;
 
        /* allow the device to be autosuspended */
-       mutex_lock(&dev->io_mutex);
-       if (dev->interface)
-               usb_autopm_put_interface(dev->interface);
-       mutex_unlock(&dev->io_mutex);
+       usb_autopm_put_interface(dev->interface);
 
        /* decrement the count on our device */
        kref_put(&dev->kref, skel_delete);
@@ -231,8 +230,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
 
        dev = file->private_data;
 
-       /* if we cannot read at all, return EOF */
-       if (!dev->bulk_in_urb || !count)
+       if (!count)
                return 0;
 
        /* no concurrent readers */
@@ -240,7 +238,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
        if (rv < 0)
                return rv;
 
-       if (!dev->interface) {          /* disconnect() was called */
+       if (dev->disconnected) {                /* disconnect() was called */
                rv = -ENODEV;
                goto exit;
        }
@@ -422,7 +420,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
 
        /* this lock makes sure we don't submit URBs to gone devices */
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* disconnect() was called */
+       if (dev->disconnected) {                /* disconnect() was called */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
@@ -507,7 +505,7 @@ static int skel_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->bulk_in_wait);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        /* set up the endpoint information */
        /* use only the first bulk-in and bulk-out endpoints */
@@ -573,9 +571,10 @@ static void skel_disconnect(struct usb_interface *interface)
 
        /* prevent more I/O from starting */
        mutex_lock(&dev->io_mutex);
-       dev->interface = NULL;
+       dev->disconnected = 1;
        mutex_unlock(&dev->io_mutex);
 
+       usb_kill_urb(dev->bulk_in_urb);
        usb_kill_anchored_urbs(&dev->submitted);
 
        /* decrement our usage count */
index 585a84d..65850e9 100644 (file)
@@ -1195,12 +1195,12 @@ static int vhci_start(struct usb_hcd *hcd)
        if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
                err = vhci_init_attr_group();
                if (err) {
-                       pr_err("init attr group\n");
+                       dev_err(hcd_dev(hcd), "init attr group failed, err = %d\n", err);
                        return err;
                }
                err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
                if (err) {
-                       pr_err("create sysfs files\n");
+                       dev_err(hcd_dev(hcd), "create sysfs files failed, err = %d\n", err);
                        vhci_finish_attr_group();
                        return err;
                }
index 0220616..379a02c 100644 (file)
@@ -110,13 +110,15 @@ static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
 static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
 {
        struct resource *res;
-       int bar;
+       int i;
        struct vfio_pci_dummy_resource *dummy_res;
 
        INIT_LIST_HEAD(&vdev->dummy_resources_list);
 
-       for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
-               res = vdev->pdev->resource + bar;
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+               int bar = i + PCI_STD_RESOURCES;
+
+               res = &vdev->pdev->resource[bar];
 
                if (!IS_ENABLED(CONFIG_VFIO_PCI_MMAP))
                        goto no_mmap;
@@ -399,7 +401,8 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
 
        vfio_config_free(vdev);
 
-       for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+               bar = i + PCI_STD_RESOURCES;
                if (!vdev->barmap[bar])
                        continue;
                pci_iounmap(pdev, vdev->barmap[bar]);
index f0891bd..90c0b80 100644 (file)
@@ -450,30 +450,32 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
        int i;
-       __le32 *bar;
+       __le32 *vbar;
        u64 mask;
 
-       bar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
+       vbar = (__le32 *)&vdev->vconfig[PCI_BASE_ADDRESS_0];
 
-       for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++, bar++) {
-               if (!pci_resource_start(pdev, i)) {
-                       *bar = 0; /* Unmapped by host = unimplemented to user */
+       for (i = 0; i < PCI_STD_NUM_BARS; i++, vbar++) {
+               int bar = i + PCI_STD_RESOURCES;
+
+               if (!pci_resource_start(pdev, bar)) {
+                       *vbar = 0; /* Unmapped by host = unimplemented to user */
                        continue;
                }
 
-               mask = ~(pci_resource_len(pdev, i) - 1);
+               mask = ~(pci_resource_len(pdev, bar) - 1);
 
-               *bar &= cpu_to_le32((u32)mask);
-               *bar |= vfio_generate_bar_flags(pdev, i);
+               *vbar &= cpu_to_le32((u32)mask);
+               *vbar |= vfio_generate_bar_flags(pdev, bar);
 
-               if (*bar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
-                       bar++;
-                       *bar &= cpu_to_le32((u32)(mask >> 32));
+               if (*vbar & cpu_to_le32(PCI_BASE_ADDRESS_MEM_TYPE_64)) {
+                       vbar++;
+                       *vbar &= cpu_to_le32((u32)(mask >> 32));
                        i++;
                }
        }
 
-       bar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
+       vbar = (__le32 *)&vdev->vconfig[PCI_ROM_ADDRESS];
 
        /*
         * NB. REGION_INFO will have reported zero size if we weren't able
@@ -483,14 +485,14 @@ static void vfio_bar_fixup(struct vfio_pci_device *vdev)
        if (pci_resource_start(pdev, PCI_ROM_RESOURCE)) {
                mask = ~(pci_resource_len(pdev, PCI_ROM_RESOURCE) - 1);
                mask |= PCI_ROM_ADDRESS_ENABLE;
-               *bar &= cpu_to_le32((u32)mask);
+               *vbar &= cpu_to_le32((u32)mask);
        } else if (pdev->resource[PCI_ROM_RESOURCE].flags &
                                        IORESOURCE_ROM_SHADOW) {
                mask = ~(0x20000 - 1);
                mask |= PCI_ROM_ADDRESS_ENABLE;
-               *bar &= cpu_to_le32((u32)mask);
+               *vbar &= cpu_to_le32((u32)mask);
        } else
-               *bar = 0;
+               *vbar = 0;
 
        vdev->bardirty = false;
 }
index ee6ee91..8a2c760 100644 (file)
@@ -86,8 +86,8 @@ struct vfio_pci_reflck {
 
 struct vfio_pci_device {
        struct pci_dev          *pdev;
-       void __iomem            *barmap[PCI_STD_RESOURCE_END + 1];
-       bool                    bar_mmap_supported[PCI_STD_RESOURCE_END + 1];
+       void __iomem            *barmap[PCI_STD_NUM_BARS];
+       bool                    bar_mmap_supported[PCI_STD_NUM_BARS];
        u8                      *pci_config_map;
        u8                      *vconfig;
        struct perm_bits        *msi_perm;
index 7804869..0563080 100644 (file)
@@ -161,6 +161,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
 
        vhost_test_stop(n, &private);
        vhost_test_flush(n);
+       vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
@@ -237,6 +238,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
        }
        vhost_test_stop(n, &priv);
        vhost_test_flush(n);
+       vhost_dev_stop(&n->dev);
        vhost_dev_reset_owner(&n->dev, umem);
 done:
        mutex_unlock(&n->dev.mutex);
index 2dc5703..7c4483c 100644 (file)
@@ -2593,7 +2593,7 @@ static void radeon_set_suspend(struct radeonfb_info *rinfo, int suspend)
                 * calling pci_set_power_state()
                 */
                radeonfb_whack_power_state(rinfo, PCI_D2);
-               __pci_complete_power_transition(rinfo->pdev, PCI_D2);
+               pci_platform_power_transition(rinfo->pdev, PCI_D2);
        } else {
                printk(KERN_DEBUG "radeonfb (%s): switching to D0 state...\n",
                       pci_name(rinfo->pdev));
index e6a1c80..19af562 100644 (file)
@@ -1774,7 +1774,7 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
        int err, idx, bar;
        bool res_id_found = false;
 
-       for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+       for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
                        continue;
                idx++;
@@ -1784,7 +1784,7 @@ int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id, const
        if (!ap)
                return -ENOMEM;
 
-       for (idx = 0, bar = 0; bar < PCI_ROM_RESOURCE; bar++) {
+       for (idx = 0, bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
                        continue;
                ap->ranges[idx].base = pci_resource_start(pdev, bar);
index 51d97ec..1caa372 100644 (file)
@@ -653,7 +653,7 @@ static void efifb_fixup_resources(struct pci_dev *dev)
        if (!base)
                return;
 
-       for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+       for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                struct resource *res = &dev->resource[i];
 
                if (!(res->flags & IORESOURCE_MEM))
index 228a89b..16f60c1 100644 (file)
@@ -18,23 +18,6 @@ obj-$(CONFIG_SPU_BASE)                       += logo_spe_clut224.o
 
 # How to generate logo's
 
-# Use logo-cfiles to retrieve list of .c files to be built
-logo-cfiles = $(notdir $(patsubst %.$(2), %.c, \
-              $(wildcard $(srctree)/$(src)/*$(1).$(2))))
-
-
-# Mono logos
-extra-y += $(call logo-cfiles,_mono,pbm)
-
-# VGA16 logos
-extra-y += $(call logo-cfiles,_vga16,ppm)
-
-# 224 Logos
-extra-y += $(call logo-cfiles,_clut224,ppm)
-
-# Gray 256
-extra-y += $(call logo-cfiles,_gray256,pgm)
-
 pnmtologo := scripts/pnmtologo
 
 # Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
@@ -55,5 +38,5 @@ $(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
 $(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
        $(call if_changed,logo)
 
-# Files generated that shall be removed upon make clean
-clean-files := *.o *_mono.c *_vga16.c *_clut224.c *_gray256.c
+# generated C files
+targets += *_mono.c *_vga16.c *_clut224.c *_gray256.c
index 75fd140..43c3916 100644 (file)
@@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr(
        if (!bounce_buf)
                return -ENOMEM;
 
+       *bounce_buf_ret = bounce_buf;
+
        if (copy_in) {
                ret = copy_from_user(bounce_buf, (void __user *)buf, len);
                if (ret)
@@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr(
                memset(bounce_buf, 0, len);
        }
 
-       *bounce_buf_ret = bounce_buf;
        hgcm_call_add_pagelist_size(bounce_buf, len, extra);
        return 0;
 }
index ebed495..b784763 100644 (file)
@@ -103,6 +103,7 @@ config W1_SLAVE_DS2438
 
 config W1_SLAVE_DS250X
        tristate "512b/1kb/16kb EPROM family support"
+       select CRC16
        help
          Say Y here if you want to use a 1-wire
          512b/1kb/16kb EPROM family device (DS250x).
index 4e11de6..5bae515 100644 (file)
@@ -156,8 +156,10 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /* balloon_append: add the given page to the balloon. */
-static void __balloon_append(struct page *page)
+static void balloon_append(struct page *page)
 {
+       __SetPageOffline(page);
+
        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
@@ -169,11 +171,6 @@ static void __balloon_append(struct page *page)
        wake_up(&balloon_wq);
 }
 
-static void balloon_append(struct page *page)
-{
-       __balloon_append(page);
-}
-
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
 static struct page *balloon_retrieve(bool require_lowmem)
 {
@@ -192,6 +189,7 @@ static struct page *balloon_retrieve(bool require_lowmem)
        else
                balloon_stats.balloon_low--;
 
+       __ClearPageOffline(page);
        return page;
 }
 
@@ -377,8 +375,7 @@ static void xen_online_page(struct page *page, unsigned int order)
        for (i = 0; i < size; i++) {
                p = pfn_to_page(start_pfn + i);
                __online_page_set_limits(p);
-               __SetPageOffline(p);
-               __balloon_append(p);
+               balloon_append(p);
        }
        mutex_unlock(&balloon_mutex);
 }
@@ -444,7 +441,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
 
                /* Relinquish the page back to the allocator. */
-               __ClearPageOffline(page);
                free_reserved_page(page);
        }
 
@@ -471,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        state = BP_EAGAIN;
                        break;
                }
-               __SetPageOffline(page);
                adjust_managed_page_count(page, -1);
                xenmem_reservation_scrub_page(page);
                list_add(&page->lru, &pages);
@@ -611,7 +606,6 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
        while (pgno < nr_pages) {
                page = balloon_retrieve(true);
                if (page) {
-                       __ClearPageOffline(page);
                        pages[pgno++] = page;
 #ifdef CONFIG_XEN_HAVE_PVMMU
                        /*
@@ -653,10 +647,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
        mutex_lock(&balloon_mutex);
 
        for (i = 0; i < nr_pages; i++) {
-               if (pages[i]) {
-                       __SetPageOffline(pages[i]);
+               if (pages[i])
                        balloon_append(pages[i]);
-               }
        }
 
        balloon_stats.target_unpopulated -= nr_pages;
@@ -674,7 +666,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
                                      unsigned long pages)
 {
        unsigned long pfn, extra_pfn_end;
-       struct page *page;
 
        /*
         * If the amount of usable memory has been limited (e.g., with
@@ -684,11 +675,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
        extra_pfn_end = min(max_pfn, start_pfn + pages);
 
        for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-               page = pfn_to_page(pfn);
                /* totalram_pages and totalhigh_pages do not
                   include the boot-time balloon extension, so
                   don't subtract from it. */
-               __balloon_append(page);
+               balloon_append(pfn_to_page(pfn));
        }
 
        balloon_stats.total_pages += extra_pfn_end - start_pfn;
index 89d60f8..d1ff218 100644 (file)
@@ -40,7 +40,7 @@
 
 #define efi_data(op)   (op.u.efi_runtime_call)
 
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_time);
 
@@ -61,9 +61,8 @@ efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_time);
 
-efi_status_t xen_efi_set_time(efi_time_t *tm)
+static efi_status_t xen_efi_set_time(efi_time_t *tm)
 {
        struct xen_platform_op op = INIT_EFI_OP(set_time);
 
@@ -75,10 +74,10 @@ efi_status_t xen_efi_set_time(efi_time_t *tm)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_time);
 
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
-                                    efi_time_t *tm)
+static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
+                                           efi_bool_t *pending,
+                                           efi_time_t *tm)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
 
@@ -98,9 +97,8 @@ efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
 
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
        struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
 
@@ -117,11 +115,10 @@ efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
 
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                 u32 *attr, unsigned long *data_size,
-                                 void *data)
+static efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+                                        u32 *attr, unsigned long *data_size,
+                                        void *data)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_variable);
 
@@ -141,11 +138,10 @@ efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_variable);
 
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
-                                      efi_char16_t *name,
-                                      efi_guid_t *vendor)
+static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
+                                             efi_char16_t *name,
+                                             efi_guid_t *vendor)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
 
@@ -165,11 +161,10 @@ efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
 
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                u32 attr, unsigned long data_size,
-                                void *data)
+static efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+                                        u32 attr, unsigned long data_size,
+                                        void *data)
 {
        struct xen_platform_op op = INIT_EFI_OP(set_variable);
 
@@ -186,11 +181,10 @@ efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_variable);
 
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
-                                        u64 *remaining_space,
-                                        u64 *max_variable_size)
+static efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
+                                               u64 *remaining_space,
+                                               u64 *max_variable_size)
 {
        struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
 
@@ -208,9 +202,8 @@ efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
 
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
+static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
 
@@ -221,10 +214,9 @@ efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
 
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
-                                   unsigned long count, unsigned long sg_list)
+static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
+                               unsigned long count, unsigned long sg_list)
 {
        struct xen_platform_op op = INIT_EFI_OP(update_capsule);
 
@@ -241,11 +233,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
 
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
-                                       unsigned long count, u64 *max_size,
-                                       int *reset_type)
+static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+                       unsigned long count, u64 *max_size, int *reset_type)
 {
        struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
 
@@ -264,10 +254,9 @@ efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
 
-void xen_efi_reset_system(int reset_type, efi_status_t status,
-                         unsigned long data_size, efi_char16_t *data)
+static void xen_efi_reset_system(int reset_type, efi_status_t status,
+                                unsigned long data_size, efi_char16_t *data)
 {
        switch (reset_type) {
        case EFI_RESET_COLD:
@@ -281,4 +270,25 @@ void xen_efi_reset_system(int reset_type, efi_status_t status,
                BUG();
        }
 }
-EXPORT_SYMBOL_GPL(xen_efi_reset_system);
+
+/*
+ * Set XEN EFI runtime services function pointers. Other fields of struct efi,
+ * e.g. efi.systab, will be set like normal EFI.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+       efi.get_time                    = xen_efi_get_time;
+       efi.set_time                    = xen_efi_set_time;
+       efi.get_wakeup_time             = xen_efi_get_wakeup_time;
+       efi.set_wakeup_time             = xen_efi_set_wakeup_time;
+       efi.get_variable                = xen_efi_get_variable;
+       efi.get_next_variable           = xen_efi_get_next_variable;
+       efi.set_variable                = xen_efi_set_variable;
+       efi.set_variable_nonblocking    = xen_efi_set_variable;
+       efi.query_variable_info         = xen_efi_query_variable_info;
+       efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+       efi.update_capsule              = xen_efi_update_capsule;
+       efi.query_capsule_caps          = xen_efi_query_capsule_caps;
+       efi.get_next_high_mono_count    = xen_efi_get_next_high_mono_count;
+       efi.reset_system                = xen_efi_reset_system;
+}
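
The hunks above drop the EXPORT_SYMBOL_GPL markers and make every xen_efi_* helper static; the only way in is the new xen_efi_runtime_setup() table fill. A minimal userspace sketch of that shape, with invented names (struct fw_ops, demo_*), as ordinary C:

/*
 * All names below (struct fw_ops, fw, demo_*) are made up; this only shows
 * the "file-local callbacks behind one public setup hook" pattern.
 */
#include <stdio.h>

struct fw_ops {
        int  (*get_time)(void);
        void (*reset_system)(int type);
};

static struct fw_ops fw;                        /* plays the role of the global 'efi' */

static int demo_get_time(void)                  /* like the now-static helpers above */
{
        return 42;
}

static void demo_reset_system(int type)
{
        printf("reset requested, type=%d\n", type);
}

void demo_runtime_setup(void)                   /* the single external entry point */
{
        fw.get_time     = demo_get_time;
        fw.reset_system = demo_reset_system;
}

int main(void)
{
        demo_runtime_setup();
        printf("time=%d\n", fw.get_time());
        fw.reset_system(0);
        return 0;
}
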
index a446a72..81401f3 100644 (file)
@@ -22,6 +22,7 @@
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -34,9 +35,6 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/refcount.h>
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-#include <linux/of_device.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -625,14 +623,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
        flip->private_data = priv;
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        priv->dma_dev = gntdev_miscdev.this_device;
-
-       /*
-        * The device is not spawn from a device tree, so arch_setup_dma_ops
-        * is not called, thus leaving the device with dummy DMA ops.
-        * Fix this by calling of_dma_configure() with a NULL node to set
-        * default DMA ops.
-        */
-       of_dma_configure(priv->dma_dev, NULL, true);
+       dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
 #endif
        pr_debug("priv %p\n", priv);
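
The replacement call hands DMA_BIT_MASK(64) to the device. As a standalone aside on that macro family: the 64-bit mask cannot be produced by a plain shift, since shifting a 64-bit value by 64 is undefined in C. BIT_MASK64 below is a local stand-in, not the kernel macro:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* local stand-in for a DMA_BIT_MASK()-style macro; the n == 64 case must be
 * spelled out because 1ULL << 64 is undefined behaviour */
#define BIT_MASK64(n)   (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
        printf("32-bit mask: 0x%016" PRIx64 "\n", (uint64_t)BIT_MASK64(32));
        printf("48-bit mask: 0x%016" PRIx64 "\n", (uint64_t)BIT_MASK64(48));
        printf("64-bit mask: 0x%016" PRIx64 "\n", (uint64_t)BIT_MASK64(64));
        return 0;
}
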
 
index 7ea6fb6..49b381e 100644 (file)
@@ -1363,8 +1363,7 @@ static int gnttab_setup(void)
        if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
                gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
                if (gnttab_shared.addr == NULL) {
-                       pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
-                               (unsigned long)xen_auto_xlat_grant_frames.vaddr);
+                       pr_warn("gnttab share frames is not mapped!\n");
                        return -ENOMEM;
                }
        }
index 5e30602..59e85e4 100644 (file)
@@ -74,7 +74,7 @@ static int xen_allocate_irq(struct pci_dev *pdev)
                        "xen-platform-pci", pdev);
 }
 
-static int platform_pci_resume(struct pci_dev *pdev)
+static int platform_pci_resume(struct device *dev)
 {
        int err;
 
@@ -83,7 +83,7 @@ static int platform_pci_resume(struct pci_dev *pdev)
 
        err = xen_set_callback_via(callback_via);
        if (err) {
-               dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+               dev_err(dev, "platform_pci_resume failure!\n");
                return err;
        }
        return 0;
@@ -168,13 +168,17 @@ static const struct pci_device_id platform_pci_tbl[] = {
        {0,}
 };
 
+static struct dev_pm_ops platform_pm_ops = {
+       .resume_noirq =   platform_pci_resume,
+};
+
 static struct pci_driver platform_driver = {
        .name =           DRV_NAME,
        .probe =          platform_pci_probe,
        .id_table =       platform_pci_tbl,
-#ifdef CONFIG_PM
-       .resume_early =   platform_pci_resume,
-#endif
+       .driver = {
+               .pm =     &platform_pm_ops,
+       },
 };
 
 builtin_pci_driver(platform_driver);
index 69a626b..c57c71b 100644 (file)
@@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
        mappass->reqcopy = *req;
        icsk = inet_csk(mappass->sock->sk);
        queue = &icsk->icsk_accept_queue;
-       data = queue->rskq_accept_head != NULL;
+       data = READ_ONCE(queue->rskq_accept_head) != NULL;
        if (data) {
                mappass->reqcopy.cmd = 0;
                ret = 0;
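
The accept-queue head is now read with READ_ONCE() because it is peeked at without the lock that normally protects it. A userspace approximation of the same intent using C11 atomics (names invented; the kernel macro is broader than a relaxed atomic load):

#include <stdatomic.h>
#include <stdio.h>

struct accept_queue {
        _Atomic(void *) head;           /* written under a lock, peeked at locklessly */
};

static int queue_has_data(struct accept_queue *q)
{
        /* one well-defined load: the moral equivalent of READ_ONCE(q->head) */
        return atomic_load_explicit(&q->head, memory_order_relaxed) != NULL;
}

int main(void)
{
        struct accept_queue q;
        int dummy;

        atomic_init(&q.head, NULL);
        printf("empty:         %d\n", queue_has_data(&q));

        atomic_store_explicit(&q.head, (void *)&dummy, memory_order_relaxed);
        printf("after enqueue: %d\n", queue_has_data(&q));
        return 0;
}
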
index 08adc59..597af45 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
        wait_queue_head_t read_waitq;
 
        struct kref kref;
+
+       struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
        mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
        struct xenbus_file_priv *u;
        struct xenbus_transaction_holder *trans, *tmp;
        struct watch_adapter *watch, *tmp_watch;
        struct read_buffer *rb, *tmp_rb;
 
-       u = container_of(kref, struct xenbus_file_priv, kref);
+       u = container_of(wq, struct xenbus_file_priv, wq);
 
        /*
         * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
        kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+       struct xenbus_file_priv *u;
+
+       /*
+        * We might be called in xenbus_thread().
+        * Use workqueue to avoid deadlock.
+        */
+       u = container_of(kref, struct xenbus_file_priv, kref);
+       schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
        struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        INIT_LIST_HEAD(&u->watches);
        INIT_LIST_HEAD(&u->read_buffers);
        init_waitqueue_head(&u->read_waitq);
+       INIT_WORK(&u->wq, xenbus_worker);
 
        mutex_init(&u->reply_mutex);
        mutex_init(&u->msgbuffer_mutex);
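
The release path above no longer frees the private data inline; xenbus_file_free() only schedules xenbus_worker(), since the final kref_put() can run inside xenbus_thread() where doing the teardown directly could deadlock. A rough userspace analogue with pthreads standing in for the workqueue (all names invented; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct priv {
        int id;
};

static void *worker(void *arg)                  /* plays the role of xenbus_worker() */
{
        struct priv *p = arg;

        printf("worker: tearing down object %d\n", p->id);
        free(p);                                /* the heavy cleanup happens here */
        return NULL;
}

static void release(struct priv *p)             /* plays the role of xenbus_file_free() */
{
        pthread_t t;

        /* we may be running in a context that must not do the teardown itself,
         * so hand the object off instead of freeing it inline */
        if (pthread_create(&t, NULL, worker, p) == 0)
                pthread_detach(t);
}

int main(void)
{
        struct priv *p = malloc(sizeof(*p));

        if (!p)
                return 1;
        p->id = 1;
        release(p);
        pthread_exit(NULL);     /* let the detached worker finish before the process exits */
}
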
index ad4c6b1..c5642bc 100644 (file)
@@ -879,7 +879,7 @@ out_free_interp:
           the correct location in memory. */
        for(i = 0, elf_ppnt = elf_phdata;
            i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
-               int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
+               int elf_prot, elf_flags;
                unsigned long k, vaddr;
                unsigned long total_size = 0;
 
@@ -911,13 +911,6 @@ out_free_interp:
                                         */
                                }
                        }
-
-                       /*
-                        * Some binaries have overlapping elf segments and then
-                        * we have to forcefully map over an existing mapping
-                        * e.g. over this newly established brk mapping.
-                        */
-                       elf_fixed = MAP_FIXED;
                }
 
                elf_prot = make_prot(elf_ppnt->p_flags);
@@ -930,7 +923,7 @@ out_free_interp:
                 * the ET_DYN load_addr calculations, proceed normally.
                 */
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
-                       elf_flags |= elf_fixed;
+                       elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
                        /*
                         * This logic is run once for the first LOAD Program
@@ -966,7 +959,7 @@ out_free_interp:
                                load_bias = ELF_ET_DYN_BASE;
                                if (current->flags & PF_RANDOMIZE)
                                        load_bias += arch_mmap_rnd();
-                               elf_flags |= elf_fixed;
+                               elf_flags |= MAP_FIXED;
                        } else
                                load_bias = 0;
 
index 8fe4eb7..27e5b26 100644 (file)
@@ -1591,7 +1591,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
-       struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
        u64 release_bytes = 0;
        u64 lockstart;
@@ -1611,6 +1610,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                return -ENOMEM;
 
        while (iov_iter_count(i) > 0) {
+               struct extent_state *cached_state = NULL;
                size_t offset = offset_in_page(pos);
                size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
@@ -1758,9 +1758,20 @@ again:
                if (copied > 0)
                        ret = btrfs_dirty_pages(inode, pages, dirty_pages,
                                                pos, copied, &cached_state);
+
+               /*
+                * If we have not locked the extent range, because the range's
+                * start offset is >= i_size, we might still have a non-NULL
+                * cached extent state, acquired while marking the extent range
+                * as delalloc through btrfs_dirty_pages(). Therefore free any
+                * possible cached extent state to avoid a memory leak.
+                */
                if (extents_locked)
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             lockstart, lockend, &cached_state);
+               else
+                       free_extent_state(cached_state);
+
                btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
                                               true);
                if (ret) {
index a054640..0f2754e 100644 (file)
@@ -6305,13 +6305,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        u32 sizes[2];
        int nitems = name ? 2 : 1;
        unsigned long ptr;
+       unsigned int nofs_flag;
        int ret;
 
        path = btrfs_alloc_path();
        if (!path)
                return ERR_PTR(-ENOMEM);
 
+       nofs_flag = memalloc_nofs_save();
        inode = new_inode(fs_info->sb);
+       memalloc_nofs_restore(nofs_flag);
        if (!inode) {
                btrfs_free_path(path);
                return ERR_PTR(-ENOMEM);
index e87cbda..b57f361 100644 (file)
@@ -500,7 +500,7 @@ static int process_leaf(struct btrfs_root *root,
        struct btrfs_extent_data_ref *dref;
        struct btrfs_shared_data_ref *sref;
        u32 count;
-       int i = 0, tree_block_level = 0, ret;
+       int i = 0, tree_block_level = 0, ret = 0;
        struct btrfs_key key;
        int nritems = btrfs_header_nritems(leaf);
 
index f321502..123ac54 100644 (file)
@@ -5085,7 +5085,7 @@ static int clone_range(struct send_ctx *sctx,
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;
-       u64 clone_src_i_size;
+       u64 clone_src_i_size = 0;
 
        /*
         * Prevent cloning from a zero offset with a length matching the sector
index 29b82a7..8a6cc60 100644 (file)
@@ -2932,7 +2932,8 @@ out:
  * in the tree of log roots
  */
 static int update_log_root(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *log)
+                          struct btrfs_root *log,
+                          struct btrfs_root_item *root_item)
 {
        struct btrfs_fs_info *fs_info = log->fs_info;
        int ret;
@@ -2940,10 +2941,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
        if (log->log_transid == 1) {
                /* insert root item on the first sync */
                ret = btrfs_insert_root(trans, fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
+                               &log->root_key, root_item);
        } else {
                ret = btrfs_update_root(trans, fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
+                               &log->root_key, root_item);
        }
        return ret;
 }
@@ -3041,6 +3042,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log = root->log_root;
        struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+       struct btrfs_root_item new_root_item;
        int log_transid = 0;
        struct btrfs_log_ctx root_log_ctx;
        struct blk_plug plug;
@@ -3104,18 +3106,26 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out;
        }
 
+       /*
+        * We _must_ update under the root->log_mutex in order to make sure we
+        * have a consistent view of the log root we are trying to commit at
+        * this moment.
+        *
+        * We _must_ copy this into a local copy, because we are not holding the
+        * log_root_tree->log_mutex yet.  This is important because when we
+        * commit the log_root_tree we must have a consistent view of the
+        * log_root_tree when we update the super block to point at the
+        * log_root_tree bytenr.  If we update the log_root_tree here we'll race
+        * with the commit and possibly point at the new block which we may not
+        * have written out.
+        */
        btrfs_set_root_node(&log->root_item, log->node);
+       memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
 
        root->log_transid++;
        log->log_transid = root->log_transid;
        root->log_start_pid = 0;
        /*
-        * Update or create log root item under the root's log_mutex to prevent
-        * races with concurrent log syncs that can lead to failure to update
-        * log root item because it was not created yet.
-        */
-       ret = update_log_root(trans, log);
-       /*
         * IO has been started, blocks of the log tree have WRITTEN flag set
         * in their headers. new modifications of the log will be written to
         * new positions. so it's safe to allow log writers to go in.
@@ -3135,6 +3145,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        mutex_unlock(&log_root_tree->log_mutex);
 
        mutex_lock(&log_root_tree->log_mutex);
+
+       /*
+        * Now we are safe to update the log_root_tree because we're under the
+        * log_mutex, and we're a current writer so we're holding the commit
+        * open until we drop the log_mutex.
+        */
+       ret = update_log_root(trans, log, &new_root_item);
+
        if (atomic_dec_and_test(&log_root_tree->log_writers)) {
                /* atomic_dec_and_test implies a barrier */
                cond_wake_up_nomb(&log_root_tree->log_writer_wait);
index cdd7af4..bdfe449 100644 (file)
@@ -3845,7 +3845,11 @@ static int alloc_profile_is_valid(u64 flags, int extended)
                return !extended; /* "0" is valid for usual profiles */
 
        /* true if exactly one bit set */
-       return is_power_of_2(flags);
+       /*
+        * Don't use is_power_of_2(unsigned long) because it won't work
+        * for the single profile (1ULL << 48) on 32-bit CPUs.
+        */
+       return flags != 0 && (flags & (flags - 1)) == 0;
 }
 
 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
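
A quick standalone check of the open-coded test above: with a 32-bit unsigned long, is_power_of_2() would silently truncate a profile bit such as 1ULL << 48, while the 64-bit bit trick keeps it:

#include <stdio.h>
#include <stdint.h>

static int is_pow2_32(uint32_t v)               /* models is_power_of_2() on a 32-bit box */
{
        return v != 0 && (v & (v - 1)) == 0;
}

static int is_pow2_64(uint64_t v)               /* the open-coded replacement */
{
        return v != 0 && (v & (v - 1)) == 0;
}

int main(void)
{
        uint64_t flags = 1ULL << 48;            /* a profile bit above 32 bits */

        printf("truncated 32-bit check: %d\n", is_pow2_32((uint32_t)flags));   /* 0: bit lost */
        printf("full 64-bit check:      %d\n", is_pow2_64(flags));             /* 1 */
        printf("two bits set:           %d\n", is_pow2_64(flags | 1));         /* 0 */
        return 0;
}
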
index a8a8f84..a516329 100644 (file)
@@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end,
        }
 
 done:
-       if (*p != end)
-               goto bad;
+       /* Skip over any unrecognized fields */
+       *p = end;
        return 0;
 
 bad:
@@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end,
                goto bad;
 
        info->filelock_reply = *p;
-       *p += sizeof(*info->filelock_reply);
 
-       if (unlikely(*p != end))
-               goto bad;
+       /* Skip over any unrecognized fields */
+       *p = end;
        return 0;
-
 bad:
        return -EIO;
 }
@@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end,
 {
        if (features == (u64)-1 ||
            (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
+               /* Malformed reply? */
                if (*p == end) {
                        info->has_create_ino = false;
                } else {
                        info->has_create_ino = true;
-                       info->ino = ceph_decode_64(p);
+                       ceph_decode_64_safe(p, end, info->ino, bad);
                }
+       } else {
+               if (*p != end)
+                       goto bad;
        }
 
-       if (unlikely(*p != end))
-               goto bad;
+       /* Skip over any unrecognized fields */
+       *p = end;
        return 0;
-
 bad:
        return -EIO;
 }
index 2e9c7f4..c049c7b 100644 (file)
@@ -169,18 +169,26 @@ cifs_read_super(struct super_block *sb)
        else
                sb->s_maxbytes = MAX_NON_LFS;
 
-       /* BB FIXME fix time_gran to be larger for LANMAN sessions */
-       sb->s_time_gran = 100;
-
-       if (tcon->unix_ext) {
-               ts = cifs_NTtimeToUnix(0);
+       /* Some very old servers like DOS and OS/2 used 2 second granularity */
+       if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
+           ((tcon->ses->capabilities &
+             tcon->ses->server->vals->cap_nt_find) == 0) &&
+           !tcon->unix_ext) {
+               sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
+               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
                sb->s_time_min = ts.tv_sec;
-               ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
+               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
+                                   cpu_to_le16(SMB_TIME_MAX), 0);
                sb->s_time_max = ts.tv_sec;
        } else {
-               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
+               /*
+                * Almost every server, including all SMB2+, uses DCE TIME
+                * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
+                */
+               sb->s_time_gran = 100;
+               ts = cifs_NTtimeToUnix(0);
                sb->s_time_min = ts.tv_sec;
-               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX), cpu_to_le16(SMB_TIME_MAX), 0);
+               ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
                sb->s_time_max = ts.tv_sec;
        }
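
For reference, the "DCE time" the new comment mentions counts 100-nanosecond ticks since 1601-01-01, so converting it to Unix time divides by 10^7 and subtracts the 1601-to-1970 offset of 11644473600 seconds. A self-contained sketch of that arithmetic (not the kernel helper itself):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define TICKS_PER_SEC   10000000ULL             /* 100 ns units per second */
#define EPOCH_DELTA_SEC 11644473600ULL          /* 1601-01-01 .. 1970-01-01 in seconds */

static int64_t nt_time_to_unix(uint64_t nt)
{
        return (int64_t)(nt / TICKS_PER_SEC) - (int64_t)EPOCH_DELTA_SEC;
}

int main(void)
{
        /* NT time 0 is 1601-01-01, i.e. a large negative Unix timestamp */
        printf("nt=0 -> unix %" PRId64 "\n", nt_time_to_unix(0));

        /* 2019-01-01 00:00:00 UTC is 1546300800 in Unix time */
        uint64_t nt = (1546300800ULL + EPOCH_DELTA_SEC) * TICKS_PER_SEC;
        printf("nt for 2019-01-01 -> unix %" PRId64 "\n", nt_time_to_unix(nt));
        return 0;
}
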
 
index 2e960e1..50dfd90 100644 (file)
@@ -1210,7 +1210,7 @@ struct cifs_search_info {
        bool smallBuf:1; /* so we know which buf_release function to call */
 };
 
-#define ACL_NO_MODE    -1
+#define ACL_NO_MODE    ((umode_t)(-1))
 struct cifs_open_parms {
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
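
The explicit (umode_t)(-1) matters because mode fields are a narrow unsigned type (unsigned short in the kernel), so comparing one against a bare int -1 can never match after integer promotion. A tiny standalone demonstration, with umode_t re-typedef'd locally:

#include <stdio.h>

typedef unsigned short umode_t;                 /* mirrors the kernel's 16-bit umode_t */

#define ACL_NO_MODE     ((umode_t)(-1))

int main(void)
{
        umode_t mode = (umode_t)-1;             /* caller asked for "no mode" */

        /* promotion turns mode into the int 65535, which never equals -1 */
        printf("mode == -1          : %d\n", mode == -1);
        /* both sides promote to 65535, so this is the comparison that works */
        printf("mode == ACL_NO_MODE : %d\n", mode == ACL_NO_MODE);
        return 0;
}
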
index 2850c3c..a64dfa9 100644 (file)
@@ -4264,7 +4264,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
                server->ops->qfs_tcon(*xid, tcon);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
                        if (tcon->fsDevInfo.DeviceCharacteristics &
-                           FILE_READ_ONLY_DEVICE)
+                           cpu_to_le32(FILE_READ_ONLY_DEVICE))
                                cifs_dbg(VFS, "mounted to read only share\n");
                        else if ((cifs_sb->mnt_cifs_flags &
                                  CIFS_MOUNT_RW_CACHE) == 0)
@@ -4445,7 +4445,7 @@ static int setup_dfs_tgt_conn(const char *path,
        int rc;
        struct dfs_info3_param ref = {0};
        char *mdata = NULL, *fake_devname = NULL;
-       struct smb_vol fake_vol = {0};
+       struct smb_vol fake_vol = {NULL};
 
        cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
 
index dd5ac84..7ce689d 100644 (file)
@@ -738,10 +738,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
+       struct inode *inode;
+
        if (flags & LOOKUP_RCU)
                return -ECHILD;
 
        if (d_really_is_positive(direntry)) {
+               inode = d_inode(direntry);
+               if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+                       CIFS_I(inode)->time = 0; /* force reval */
+
                if (cifs_revalidate_dentry(direntry))
                        return 0;
                else {
@@ -752,7 +758,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
                         * attributes will have been updated by
                         * cifs_revalidate_dentry().
                         */
-                       if (IS_AUTOMOUNT(d_inode(direntry)) &&
+                       if (IS_AUTOMOUNT(inode) &&
                           !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
                                spin_lock(&direntry->d_lock);
                                direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
index 4b95700..5ad15de 100644 (file)
@@ -253,6 +253,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);
 
+       if (rc) {
+               server->ops->close(xid, tcon, fid);
+               if (rc == -ESTALE)
+                       rc = -EOPENSTALE;
+       }
+
 out:
        kfree(buf);
        return rc;
@@ -1840,13 +1846,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 {
        struct cifsFileInfo *open_file = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
        /* we could simply get the first_list_entry since write-only entries
           are always at the end of the list but since the first entry might
           have a close pending, we go through the whole list */
@@ -1858,7 +1863,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                cifsFileInfo_get(open_file);
-                               spin_unlock(&tcon->open_file_lock);
+                               spin_unlock(&cifs_inode->open_file_lock);
                                return open_file;
                        } /* else might as well continue, and look for
                             another, or simply have the caller reopen it
@@ -1866,7 +1871,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                } else /* write only file */
                        break; /* write only files are last so must be done */
        }
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
        return NULL;
 }
 
@@ -1877,7 +1882,6 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
 {
        struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
-       struct cifs_tcon *tcon;
        bool any_available = false;
        int rc = -EBADF;
        unsigned int refind = 0;
@@ -1897,16 +1901,15 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
        }
 
        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-       tcon = cifs_sb_master_tcon(cifs_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
 refind_writable:
        if (refind > MAX_REOPEN_ATT) {
-               spin_unlock(&tcon->open_file_lock);
+               spin_unlock(&cifs_inode->open_file_lock);
                return rc;
        }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1918,7 +1921,7 @@ refind_writable:
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                cifsFileInfo_get(open_file);
-                               spin_unlock(&tcon->open_file_lock);
+                               spin_unlock(&cifs_inode->open_file_lock);
                                *ret_file = open_file;
                                return 0;
                        } else {
@@ -1938,7 +1941,7 @@ refind_writable:
                cifsFileInfo_get(inv_file);
        }
 
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
 
        if (inv_file) {
                rc = cifs_reopen_file(inv_file, false);
@@ -1953,7 +1956,7 @@ refind_writable:
                cifsFileInfo_put(inv_file);
                ++refind;
                inv_file = NULL;
-               spin_lock(&tcon->open_file_lock);
+               spin_lock(&cifs_inode->open_file_lock);
                goto refind_writable;
        }
 
@@ -4461,17 +4464,15 @@ static int cifs_readpage(struct file *file, struct page *page)
 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 {
        struct cifsFileInfo *open_file;
-       struct cifs_tcon *tcon =
-               cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-                       spin_unlock(&tcon->open_file_lock);
+                       spin_unlock(&cifs_inode->open_file_lock);
                        return 1;
                }
        }
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
        return 0;
 }
 
index 3bae2e5..5dcc95b 100644 (file)
@@ -414,6 +414,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* if uniqueid is different, return error */
                if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
                    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+                       CIFS_I(*pinode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgiiu_exit;
                }
@@ -421,6 +422,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* if filetype is different, return error */
                if (unlikely(((*pinode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
+                       CIFS_I(*pinode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgiiu_exit;
                }
@@ -933,6 +935,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                /* if uniqueid is different, return error */
                if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
                    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+                       CIFS_I(*inode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgii_exit;
                }
@@ -940,6 +943,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                /* if filetype is different, return error */
                if (unlikely(((*inode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
+                       CIFS_I(*inode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgii_exit;
                }
index 49c17ee..9b41436 100644 (file)
@@ -117,10 +117,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
        {0, 0}
 };
 
-static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
-       {0, 0}
-};
-
 /*
  * Convert a string containing text IPv4 or IPv6 address to binary form.
  *
index 85f9d61..0514986 100644 (file)
@@ -751,8 +751,8 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
        unsigned int num = *num_iovec;
 
        iov[num].iov_base = create_posix_buf(mode);
-       if (mode == -1)
-               cifs_dbg(VFS, "illegal mode\n"); /* BB REMOVEME */
+       if (mode == ACL_NO_MODE)
+               cifs_dbg(FYI, "illegal mode\n");
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_posix);
@@ -2521,11 +2521,8 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
                        return rc;
        }
 
-       /* TODO: add handling for the mode on create */
-       if (oparms->disposition == FILE_CREATE)
-               cifs_dbg(VFS, "mode is 0x%x\n", oparms->mode); /* BB REMOVEME */
-
-       if ((oparms->disposition == FILE_CREATE) && (oparms->mode != -1)) {
+       if ((oparms->disposition == FILE_CREATE) &&
+           (oparms->mode != ACL_NO_MODE)) {
                if (n_iov > 2) {
                        struct create_context *ccontext =
                            (struct create_context *)iov[n_iov-1].iov_base;
@@ -3217,7 +3214,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
 
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;
-       req->OutputBufferLength = SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE;
+       req->OutputBufferLength =
+               cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
        req->CompletionFilter = cpu_to_le32(completion_filter);
        if (watch_tree)
                req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
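
The fix above wraps the buffer length in cpu_to_le32() because SMB2 on-the-wire fields are little-endian regardless of host byte order. A standalone sketch of the same conversion using htole32() from <endian.h> (a glibc/BSD extension) in place of the kernel helper:

#include <endian.h>
#include <stdio.h>
#include <stdint.h>

struct wire_req {
        uint32_t output_buffer_length;          /* little-endian on the wire */
};

int main(void)
{
        uint32_t host_len = 0xFF00;             /* some host-order length */
        struct wire_req req;
        unsigned char *b;

        req.output_buffer_length = htole32(host_len);   /* convert before sending */

        b = (unsigned char *)&req.output_buffer_length;
        /* prints "00 ff 00 00" on any host: least significant byte first */
        printf("wire bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        return 0;
}
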
index da3a6d5..71b2930 100644 (file)
@@ -150,6 +150,10 @@ extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                           bool is_fsctl, char *in_data, u32 indatalen,
                           __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
+extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+                       u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+                       u32 completion_filter);
+
 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
 extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
index ae19678..9329ced 100644 (file)
@@ -241,9 +241,8 @@ void dio_warn_stale_pagecache(struct file *filp)
        }
 }
 
-/**
+/*
  * dio_complete() - called when all DIO BIO I/O has been completed
- * @offset: the byte offset in the file of the completed operation
  *
  * This drops i_dio_count, lets interested parties know that a DIO operation
  * has completed, and calculates the resulting return code for the operation.
index 8a9fcbd..fc3a8d8 100644 (file)
@@ -34,11 +34,15 @@ static void erofs_readendio(struct bio *bio)
 
 struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
 {
-       struct inode *const bd_inode = sb->s_bdev->bd_inode;
-       struct address_space *const mapping = bd_inode->i_mapping;
+       struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
+       struct page *page;
 
-       return read_cache_page_gfp(mapping, blkaddr,
+       page = read_cache_page_gfp(mapping, blkaddr,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
+       /* should already be PageUptodate */
+       if (!IS_ERR(page))
+               lock_page(page);
+       return page;
 }
 
 static int erofs_map_blocks_flatmode(struct inode *inode,
index caf9a95..0e36949 100644 (file)
@@ -105,9 +105,9 @@ static int erofs_read_superblock(struct super_block *sb)
        int ret;
 
        page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
-       if (!page) {
+       if (IS_ERR(page)) {
                erofs_err(sb, "cannot read erofs superblock");
-               return -EIO;
+               return PTR_ERR(page);
        }
 
        sbi = EROFS_SB(sb);
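
Both erofs fixes in this area hinge on the fact that page-cache readers report failure as an error pointer, not NULL, so the caller must test IS_ERR() and extract the code with PTR_ERR(). A minimal local re-implementation of that calling convention (not the kernel's exact macros, which reserve the top 4095 addresses for error codes):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095                          /* same window the kernel reserves */

static void *err_ptr(long err)          { return (void *)err; }
static int   is_err(const void *p)      { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static long  ptr_err(const void *p)     { return (long)(intptr_t)p; }

static void *read_page(int fail)                /* stand-in for read_mapping_page() */
{
        static int fake_page;

        return fail ? err_ptr(-EIO) : (void *)&fake_page;
}

int main(void)
{
        void *page = read_page(1);

        if (is_err(page)) {                     /* a NULL check here would miss the error */
                printf("read failed: %ld\n", ptr_err(page));
                return 1;
        }
        printf("got page %p\n", page);
        return 0;
}
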
index 96e34c9..fad80c9 100644 (file)
@@ -575,7 +575,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
        struct erofs_map_blocks *const map = &fe->map;
        struct z_erofs_collector *const clt = &fe->clt;
        const loff_t offset = page_offset(page);
-       bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED);
+       bool tight = true;
 
        enum z_erofs_cache_alloctype cache_strategy;
        enum z_erofs_page_type page_type;
@@ -628,8 +628,16 @@ restart_now:
        preload_compressed_pages(clt, MNGD_MAPPING(sbi),
                                 cache_strategy, pagepool);
 
-       tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
 hitted:
+       /*
+        * Ensure the current partial page belongs to this submit chain rather
+        * than other concurrent submit chains or the noio(bypass) chain since
+        * those chains are handled asynchronously thus the page cannot be used
+        * for inplace I/O or pagevec (should be processed in strict order.)
+        */
+       tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
+                 clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+
        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                zero_user_segment(page, cur, end);
index 8aaa7ee..8461a63 100644 (file)
@@ -164,8 +164,13 @@ static void finish_writeback_work(struct bdi_writeback *wb,
 
        if (work->auto_free)
                kfree(work);
-       if (done && atomic_dec_and_test(&done->cnt))
-               wake_up_all(done->waitq);
+       if (done) {
+               wait_queue_head_t *waitq = done->waitq;
+
+               /* @done can't be accessed after the following dec */
+               if (atomic_dec_and_test(&done->cnt))
+                       wake_up_all(waitq);
+       }
 }
 
 static void wb_queue_work(struct bdi_writeback *wb,
@@ -900,7 +905,7 @@ restart:
  * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
  * @bdi_id: target bdi id
  * @memcg_id: target memcg css id
- * @nr_pages: number of pages to write, 0 for best-effort dirty flushing
+ * @nr: number of pages to write, 0 for best-effort dirty flushing
  * @reason: reason why some writeback work initiated
  * @done: target wb_completion
  *
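
The finish_writeback_work() change above saves done->waitq before the atomic decrement because, once the count hits zero, the waiter may free the completion immediately. A userspace sketch of the same rule with C11 atomics (names invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wb_completion {
        atomic_int cnt;
        const char *waitq;                      /* stand-in for done->waitq */
};

static void finish_work(struct wb_completion *done)
{
        const char *waitq = done->waitq;        /* copy out while 'done' is still ours */

        /*
         * After the count reaches zero the waiter may free 'done' at any
         * moment, so nothing past the decrement dereferences it; only the
         * saved copy is used (here the last reference frees it explicitly).
         */
        if (atomic_fetch_sub(&done->cnt, 1) == 1) {
                printf("waking: %s\n", waitq);
                free(done);
        }
}

int main(void)
{
        struct wb_completion *done = malloc(sizeof(*done));

        if (!done)
                return 1;
        atomic_init(&done->cnt, 1);
        done->waitq = "writeback waiters";
        finish_work(done);
        return 0;
}
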
index aa8ac55..67dbe02 100644 (file)
@@ -322,6 +322,8 @@ struct io_kiocb {
 #define REQ_F_FAIL_LINK                256     /* fail rest of links */
 #define REQ_F_SHADOW_DRAIN     512     /* link-drain shadow req */
 #define REQ_F_TIMEOUT          1024    /* timeout request */
+#define REQ_F_ISREG            2048    /* regular file */
+#define REQ_F_MUST_PUNT                4096    /* must be punted even for NONBLOCK */
        u64                     user_data;
        u32                     result;
        u32                     sequence;
@@ -415,27 +417,27 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        return ctx;
 }
 
+static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
+                                      struct io_kiocb *req)
+{
+       return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+}
+
 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
                                     struct io_kiocb *req)
 {
-       /* timeout requests always honor sequence */
-       if (!(req->flags & REQ_F_TIMEOUT) &&
-           (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+       if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
                return false;
 
-       return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+       return __io_sequence_defer(ctx, req);
 }
 
-static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
-                                             struct list_head *list)
+static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
 {
        struct io_kiocb *req;
 
-       if (list_empty(list))
-               return NULL;
-
-       req = list_first_entry(list, struct io_kiocb, list);
-       if (!io_sequence_defer(ctx, req)) {
+       req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
+       if (req && !io_sequence_defer(ctx, req)) {
                list_del_init(&req->list);
                return req;
        }
@@ -443,14 +445,17 @@ static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
        return NULL;
 }
 
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-{
-       return __io_get_deferred_req(ctx, &ctx->defer_list);
-}
-
 static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 {
-       return __io_get_deferred_req(ctx, &ctx->timeout_list);
+       struct io_kiocb *req;
+
+       req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
+       if (req && !__io_sequence_defer(ctx, req)) {
+               list_del_init(&req->list);
+               return req;
+       }
+
+       return NULL;
 }
 
 static void __io_commit_cqring(struct io_ring_ctx *ctx)
@@ -591,14 +596,6 @@ static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
        io_cqring_ev_posted(ctx);
 }
 
-static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
-{
-       percpu_ref_put_many(&ctx->refs, refs);
-
-       if (waitqueue_active(&ctx->wait))
-               wake_up(&ctx->wait);
-}
-
 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                                   struct io_submit_state *state)
 {
@@ -646,7 +643,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
        req->result = 0;
        return req;
 out:
-       io_ring_drop_ctx_refs(ctx, 1);
+       percpu_ref_put(&ctx->refs);
        return NULL;
 }
 
@@ -654,7 +651,7 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 {
        if (*nr) {
                kmem_cache_free_bulk(req_cachep, *nr, reqs);
-               io_ring_drop_ctx_refs(ctx, *nr);
+               percpu_ref_put_many(&ctx->refs, *nr);
                *nr = 0;
        }
 }
@@ -663,7 +660,7 @@ static void __io_free_req(struct io_kiocb *req)
 {
        if (req->file && !(req->flags & REQ_F_FIXED_FILE))
                fput(req->file);
-       io_ring_drop_ctx_refs(req->ctx, 1);
+       percpu_ref_put(&req->ctx->refs);
        kmem_cache_free(req_cachep, req);
 }
 
@@ -919,26 +916,26 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
        return ret;
 }
 
-static void kiocb_end_write(struct kiocb *kiocb)
+static void kiocb_end_write(struct io_kiocb *req)
 {
-       if (kiocb->ki_flags & IOCB_WRITE) {
-               struct inode *inode = file_inode(kiocb->ki_filp);
+       /*
+        * Tell lockdep we inherited freeze protection from submission
+        * thread.
+        */
+       if (req->flags & REQ_F_ISREG) {
+               struct inode *inode = file_inode(req->file);
 
-               /*
-                * Tell lockdep we inherited freeze protection from submission
-                * thread.
-                */
-               if (S_ISREG(inode->i_mode))
-                       __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
-               file_end_write(kiocb->ki_filp);
+               __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
        }
+       file_end_write(req->file);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
-       kiocb_end_write(kiocb);
+       if (kiocb->ki_flags & IOCB_WRITE)
+               kiocb_end_write(req);
 
        if ((req->flags & REQ_F_LINK) && res != req->result)
                req->flags |= REQ_F_FAIL_LINK;
@@ -950,7 +947,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
-       kiocb_end_write(kiocb);
+       if (kiocb->ki_flags & IOCB_WRITE)
+               kiocb_end_write(req);
 
        if ((req->flags & REQ_F_LINK) && res != req->result)
                req->flags |= REQ_F_FAIL_LINK;
@@ -1064,8 +1062,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
        if (!req->file)
                return -EBADF;
 
-       if (force_nonblock && !io_file_supports_async(req->file))
-               force_nonblock = false;
+       if (S_ISREG(file_inode(req->file)->i_mode))
+               req->flags |= REQ_F_ISREG;
+
+       /*
+        * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
+        * we know to async punt it even if it was opened O_NONBLOCK
+        */
+       if (force_nonblock && !io_file_supports_async(req->file)) {
+               req->flags |= REQ_F_MUST_PUNT;
+               return -EAGAIN;
+       }
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
        kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -1086,7 +1093,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
                return ret;
 
        /* don't allow async punt if RWF_NOWAIT was requested */
-       if (kiocb->ki_flags & IOCB_NOWAIT)
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           (req->file->f_flags & O_NONBLOCK))
                req->flags |= REQ_F_NOWAIT;
 
        if (force_nonblock)
@@ -1387,7 +1395,9 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
                 * need async punt anyway, so it's more efficient to do it
                 * here.
                 */
-               if (force_nonblock && ret2 > 0 && ret2 < read_size)
+               if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
+                   (req->flags & REQ_F_ISREG) &&
+                   ret2 > 0 && ret2 < read_size)
                        ret2 = -EAGAIN;
                /* Catch -EAGAIN return for forced non-blocking submission */
                if (!force_nonblock || ret2 != -EAGAIN) {
@@ -1452,7 +1462,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
                 * released so that it doesn't complain about the held lock when
                 * we return to userspace.
                 */
-               if (S_ISREG(file_inode(file)->i_mode)) {
+               if (req->flags & REQ_F_ISREG) {
                        __sb_start_write(file_inode(file)->i_sb,
                                                SB_FREEZE_WRITE, true);
                        __sb_writers_release(file_inode(file)->i_sb,
@@ -1889,18 +1899,18 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 
 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       unsigned count, req_dist, tail_index;
+       unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct list_head *entry;
-       struct timespec ts;
+       struct timespec64 ts;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
            sqe->len != 1)
                return -EINVAL;
-       if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr,
-           sizeof(ts)))
+
+       if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;
 
        /*
@@ -1912,21 +1922,36 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                count = 1;
 
        req->sequence = ctx->cached_sq_head + count - 1;
+       /* reuse it to store the count */
+       req->submit.sequence = count;
        req->flags |= REQ_F_TIMEOUT;
 
        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
         */
-       tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
-       req_dist = req->sequence - tail_index;
        spin_lock_irq(&ctx->completion_lock);
        list_for_each_prev(entry, &ctx->timeout_list) {
                struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-               unsigned dist;
+               unsigned nxt_sq_head;
+               long long tmp, tmp_nxt;
 
-               dist = nxt->sequence - tail_index;
-               if (req_dist >= dist)
+               /*
+                * Since cached_sq_head + count - 1 can overflow, use type long
+                * long to store it.
+                */
+               tmp = (long long)ctx->cached_sq_head + count - 1;
+               nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
+               tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
+
+               /*
+                * cached_sq_head may overflow, and it will never overflow twice
+                * once there is some timeout req still be valid.
+                */
+               if (ctx->cached_sq_head < nxt_sq_head)
+                       tmp += UINT_MAX;
+
+               if (tmp >= tmp_nxt)
                        break;
        }
        list_add(&req->list, entry);
@@ -1934,7 +1959,7 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
        hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        req->timeout.timer.function = io_timeout_fn;
-       hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts),
+       hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
                        HRTIMER_MODE_REL);
        return 0;
 }
@@ -2272,7 +2297,13 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        int ret;
 
        ret = __io_submit_sqe(ctx, req, s, force_nonblock);
-       if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+
+       /*
+        * We async punt it if the file wasn't marked NOWAIT, or if the file
+        * doesn't support non-blocking read/write attempts
+        */
+       if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
+           (req->flags & REQ_F_MUST_PUNT))) {
                struct io_uring_sqe *sqe_copy;
 
                sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
@@ -2761,7 +2792,7 @@ out:
 
        if (link)
                io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                       block_for_last);
+                                       !block_for_last);
        if (statep)
                io_submit_state_end(statep);
 
@@ -2920,8 +2951,12 @@ static void io_finish_async(struct io_ring_ctx *ctx)
 static void io_destruct_skb(struct sk_buff *skb)
 {
        struct io_ring_ctx *ctx = skb->sk->sk_user_data;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
+               if (ctx->sqo_wq[i])
+                       flush_workqueue(ctx->sqo_wq[i]);
 
-       io_finish_async(ctx);
        unix_destruct_scm(skb);
 }
 
@@ -3630,7 +3665,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                }
        }
 
-       io_ring_drop_ctx_refs(ctx, 1);
+       percpu_ref_put(&ctx->refs);
 out_fput:
        fdput(f);
        return submitted ? submitted : ret;
index c9b2850..1463b03 100644 (file)
@@ -89,58 +89,45 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 EXPORT_SYMBOL(dcache_dir_close);
 
 /* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
-                                   struct list_head *from,
-                                   int count)
+/*
+ * Returns an element of siblings' list.
+ * We are looking for <count>th positive after <p>; if
+ * found, dentry is grabbed and returned to caller.
+ * If no such element exists, NULL is returned.
+ */
+static struct dentry *scan_positives(struct dentry *cursor,
+                                       struct list_head *p,
+                                       loff_t count,
+                                       struct dentry *last)
 {
-       unsigned *seq = &parent->d_inode->i_dir_seq, n;
-       struct dentry *res;
-       struct list_head *p;
-       bool skipped;
-       int i;
+       struct dentry *dentry = cursor->d_parent, *found = NULL;
 
-retry:
-       i = count;
-       skipped = false;
-       n = smp_load_acquire(seq) & ~1;
-       res = NULL;
-       rcu_read_lock();
-       for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+       spin_lock(&dentry->d_lock);
+       while ((p = p->next) != &dentry->d_subdirs) {
                struct dentry *d = list_entry(p, struct dentry, d_child);
-               if (!simple_positive(d)) {
-                       skipped = true;
-               } else if (!--i) {
-                       res = d;
-                       break;
+               // we must at least skip cursors, to avoid livelocks
+               if (d->d_flags & DCACHE_DENTRY_CURSOR)
+                       continue;
+               if (simple_positive(d) && !--count) {
+                       spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+                       if (simple_positive(d))
+                               found = dget_dlock(d);
+                       spin_unlock(&d->d_lock);
+                       if (likely(found))
+                               break;
+                       count = 1;
+               }
+               if (need_resched()) {
+                       list_move(&cursor->d_child, p);
+                       p = &cursor->d_child;
+                       spin_unlock(&dentry->d_lock);
+                       cond_resched();
+                       spin_lock(&dentry->d_lock);
                }
        }
-       rcu_read_unlock();
-       if (skipped) {
-               smp_rmb();
-               if (unlikely(*seq != n))
-                       goto retry;
-       }
-       return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
-       struct dentry *parent = cursor->d_parent;
-       unsigned n, *seq = &parent->d_inode->i_dir_seq;
-       spin_lock(&parent->d_lock);
-       for (;;) {
-               n = *seq;
-               if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-                       break;
-               cpu_relax();
-       }
-       __list_del(cursor->d_child.prev, cursor->d_child.next);
-       if (after)
-               list_add(&cursor->d_child, after);
-       else
-               list_add_tail(&cursor->d_child, &parent->d_subdirs);
-       smp_store_release(seq, n + 2);
-       spin_unlock(&parent->d_lock);
+       spin_unlock(&dentry->d_lock);
+       dput(last);
+       return found;
 }
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -158,17 +145,25 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
                        return -EINVAL;
        }
        if (offset != file->f_pos) {
+               struct dentry *cursor = file->private_data;
+               struct dentry *to = NULL;
+
+               inode_lock_shared(dentry->d_inode);
+
+               if (offset > 2)
+                       to = scan_positives(cursor, &dentry->d_subdirs,
+                                           offset - 2, NULL);
+               spin_lock(&dentry->d_lock);
+               if (to)
+                       list_move(&cursor->d_child, &to->d_child);
+               else
+                       list_del_init(&cursor->d_child);
+               spin_unlock(&dentry->d_lock);
+               dput(to);
+
                file->f_pos = offset;
-               if (file->f_pos >= 2) {
-                       struct dentry *cursor = file->private_data;
-                       struct dentry *to;
-                       loff_t n = file->f_pos - 2;
-
-                       inode_lock_shared(dentry->d_inode);
-                       to = next_positive(dentry, &dentry->d_subdirs, n);
-                       move_cursor(cursor, to ? &to->d_child : NULL);
-                       inode_unlock_shared(dentry->d_inode);
-               }
+
+               inode_unlock_shared(dentry->d_inode);
        }
        return offset;
 }
@@ -190,25 +185,35 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
        struct dentry *dentry = file->f_path.dentry;
        struct dentry *cursor = file->private_data;
-       struct list_head *p = &cursor->d_child;
-       struct dentry *next;
-       bool moved = false;
+       struct list_head *anchor = &dentry->d_subdirs;
+       struct dentry *next = NULL;
+       struct list_head *p;
 
        if (!dir_emit_dots(file, ctx))
                return 0;
 
        if (ctx->pos == 2)
-               p = &dentry->d_subdirs;
-       while ((next = next_positive(dentry, p, 1)) != NULL) {
+               p = anchor;
+       else if (!list_empty(&cursor->d_child))
+               p = &cursor->d_child;
+       else
+               return 0;
+
+       while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
                if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
                              d_inode(next)->i_ino, dt_type(d_inode(next))))
                        break;
-               moved = true;
-               p = &next->d_child;
                ctx->pos++;
+               p = &next->d_child;
        }
-       if (moved)
-               move_cursor(cursor, p);
+       spin_lock(&dentry->d_lock);
+       if (next)
+               list_move_tail(&cursor->d_child, &next->d_child);
+       else
+               list_del_init(&cursor->d_child);
+       spin_unlock(&dentry->d_lock);
+       dput(next);
+
        return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
@@ -468,8 +473,7 @@ EXPORT_SYMBOL(simple_write_begin);
 
 /**
  * simple_write_end - .write_end helper for non-block-device FSes
- * @available: See .write_end of address_space_operations
- * @file:              "
+ * @file: See .write_end of address_space_operations
  * @mapping:           "
  * @pos:               "
  * @len:               "
index 222d711..040a50f 100644 (file)
 
 static struct kmem_cache *nfs_direct_cachep;
 
-/*
- * This represents a set of asynchronous requests that we're waiting on
- */
-struct nfs_direct_mirror {
-       ssize_t count;
-};
-
 struct nfs_direct_req {
        struct kref             kref;           /* release manager */
 
@@ -84,9 +77,6 @@ struct nfs_direct_req {
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */
 
-       struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
-       int                     mirror_count;
-
        loff_t                  io_start;       /* Start offset for I/O */
        ssize_t                 count,          /* bytes actually processed */
                                max_count,      /* max expected count */
@@ -123,32 +113,42 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 }
 
 static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+                           const struct nfs_pgio_header *hdr,
+                           ssize_t dreq_len)
 {
-       int i;
-       ssize_t count;
+       if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+             test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+               return;
+       if (dreq->max_count >= dreq_len) {
+               dreq->max_count = dreq_len;
+               if (dreq->count > dreq_len)
+                       dreq->count = dreq_len;
+
+               if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+                       dreq->error = hdr->error;
+               else /* Clear outstanding error if this is EOF */
+                       dreq->error = 0;
+       }
+}
 
-       WARN_ON_ONCE(dreq->count >= dreq->max_count);
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+                      const struct nfs_pgio_header *hdr)
+{
+       loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+       ssize_t dreq_len = 0;
 
-       if (dreq->mirror_count == 1) {
-               dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-               dreq->count += hdr->good_bytes;
-       } else {
-               /* mirrored writes */
-               count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-               if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-                       count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-                       dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-               }
-               /* update the dreq->count by finding the minimum agreed count from all
-                * mirrors */
-               count = dreq->mirrors[0].count;
+       if (hdr_end > dreq->io_start)
+               dreq_len = hdr_end - dreq->io_start;
 
-               for (i = 1; i < dreq->mirror_count; i++)
-                       count = min(count, dreq->mirrors[i].count);
+       nfs_direct_handle_truncated(dreq, hdr, dreq_len);
 
-               dreq->count = count;
-       }
+       if (dreq_len > dreq->max_count)
+               dreq_len = dreq->max_count;
+
+       if (dreq->count < dreq_len)
+               dreq->count = dreq_len;
 }
 
 /*
@@ -293,18 +293,6 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 }
 
-static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
-                                            struct nfs_pageio_descriptor *pgio,
-                                            struct nfs_page *req)
-{
-       int mirror_count = 1;
-
-       if (pgio->pg_ops->pg_get_mirror_count)
-               mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
-       dreq->mirror_count = mirror_count;
-}
-
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
        struct nfs_direct_req *dreq;
@@ -319,7 +307,6 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;  /* not set yet */
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
-       dreq->mirror_count = 1;
        spin_lock_init(&dreq->lock);
 
        return dreq;
@@ -402,20 +389,12 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
        struct nfs_direct_req *dreq = hdr->dreq;
 
        spin_lock(&dreq->lock);
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-               dreq->error = hdr->error;
-
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }
 
-       if (hdr->good_bytes != 0)
-               nfs_direct_good_bytes(dreq, hdr);
-
-       if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
-               dreq->error = 0;
-
+       nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
@@ -646,29 +625,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);
-       int i;
 
        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
        dreq->count = 0;
+       dreq->max_count = 0;
+       list_for_each_entry(req, &reqs, wb_list)
+               dreq->max_count += req->wb_bytes;
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
-       for (i = 0; i < dreq->mirror_count; i++)
-               dreq->mirrors[i].count = 0;
        get_dreq(dreq);
 
        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
 
-       req = nfs_list_entry(reqs.next);
-       nfs_direct_setup_mirroring(dreq, &desc, req);
-       if (desc.pg_error < 0) {
-               list_splice_init(&reqs, &failed);
-               goto out_failed;
-       }
-
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                /* Bump the transmission count */
                req->wb_nio++;
@@ -686,7 +658,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
        }
        nfs_pageio_complete(&desc);
 
-out_failed:
        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
@@ -791,17 +762,13 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
        spin_lock(&dreq->lock);
-
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-               dreq->error = hdr->error;
-
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }
 
+       nfs_direct_count_bytes(dreq, hdr);
        if (hdr->good_bytes != 0) {
-               nfs_direct_good_bytes(dreq, hdr);
                if (nfs_write_need_commit(hdr)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                request_commit = true;
@@ -923,7 +890,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                break;
                        }
 
-                       nfs_direct_setup_mirroring(dreq, &desc, req);
                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
index 11eafcf..ab8ca20 100644 (file)
@@ -6106,6 +6106,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 
        status = nfs4_call_sync_custom(&task_setup_data);
        if (setclientid.sc_cred) {
+               kfree(clp->cl_acceptor);
                clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
                put_rpccred(setclientid.sc_cred);
        }
index 85ca495..52cab65 100644 (file)
@@ -786,7 +786,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *head;
 
-       atomic_long_dec(&nfsi->nrequests);
        if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
                head = req->wb_head;
 
@@ -799,8 +798,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
                spin_unlock(&mapping->private_lock);
        }
 
-       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
                nfs_release_request(req);
+               atomic_long_dec(&nfsi->nrequests);
+       }
 }
 
 static void
index 8de1c9d..9cd0a68 100644 (file)
@@ -2049,7 +2049,8 @@ out_write_size:
                inode->i_mtime = inode->i_ctime = current_time(inode);
                di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
                di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-               ocfs2_update_inode_fsync_trans(handle, inode, 1);
+               if (handle)
+                       ocfs2_update_inode_fsync_trans(handle, inode, 1);
        }
        if (handle)
                ocfs2_journal_dirty(handle, wc->w_di_bh);
@@ -2146,13 +2147,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
        struct ocfs2_dio_write_ctxt *dwc = NULL;
        struct buffer_head *di_bh = NULL;
        u64 p_blkno;
-       loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
+       unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
+       loff_t pos = iblock << i_blkbits;
+       sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
        unsigned len, total_len = bh_result->b_size;
        int ret = 0, first_get_block = 0;
 
        len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
        len = min(total_len, len);
 
+       /*
+        * bh_result->b_size is the count computed in get_more_blocks()
+        * from the write "pos" and "end"; we may need to map twice to
+        * return different buffer states:
+        * 1. area within file size: do not set NEW;
+        * 2. area beyond file size: set NEW.
+        *
+        *                 iblock    endblk
+        * |--------|---------|---------|---------
+        * |<-------area in file------->|
+        */
+
+       if ((iblock <= endblk) &&
+           ((iblock + ((len - 1) >> i_blkbits)) > endblk))
+               len = (endblk - iblock + 1) << i_blkbits;
+
        mlog(0, "get block of %lu at %llu:%u req %u\n",
                        inode->i_ino, pos, len, total_len);
 
@@ -2236,6 +2254,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
        if (desc->c_needs_zero)
                set_buffer_new(bh_result);
 
+       if (iblock > endblk)
+               set_buffer_new(bh_result);
+
        /* May sleep in end_io. It should not happen in an irq context. So defer
         * it to dio work queue. */
        set_buffer_defer_completion(bh_result);
index 2e982db..53939bf 100644 (file)
@@ -1230,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                        transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
                        if (IS_ERR(transfer_to[USRQUOTA])) {
                                status = PTR_ERR(transfer_to[USRQUOTA]);
+                               transfer_to[USRQUOTA] = NULL;
                                goto bail_unlock;
                        }
                }
@@ -1239,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                        transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
                        if (IS_ERR(transfer_to[GRPQUOTA])) {
                                status = PTR_ERR(transfer_to[GRPQUOTA]);
+                               transfer_to[GRPQUOTA] = NULL;
                                goto bail_unlock;
                        }
                }
index d6f7b29..efeea20 100644 (file)
@@ -283,7 +283,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
        if (inode_alloc)
                inode_lock(inode_alloc);
 
-       if (o2info_coherent(&fi->ifi_req)) {
+       if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
                status = ocfs2_inode_lock(inode_alloc, &bh, 0);
                if (status < 0) {
                        mlog_errno(status);
index 930e3d3..699a560 100644 (file)
@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
-       flush_workqueue(osb->ocfs2_wq);
+       if (osb->ocfs2_wq)
+               flush_workqueue(osb->ocfs2_wq);
 
        /*
         * Now that recovery is shut down, and the osb is about to be
index 158e5af..720e9f9 100644 (file)
@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        struct ocfs2_dinode *alloc = NULL;
 
        cancel_delayed_work(&osb->la_enable_wq);
-       flush_workqueue(osb->ocfs2_wq);
+       if (osb->ocfs2_wq)
+               flush_workqueue(osb->ocfs2_wq);
 
        if (osb->local_alloc_state == OCFS2_LA_UNUSED)
                goto out;
index 90c830e..d850797 100644 (file)
@@ -1490,18 +1490,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
        return loc->xl_ops->xlo_check_space(loc, xi);
 }
 
-static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
-{
-       loc->xl_ops->xlo_add_entry(loc, name_hash);
-       loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
-       /*
-        * We can't leave the new entry's xe_name_offset at zero or
-        * add_namevalue() will go nuts.  We set it to the size of our
-        * storage so that it can never be less than any other entry.
-        */
-       loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
-}
-
 static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
                                   struct ocfs2_xattr_info *xi)
 {
@@ -2133,29 +2121,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
        if (rc)
                goto out;
 
-       if (loc->xl_entry) {
-               if (ocfs2_xa_can_reuse_entry(loc, xi)) {
-                       orig_value_size = loc->xl_entry->xe_value_size;
-                       rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
-                       if (rc)
-                               goto out;
-                       goto alloc_value;
-               }
+       if (!loc->xl_entry) {
+               rc = -EINVAL;
+               goto out;
+       }
 
-               if (!ocfs2_xattr_is_local(loc->xl_entry)) {
-                       orig_clusters = ocfs2_xa_value_clusters(loc);
-                       rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
-                       if (rc) {
-                               mlog_errno(rc);
-                               ocfs2_xa_cleanup_value_truncate(loc,
-                                                               "overwriting",
-                                                               orig_clusters);
-                               goto out;
-                       }
+       if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+               orig_value_size = loc->xl_entry->xe_value_size;
+               rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+               if (rc)
+                       goto out;
+               goto alloc_value;
+       }
+
+       if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+               orig_clusters = ocfs2_xa_value_clusters(loc);
+               rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+               if (rc) {
+                       mlog_errno(rc);
+                       ocfs2_xa_cleanup_value_truncate(loc,
+                                                       "overwriting",
+                                                       orig_clusters);
+                       goto out;
                }
-               ocfs2_xa_wipe_namevalue(loc);
-       } else
-               ocfs2_xa_add_entry(loc, name_hash);
+       }
+       ocfs2_xa_wipe_namevalue(loc);
 
        /*
         * If we get here, we have a blank entry.  Fill it.  We grow our
index ac92473..8c1f1bb 100644 (file)
@@ -132,9 +132,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
        show_val_kb(m, "ShmemPmdMapped: ",
                    global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
-       show_val_kb(m, "FileHugePages: ",
+       show_val_kb(m, "FileHugePages:  ",
                    global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
-       show_val_kb(m, "FilePmdMapped: ",
+       show_val_kb(m, "FilePmdMapped:  ",
                    global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
 #endif
 
index 544d1ee..7c952ee 100644 (file)
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires identifying
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
+
                if (!ppage || PageSlab(ppage) || page_has_type(ppage))
                        pcount = 0;
                else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires identifying
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
 
                if (put_user(stable_page_flags(ppage), out)) {
                        ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires identifying
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
 
                if (ppage)
                        ino = page_cgroup_ino(ppage);
index 2f6a453..d26d5ea 100644 (file)
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
 #include <linux/compat.h>
-
 #include <linux/uaccess.h>
 
+#include <asm/unaligned.h>
+
+/*
+ * Note the "unsafe_put_user() semantics: we goto a
+ * label for errors.
+ */
+#define unsafe_copy_dirent_name(_dst, _src, _len, label) do {  \
+       char __user *dst = (_dst);                              \
+       const char *src = (_src);                               \
+       size_t len = (_len);                                    \
+       unsafe_put_user(0, dst+len, label);                     \
+       unsafe_copy_to_user(dst, src, len, label);              \
+} while (0)
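
In case the error-label convention is not obvious from the macro alone, here is a condensed view of the pattern it is built for, mirroring the filldir() conversion further down in this diff (buffer bookkeeping omitted):

	if (!user_access_begin(dirent, sizeof(*dirent)))
		goto efault;
	unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
	unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
	user_access_end();
	return 0;
efault_end:
	user_access_end();
efault:
	return -EFAULT;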
+
+
 int iterate_dir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
@@ -65,6 +79,40 @@ out:
 EXPORT_SYMBOL(iterate_dir);
 
 /*
+ * POSIX says that a dirent name cannot contain NULL or a '/'.
+ *
+ * It's not 100% clear what we should really do in this case.
+ * The filesystem is clearly corrupted, but returning a hard
+ * error means that you now don't see any of the other names
+ * either, so that isn't a perfect alternative.
+ *
+ * And if you return an error, what error do you use? Several
+ * filesystems seem to have decided on EUCLEAN being the error
+ * code for EFSCORRUPTED, and that may be the error to use. Or
+ * just EIO, which is perhaps more obvious to users.
+ *
+ * In order to see the other file names in the directory, the
+ * caller might want to make this a "soft" error: skip the
+ * entry, and return the error at the end instead.
+ *
+ * Note that this should likely do a "memchr(name, 0, len)"
+ * check too, since that would be filesystem corruption as
+ * well. However, that case can't actually confuse user space,
+ * which has to do a strlen() on the name anyway to find the
+ * filename length, and the above "soft error" worry means
+ * that it's probably better left alone until we have that
+ * issue clarified.
+ */
+static int verify_dirent_name(const char *name, int len)
+{
+       if (!len)
+               return -EIO;
+       if (memchr(name, '/', len))
+               return -EIO;
+       return 0;
+}
+
+/*
  * Traditional linux readdir() handling..
  *
  * "count=1" is a special case, meaning that the buffer is one
@@ -173,6 +221,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
        int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
                sizeof(long));
 
+       buf->error = verify_dirent_name(name, namlen);
+       if (unlikely(buf->error))
+               return buf->error;
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
@@ -182,28 +233,31 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                return -EOVERFLOW;
        }
        dirent = buf->previous;
-       if (dirent) {
-               if (signal_pending(current))
-                       return -EINTR;
-               if (__put_user(offset, &dirent->d_off))
-                       goto efault;
-       }
-       dirent = buf->current_dir;
-       if (__put_user(d_ino, &dirent->d_ino))
-               goto efault;
-       if (__put_user(reclen, &dirent->d_reclen))
-               goto efault;
-       if (copy_to_user(dirent->d_name, name, namlen))
-               goto efault;
-       if (__put_user(0, dirent->d_name + namlen))
-               goto efault;
-       if (__put_user(d_type, (char __user *) dirent + reclen - 1))
+       if (dirent && signal_pending(current))
+               return -EINTR;
+
+       /*
+        * Note! This range-checks 'previous' (which may be NULL).
+        * The real range was checked in getdents
+        */
+       if (!user_access_begin(dirent, sizeof(*dirent)))
                goto efault;
+       if (dirent)
+               unsafe_put_user(offset, &dirent->d_off, efault_end);
+       dirent = buf->current_dir;
+       unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
+       unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+       unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
+       unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
+       user_access_end();
+
        buf->previous = dirent;
        dirent = (void __user *)dirent + reclen;
        buf->current_dir = dirent;
        buf->count -= reclen;
        return 0;
+efault_end:
+       user_access_end();
 efault:
        buf->error = -EFAULT;
        return -EFAULT;
@@ -259,34 +313,38 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
        int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
                sizeof(u64));
 
+       buf->error = verify_dirent_name(name, namlen);
+       if (unlikely(buf->error))
+               return buf->error;
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
        dirent = buf->previous;
-       if (dirent) {
-               if (signal_pending(current))
-                       return -EINTR;
-               if (__put_user(offset, &dirent->d_off))
-                       goto efault;
-       }
-       dirent = buf->current_dir;
-       if (__put_user(ino, &dirent->d_ino))
-               goto efault;
-       if (__put_user(0, &dirent->d_off))
-               goto efault;
-       if (__put_user(reclen, &dirent->d_reclen))
-               goto efault;
-       if (__put_user(d_type, &dirent->d_type))
-               goto efault;
-       if (copy_to_user(dirent->d_name, name, namlen))
-               goto efault;
-       if (__put_user(0, dirent->d_name + namlen))
+       if (dirent && signal_pending(current))
+               return -EINTR;
+
+       /*
+        * Note! This range-checks 'previous' (which may be NULL).
+        * The real range was checked in getdents
+        */
+       if (!user_access_begin(dirent, sizeof(*dirent)))
                goto efault;
+       if (dirent)
+               unsafe_put_user(offset, &dirent->d_off, efault_end);
+       dirent = buf->current_dir;
+       unsafe_put_user(ino, &dirent->d_ino, efault_end);
+       unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+       unsafe_put_user(d_type, &dirent->d_type, efault_end);
+       unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
+       user_access_end();
+
        buf->previous = dirent;
        dirent = (void __user *)dirent + reclen;
        buf->current_dir = dirent;
        buf->count -= reclen;
        return 0;
+efault_end:
+       user_access_end();
 efault:
        buf->error = -EFAULT;
        return -EFAULT;
index eea7af6..2616424 100644 (file)
@@ -318,19 +318,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
        struct compat_statfs64 buf;
-       if (sizeof(ubuf->f_bsize) == 4) {
-               if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
-                    kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
-                       return -EOVERFLOW;
-               /* f_files and f_ffree may be -1; it's okay
-                * to stuff that into 32 bits */
-               if (kbuf->f_files != 0xffffffffffffffffULL
-                && (kbuf->f_files & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-               if (kbuf->f_ffree != 0xffffffffffffffffULL
-                && (kbuf->f_ffree & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-       }
+
+       if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+               return -EOVERFLOW;
+
        memset(&buf, 0, sizeof(struct compat_statfs64));
        buf.f_type = kbuf->f_type;
        buf.f_bsize = kbuf->f_bsize;
index f627b7c..cfadab2 100644 (file)
@@ -1300,6 +1300,7 @@ int get_tree_bdev(struct fs_context *fc,
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
+               blkdev_put(bdev, mode);
                warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
                return -EBUSY;
        }
@@ -1308,8 +1309,10 @@ int get_tree_bdev(struct fs_context *fc,
        fc->sget_key = bdev;
        s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-       if (IS_ERR(s))
+       if (IS_ERR(s)) {
+               blkdev_put(bdev, mode);
                return PTR_ERR(s);
+       }
 
        if (s->s_root) {
                /* Don't summarily change the RO/RW state. */
index 9fc14e3..0caa151 100644 (file)
 #include <linux/namei.h>
 #include <linux/tracefs.h>
 #include <linux/fsnotify.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/parser.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
-#include <linux/security.h>
 
 #define TRACEFS_DEFAULT_MODE   0700
 
@@ -28,25 +28,6 @@ static struct vfsmount *tracefs_mount;
 static int tracefs_mount_count;
 static bool tracefs_registered;
 
-static int default_open_file(struct inode *inode, struct file *filp)
-{
-       struct dentry *dentry = filp->f_path.dentry;
-       struct file_operations *real_fops;
-       int ret;
-
-       if (!dentry)
-               return -EINVAL;
-
-       ret = security_locked_down(LOCKDOWN_TRACEFS);
-       if (ret)
-               return ret;
-
-       real_fops = dentry->d_fsdata;
-       if (!real_fops->open)
-               return 0;
-       return real_fops->open(inode, filp);
-}
-
 static ssize_t default_read_file(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
@@ -241,12 +222,6 @@ static int tracefs_apply_options(struct super_block *sb)
        return 0;
 }
 
-static void tracefs_destroy_inode(struct inode *inode)
-{
-       if (S_ISREG(inode->i_mode))
-               kfree(inode->i_fop);
-}
-
 static int tracefs_remount(struct super_block *sb, int *flags, char *data)
 {
        int err;
@@ -283,7 +258,6 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
 static const struct super_operations tracefs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = tracefs_remount,
-       .destroy_inode  = tracefs_destroy_inode,
        .show_options   = tracefs_show_options,
 };
 
@@ -414,10 +388,12 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
                                   struct dentry *parent, void *data,
                                   const struct file_operations *fops)
 {
-       struct file_operations *proxy_fops;
        struct dentry *dentry;
        struct inode *inode;
 
+       if (security_locked_down(LOCKDOWN_TRACEFS))
+               return NULL;
+
        if (!(mode & S_IFMT))
                mode |= S_IFREG;
        BUG_ON(!S_ISREG(mode));
@@ -430,20 +406,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
        if (unlikely(!inode))
                return failed_creating(dentry);
 
-       proxy_fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
-       if (unlikely(!proxy_fops)) {
-               iput(inode);
-               return failed_creating(dentry);
-       }
-
-       if (!fops)
-               fops = &tracefs_file_operations;
-
-       dentry->d_fsdata = (void *)fops;
-       memcpy(proxy_fops, fops, sizeof(*proxy_fops));
-       proxy_fops->open = default_open_file;
        inode->i_mode = mode;
-       inode->i_fop = proxy_fops;
+       inode->i_fop = fops ? fops : &tracefs_file_operations;
        inode->i_private = data;
        d_instantiate(dentry, inode);
        fsnotify_create(dentry->d_parent->d_inode, dentry);
index 5de296b..14fbdf2 100644 (file)
@@ -28,12 +28,11 @@ xfs_get_aghdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
-       int                     flags,
        const struct xfs_buf_ops *ops)
 {
        struct xfs_buf          *bp;
 
-       bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
+       bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
        if (!bp)
                return NULL;
 
@@ -345,7 +344,7 @@ xfs_ag_init_hdr(
 {
        struct xfs_buf          *bp;
 
-       bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
+       bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
        if (!bp)
                return -ENOMEM;
 
index b9f0196..f0089e8 100644 (file)
@@ -826,32 +826,17 @@ xfs_attr_shortform_to_leaf(
        sf = (xfs_attr_shortform_t *)tmpbuffer;
 
        xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
-       xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);
+       xfs_bmap_local_to_extents_empty(args->trans, dp, XFS_ATTR_FORK);
 
        bp = NULL;
        error = xfs_da_grow_inode(args, &blkno);
-       if (error) {
-               /*
-                * If we hit an IO error middle of the transaction inside
-                * grow_inode(), we may have inconsistent data. Bail out.
-                */
-               if (error == -EIO)
-                       goto out;
-               xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
-               memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
+       if (error)
                goto out;
-       }
 
        ASSERT(blkno == 0);
        error = xfs_attr3_leaf_create(args, blkno, &bp);
-       if (error) {
-               /* xfs_attr3_leaf_create may not have instantiated a block */
-               if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
-                       goto out;
-               xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
-               memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
+       if (error)
                goto out;
-       }
 
        memset((char *)&nargs, 0, sizeof(nargs));
        nargs.dp = dp;
index 4edc25a..02469d5 100644 (file)
@@ -792,6 +792,7 @@ out_root_realloc:
  */
 void
 xfs_bmap_local_to_extents_empty(
+       struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork)
 {
@@ -808,6 +809,7 @@ xfs_bmap_local_to_extents_empty(
        ifp->if_u1.if_root = NULL;
        ifp->if_height = 0;
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 }
 
 
@@ -840,7 +842,7 @@ xfs_bmap_local_to_extents(
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
 
        if (!ifp->if_bytes) {
-               xfs_bmap_local_to_extents_empty(ip, whichfork);
+               xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
                flags = XFS_ILOG_CORE;
                goto done;
        }
@@ -887,7 +889,7 @@ xfs_bmap_local_to_extents(
 
        /* account for the change in fork size */
        xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
-       xfs_bmap_local_to_extents_empty(ip, whichfork);
+       xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
        flags |= XFS_ILOG_CORE;
 
        ifp->if_u1.if_root = NULL;
index 5bb446d..e2798c6 100644 (file)
@@ -182,7 +182,8 @@ void        xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
                xfs_filblks_t len);
 int    xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 int    xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
-void   xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+void   xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
+               struct xfs_inode *ip, int whichfork);
 void   __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
                xfs_filblks_t len, const struct xfs_owner_info *oinfo,
                bool skip_discard);
index 9595ced..49e4bc3 100644 (file)
@@ -1096,7 +1096,7 @@ xfs_dir2_sf_to_block(
        memcpy(sfp, oldsfp, ifp->if_bytes);
 
        xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
-       xfs_bmap_local_to_extents_empty(dp, XFS_DATA_FORK);
+       xfs_bmap_local_to_extents_empty(tp, dp, XFS_DATA_FORK);
        dp->i_d.di_size = 0;
 
        /*
index 39dd2b9..e9371a8 100644 (file)
@@ -366,11 +366,11 @@ struct xfs_bulkstat {
        uint64_t        bs_blocks;      /* number of blocks             */
        uint64_t        bs_xflags;      /* extended flags               */
 
-       uint64_t        bs_atime;       /* access time, seconds         */
-       uint64_t        bs_mtime;       /* modify time, seconds         */
+       int64_t         bs_atime;       /* access time, seconds         */
+       int64_t         bs_mtime;       /* modify time, seconds         */
 
-       uint64_t        bs_ctime;       /* inode change time, seconds   */
-       uint64_t        bs_btime;       /* creation time, seconds       */
+       int64_t         bs_ctime;       /* inode change time, seconds   */
+       int64_t         bs_btime;       /* creation time, seconds       */
 
        uint32_t        bs_gen;         /* generation count             */
        uint32_t        bs_uid;         /* user id                      */
index 93b3793..0cab11a 100644 (file)
@@ -341,7 +341,6 @@ xchk_refcountbt_rec(
        xfs_extlen_t            len;
        xfs_nlink_t             refcount;
        bool                    has_cowflag;
-       int                     error = 0;
 
        bno = be32_to_cpu(rec->refc.rc_startblock);
        len = be32_to_cpu(rec->refc.rc_blockcount);
@@ -366,7 +365,7 @@ xchk_refcountbt_rec(
 
        xchk_refcountbt_xref(bs->sc, bno, len, refcount);
 
-       return error;
+       return 0;
 }
 
 /* Make sure we have as many refc blocks as the rmap says. */
index 0910cb7..4f44370 100644 (file)
@@ -864,6 +864,7 @@ xfs_alloc_file_space(
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
+       xfs_fileoff_t           endoffset_fsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
@@ -891,7 +892,8 @@ xfs_alloc_file_space(
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
-       allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+       endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
+       allocatesize_fsb = endoffset_fsb - startoffset_fsb;
 
        /*
         * Allocate file space until done or until there is an error
index 21c2436..0abba17 100644 (file)
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
        unsigned short          page_count, i;
        xfs_off_t               start, end;
        int                     error;
+       xfs_km_flags_t          kmflag_mask = 0;
+
+       /*
+        * ensure a zeroed buffer for non-read cases.
+        */
+       if (!(flags & XBF_READ)) {
+               kmflag_mask |= KM_ZERO;
+               gfp_mask |= __GFP_ZERO;
+       }
 
        /*
         * for buffers that are contained within a single page, just allocate
@@ -354,7 +363,8 @@ xfs_buf_allocate_memory(
        size = BBTOB(bp->b_length);
        if (size < PAGE_SIZE) {
                int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-               bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS);
+               bp->b_addr = kmem_alloc_io(size, align_mask,
+                                          KM_NOFS | kmflag_mask);
                if (!bp->b_addr) {
                        /* low memory - use alloc_page loop instead */
                        goto use_alloc_page;
index a2beee9..641d07f 100644 (file)
@@ -1443,7 +1443,7 @@ xlog_alloc_log(
                prev_iclog = iclog;
 
                iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
-                                               KM_MAYFAIL);
+                                               KM_MAYFAIL | KM_ZERO);
                if (!iclog->ic_data)
                        goto out_free_iclog;
 #ifdef DEBUG
index 5083190..c1a514f 100644 (file)
@@ -127,7 +127,7 @@ xlog_alloc_buffer(
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);
-       return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL);
+       return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
 }
 
 /*
index adff14f..ddfee1b 100644 (file)
@@ -4,4 +4,5 @@
 # (This file is not included when SRCARCH=um since UML borrows several
 # asm headers from the host architecture.)
 
+mandatory-y += msi.h
 mandatory-y += simd.h
index 514bffa..fa19e01 100644 (file)
@@ -46,6 +46,8 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev);
 int pci_disable_pcie_error_reporting(struct pci_dev *dev);
 int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
 int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+void pci_save_aer_state(struct pci_dev *dev);
+void pci_restore_aer_state(struct pci_dev *dev);
 #else
 static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
 {
@@ -63,6 +65,8 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
 {
        return -EINVAL;
 }
+static inline void pci_save_aer_state(struct pci_dev *dev) {}
+static inline void pci_restore_aer_state(struct pci_dev *dev) {}
 #endif
 
 void cper_print_aer(struct pci_dev *dev, int aer_severity,
index 90528f1..29fc933 100644 (file)
@@ -326,10 +326,11 @@ static inline int bitmap_equal(const unsigned long *src1,
 }
 
 /**
- * bitmap_or_equal - Check whether the or of two bitnaps is equal to a third
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
  * @src1:      Pointer to bitmap 1
  * @src2:      Pointer to bitmap 2 will be or'ed with bitmap 1
  * @src3:      Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits:     number of bits in each of these bitmaps
  *
  * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
  */
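
As a quick illustration of the documented semantics (the bit positions below are made up), bitmap_or_equal() reports whether the OR of the first two bitmaps matches the third over the given number of bits:

	DECLARE_BITMAP(a, 8);
	DECLARE_BITMAP(b, 8);
	DECLARE_BITMAP(c, 8);

	bitmap_zero(a, 8);
	bitmap_zero(b, 8);
	bitmap_zero(c, 8);
	__set_bit(1, a);
	__set_bit(3, b);
	__set_bit(1, c);
	__set_bit(3, c);

	/* true: (a | b) == c over the first 8 bits */
	WARN_ON(!bitmap_or_equal(a, b, c, 8));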
index cf074bc..c94a9ff 100644 (file)
@@ -4,6 +4,13 @@
 #include <asm/types.h>
 #include <linux/bits.h>
 
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
+#else
+#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
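
A short worked illustration of the new helper (not part of the patch), assuming BITS_PER_LONG == 64:

/*
 * Little endian:
 *   aligned_byte_mask(1) == 0x00000000000000ff
 *   aligned_byte_mask(2) == 0x000000000000ffff
 *   aligned_byte_mask(7) == 0x00ffffffffffffff
 * Big endian:
 *   aligned_byte_mask(1) == 0xff00000000000000
 *   aligned_byte_mask(2) == 0xffff000000000000
 * Either way the mask covers the first 'n' bytes of a long as it sits
 * in memory.
 */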
+
 #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
 
index 6b318ef..cdf0165 100644 (file)
@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__             1
 # define __GCC4_has_attribute___nonstring__           0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___fallthrough__         0
 #endif
 
 /*
 #endif
 
 /*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ *   break;
+ *   fallthrough;
+ *   goto <label>;
+ *   return [expression];
+ *
+ *  gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes
+ */
+#if __has_attribute(__fallthrough__)
+# define fallthrough                    __attribute__((__fallthrough__))
+#else
+# define fallthrough                    do {} while (0)  /* fallthrough */
+#endif
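
A minimal usage sketch (the switch, its labels and helpers are illustrative, not from this patch): the keyword marks an intentional fall-through between case labels, replacing the old comment-based annotation:

	switch (event) {
	case EVENT_PREPARE:
		prepare();
		fallthrough;
	case EVENT_START:
		start();
		break;
	default:
		return -EINVAL;
	}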
+
+/*
  * Note the missing underscores.
  *
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
index 79435cf..897e799 100644 (file)
@@ -31,6 +31,8 @@
 #define SJA1105_META_SMAC                      0x222222222222ull
 #define SJA1105_META_DMAC                      0x0180C200000Eull
 
+#define SJA1105_HWTS_RX_EN                     0
+
 /* Global tagger data: each struct sja1105_port has a reference to
  * the structure defined in struct sja1105_private.
  */
@@ -42,7 +44,7 @@ struct sja1105_tagger_data {
         * from taggers running on multiple ports on SMP systems
         */
        spinlock_t meta_lock;
-       bool hwts_rx_en;
+       unsigned long state;
 };
 
 struct sja1105_skb_cb {
index 95f55b7..621158e 100644 (file)
@@ -52,10 +52,10 @@ extern struct module __this_module;
        __ADDRESSABLE(sym)                                              \
        asm("   .section \"___ksymtab" sec "+" #sym "\", \"a\"  \n"     \
            "   .balign 4                                       \n"     \
-           "__ksymtab_" #sym NS_SEPARATOR #ns ":               \n"     \
+           "__ksymtab_" #ns NS_SEPARATOR #sym ":               \n"     \
            "   .long   " #sym "- .                             \n"     \
            "   .long   __kstrtab_" #sym "- .                   \n"     \
-           "   .long   __kstrtab_ns_" #sym "- .                \n"     \
+           "   .long   __kstrtabns_" #sym "- .                 \n"     \
            "   .previous                                       \n")
 
 #define __KSYMTAB_ENTRY(sym, sec)                                      \
@@ -76,10 +76,10 @@ struct kernel_symbol {
 #else
 #define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
        static const struct kernel_symbol __ksymtab_##sym##__##ns       \
-       asm("__ksymtab_" #sym NS_SEPARATOR #ns)                         \
+       asm("__ksymtab_" #ns NS_SEPARATOR #sym)                         \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
-       = { (unsigned long)&sym, __kstrtab_##sym, __kstrtab_ns_##sym }
+       = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
 
 #define __KSYMTAB_ENTRY(sym, sec)                                      \
        static const struct kernel_symbol __ksymtab_##sym               \
@@ -112,7 +112,7 @@ struct kernel_symbol {
 /* For every exported symbol, place a struct in the __ksymtab section */
 #define ___EXPORT_SYMBOL_NS(sym, sec, ns)                              \
        ___export_symbol_common(sym, sec);                              \
-       static const char __kstrtab_ns_##sym[]                          \
+       static const char __kstrtabns_##sym[]                           \
        __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
        = #ns;                                                          \
        __KSYMTAB_ENTRY_NS(sym, sec, ns)
index f8245d6..5dd9c98 100644 (file)
@@ -202,6 +202,14 @@ struct gpio_irq_chip {
        bool threaded;
 
        /**
+        * @init_hw: optional routine to initialize hardware before
+        * an IRQ chip will be added. This is quite useful when
+        * a particular driver wants to clear IRQ related registers
+        * in order to avoid undesired events.
+        */
+       int (*init_hw)(struct gpio_chip *chip);
+
+       /**
         * @init_valid_mask: optional routine to initialize @valid_mask, to be
         * used if not all GPIO lines are valid interrupts. Sometimes some
         * lines just cannot fire interrupts, and this routine, when defined,
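
A hedged sketch of how a driver might use the new @init_hw hook; all "mychip" names and registers are hypothetical, while gpio_irq_chip.init_hw, gpiochip_get_data() and writel() are existing kernel interfaces:

static int mychip_irq_init_hw(struct gpio_chip *gc)
{
	struct mychip *chip = gpiochip_get_data(gc);

	/* mask and ack every line so no stale event fires once the
	 * IRQ chip is added
	 */
	writel(0, chip->base + MYCHIP_IRQ_ENABLE);
	writel(~0, chip->base + MYCHIP_IRQ_STATUS);
	return 0;
}

	/* in probe, before the gpiochip is registered: */
	chip->gc.irq.init_hw = mychip_irq_init_hw;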
index 04c36b7..7257916 100644 (file)
@@ -235,7 +235,7 @@ enum hwmon_power_attributes {
 #define HWMON_P_LABEL                  BIT(hwmon_power_label)
 #define HWMON_P_ALARM                  BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM              BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM              BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM              BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM              BIT(hwmon_power_max_alarm)
 #define HWMON_P_LCRIT_ALARM            BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM             BIT(hwmon_power_crit_alarm)
index fcb46b3..719fc3e 100644 (file)
@@ -1090,6 +1090,7 @@ enum kvm_stat_kind {
 
 struct kvm_stat_data {
        int offset;
+       int mode;
        struct kvm *kvm;
 };
 
@@ -1097,6 +1098,7 @@ struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
+       int mode;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
index b8df711..efb309d 100644 (file)
@@ -247,7 +247,7 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
 /**
  * led_set_brightness_sync - set LED brightness synchronously
  * @led_cdev: the LED to set
- * @brightness: the brightness to set it to
+ * @value: the brightness to set it to
  *
  * Set an LED's brightness immediately. This function will block
  * the caller for the time required for accessing device registers,
@@ -301,8 +301,7 @@ extern void led_sysfs_enable(struct led_classdev *led_cdev);
 /**
  * led_compose_name - compose LED class device name
  * @dev: LED controller device object
- * @child: child fwnode_handle describing a LED or a group of synchronized LEDs;
- *        it must be provided only for fwnode based LEDs
+ * @init_data: the LED class device initialization data
  * @led_classdev_name: composed LED class device name
  *
  * Create LED class device name based on the provided init_data argument.
index 9b60863..ae703ea 100644 (file)
@@ -356,6 +356,19 @@ static inline bool mem_cgroup_disabled(void)
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+                                                 bool in_low_reclaim)
+{
+       if (mem_cgroup_disabled())
+               return 0;
+
+       if (in_low_reclaim)
+               return READ_ONCE(memcg->memory.emin);
+
+       return max(READ_ONCE(memcg->memory.emin),
+                  READ_ONCE(memcg->memory.elow));
+}
+
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                                                struct mem_cgroup *memcg);
 
@@ -537,6 +550,8 @@ void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
+
 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                struct task_struct *p);
 
@@ -829,6 +844,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+                                                 bool in_low_reclaim)
+{
+       return 0;
+}
+
 static inline enum mem_cgroup_protection mem_cgroup_protected(
        struct mem_cgroup *root, struct mem_cgroup *memcg)
 {
@@ -968,6 +989,11 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
        return 0;
 }
 
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
 static inline void
 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
@@ -1264,6 +1290,9 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
 {
+       if (mem_cgroup_disabled())
+               return;
+
        if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(page, wb);
 }
index ad24554..75f880c 100644 (file)
@@ -31,7 +31,7 @@
 #define PHY_ID_KSZ886X         0x00221430
 #define PHY_ID_KSZ8863         0x00221435
 
-#define PHY_ID_KSZ8795         0x00221550
+#define PHY_ID_KSZ87XX         0x00221550
 
 #define        PHY_ID_KSZ9477          0x00221631
 
index 5cd824c..4ce8901 100644 (file)
@@ -455,6 +455,15 @@ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
                         lp_advertising, lpa & LPA_LPACK);
 }
 
+static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
+                                                  u32 ctrl1000)
+{
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
+                        ctrl1000 & ADVERTISE_1000HALF);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
+                        ctrl1000 & ADVERTISE_1000FULL);
+}
+
 /**
  * linkmode_adv_to_lcl_adv_t
  * @advertising:pointer to linkmode advertising
index 21a89c4..29658c0 100644 (file)
@@ -2,11 +2,10 @@
 #ifndef __OF_PCI_H
 #define __OF_PCI_H
 
-#include <linux/pci.h>
-#include <linux/msi.h>
+#include <linux/types.h>
+#include <linux/errno.h>
 
 struct pci_dev;
-struct of_phandle_args;
 struct device_node;
 
 #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
index 682fd46..cfce186 100644 (file)
@@ -18,7 +18,7 @@ struct page_ext_operations {
 
 enum page_ext_flags {
        PAGE_EXT_OWNER,
-       PAGE_EXT_OWNER_ACTIVE,
+       PAGE_EXT_OWNER_ALLOCATED,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
        PAGE_EXT_YOUNG,
        PAGE_EXT_IDLE,
@@ -36,6 +36,7 @@ struct page_ext {
        unsigned long flags;
 };
 
+extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
 #ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(const struct page *page);
 
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+       void *next = curr;
+       next += page_ext_size;
+       return next;
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
index 1ebb88e..5d62e78 100644 (file)
@@ -4,74 +4,39 @@
 
 #include <linux/pci.h>
 
-#ifdef CONFIG_PCI_PRI
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else /* CONFIG_PCI_ATS */
+static inline int pci_enable_ats(struct pci_dev *d, int ps)
+{ return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d)
+{ return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev)
+{ return 0; }
+#endif /* CONFIG_PCI_ATS */
 
+#ifdef CONFIG_PCI_PRI
 int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
 void pci_disable_pri(struct pci_dev *pdev);
-void pci_restore_pri_state(struct pci_dev *pdev);
 int pci_reset_pri(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PRI */
-
-static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
-{
-       return -ENODEV;
-}
-
-static inline void pci_disable_pri(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pri_state(struct pci_dev *pdev)
-{
-}
-
-static inline int pci_reset_pri(struct pci_dev *pdev)
-{
-       return -ENODEV;
-}
-
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
 #endif /* CONFIG_PCI_PRI */
 
 #ifdef CONFIG_PCI_PASID
-
 int pci_enable_pasid(struct pci_dev *pdev, int features);
 void pci_disable_pasid(struct pci_dev *pdev);
-void pci_restore_pasid_state(struct pci_dev *pdev);
 int pci_pasid_features(struct pci_dev *pdev);
 int pci_max_pasids(struct pci_dev *pdev);
-int pci_prg_resp_pasid_required(struct pci_dev *pdev);
-
-#else  /* CONFIG_PCI_PASID */
-
-static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
-{
-       return -EINVAL;
-}
-
-static inline void pci_disable_pasid(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pasid_state(struct pci_dev *pdev)
-{
-}
-
+#else /* CONFIG_PCI_PASID */
 static inline int pci_pasid_features(struct pci_dev *pdev)
-{
-       return -EINVAL;
-}
-
+{ return -EINVAL; }
 static inline int pci_max_pasids(struct pci_dev *pdev)
-{
-       return -EINVAL;
-}
-
-static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
-       return 0;
-}
+{ return -EINVAL; }
 #endif /* CONFIG_PCI_PASID */
 
-
-#endif /* LINUX_PCI_ATS_H*/
+#endif /* LINUX_PCI_ATS_H */
index f641bad..56f1846 100644 (file)
@@ -117,7 +117,7 @@ struct pci_epc_features {
        unsigned int    msix_capable : 1;
        u8      reserved_bar;
        u8      bar_fixed_64bit;
-       u64     bar_fixed_size[BAR_5 + 1];
+       u64     bar_fixed_size[PCI_STD_NUM_BARS];
        size_t  align;
 };
 
index f9088c8..0b7a177 100644 (file)
@@ -82,7 +82,7 @@ enum pci_mmap_state {
 enum {
        /* #0-5: standard PCI resources */
        PCI_STD_RESOURCES,
-       PCI_STD_RESOURCE_END = 5,
+       PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
 
        /* #6: expansion ROM resource */
        PCI_ROM_RESOURCE,
@@ -284,7 +284,6 @@ struct irq_affinity;
 struct pcie_link_state;
 struct pci_vpd;
 struct pci_sriov;
-struct pci_ats;
 struct pci_p2pdma;
 
 /* The pci_dev structure describes PCI devices */
@@ -452,12 +451,14 @@ struct pci_dev {
        };
        u16             ats_cap;        /* ATS Capability offset */
        u8              ats_stu;        /* ATS Smallest Translation Unit */
-       atomic_t        ats_ref_cnt;    /* Number of VFs with ATS enabled */
 #endif
 #ifdef CONFIG_PCI_PRI
+       u16             pri_cap;        /* PRI Capability offset */
        u32             pri_reqs_alloc; /* Number of PRI requests allocated */
+       unsigned int    pasid_required:1; /* PRG Response PASID Required */
 #endif
 #ifdef CONFIG_PCI_PASID
+       u16             pasid_cap;      /* PASID Capability offset */
        u16             pasid_features;
 #endif
 #ifdef CONFIG_PCI_P2PDMA
@@ -805,8 +806,6 @@ struct module;
  *             The remove function always gets called from process
  *             context, so it can sleep.
  * @suspend:   Put device into low power state.
- * @suspend_late: Put device into low power state.
- * @resume_early: Wake device from low power state.
  * @resume:    Wake device from low power state.
  *             (Please see Documentation/power/pci.rst for descriptions
  *             of PCI Power Management and the related functions.)
@@ -829,8 +828,6 @@ struct pci_driver {
        int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id);     /* New device inserted */
        void (*remove)(struct pci_dev *dev);    /* Device removed (NULL if not a hot-plug capable driver) */
        int  (*suspend)(struct pci_dev *dev, pm_message_t state);       /* Device suspended */
-       int  (*suspend_late)(struct pci_dev *dev, pm_message_t state);
-       int  (*resume_early)(struct pci_dev *dev);
        int  (*resume)(struct pci_dev *dev);    /* Device woken up */
        void (*shutdown)(struct pci_dev *dev);
        int  (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
@@ -1232,7 +1229,7 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
 int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
 int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
                                u16 cap, unsigned int size);
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
 int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
 pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
 bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
@@ -1454,7 +1451,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
 void pci_free_irq_vectors(struct pci_dev *dev);
 int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
 const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
-int pci_irq_get_node(struct pci_dev *pdev, int vec);
 
 #else
 static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1497,11 +1493,6 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
 {
        return cpu_possible_mask;
 }
-
-static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
-       return first_online_node;
-}
 #endif
 
 /**
@@ -1544,9 +1535,13 @@ extern bool pcie_ports_native;
 #define pcie_ports_native      false
 #endif
 
-#define PCIE_LINK_STATE_L0S    1
-#define PCIE_LINK_STATE_L1     2
-#define PCIE_LINK_STATE_CLKPM  4
+#define PCIE_LINK_STATE_L0S            BIT(0)
+#define PCIE_LINK_STATE_L1             BIT(1)
+#define PCIE_LINK_STATE_CLKPM          BIT(2)
+#define PCIE_LINK_STATE_L1_1           BIT(3)
+#define PCIE_LINK_STATE_L1_2           BIT(4)
+#define PCIE_LINK_STATE_L1_1_PCIPM     BIT(5)
+#define PCIE_LINK_STATE_L1_2_PCIPM     BIT(6)
 
 #ifdef CONFIG_PCIEASPM
 int pci_disable_link_state(struct pci_dev *pdev, int state);
@@ -1776,19 +1771,6 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
                                              NULL);
 }
 
-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-int pci_ats_page_aligned(struct pci_dev *dev);
-#else
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
-#endif
-
 /* Include architecture-dependent settings and functions */
 
 #include <asm/pci.h>
@@ -2400,4 +2382,12 @@ void pci_uevent_ers(struct pci_dev *pdev, enum  pci_ers_result err_type);
 #define pci_info_ratelimited(pdev, fmt, arg...) \
        dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
 
+#define pci_WARN(pdev, condition, fmt, arg...) \
+       WARN(condition, "%s %s: " fmt, \
+            dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
+#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
+       WARN_ONCE(condition, "%s %s: " fmt, \
+                 dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
 #endif /* LINUX_PCI_H */
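A minimal, hypothetical use of the pci_WARN_ONCE() helper added above, assuming a driver that wants a one-shot warning tagged with the driver and device name; the BAR-size condition is illustrative.

    static void example_check_bar0(struct pci_dev *pdev)
    {
            /* One-shot warning, prefixed with "<driver> <domain:bus:dev.fn>: " */
            pci_WARN_ONCE(pdev, pci_resource_len(pdev, 0) < 4096,
                          "BAR0 is smaller than the 4K this driver expects\n");
    }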
index a7ecbe0..9a0e981 100644 (file)
@@ -678,6 +678,7 @@ static inline bool phy_is_started(struct phy_device *phydev)
        return phydev->state >= PHY_UP;
 }
 
+void phy_resolve_aneg_pause(struct phy_device *phydev);
 void phy_resolve_aneg_linkmode(struct phy_device *phydev);
 
 /**
@@ -1076,6 +1077,7 @@ int genphy_config_eee_advert(struct phy_device *phydev);
 int __genphy_config_aneg(struct phy_device *phydev, bool changed);
 int genphy_aneg_done(struct phy_device *phydev);
 int genphy_update_link(struct phy_device *phydev);
+int genphy_read_lpa(struct phy_device *phydev);
 int genphy_read_status(struct phy_device *phydev);
 int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
index 1b5cec0..f268840 100644 (file)
@@ -64,6 +64,8 @@ extern struct resource *platform_get_resource_byname(struct platform_device *,
                                                     unsigned int,
                                                     const char *);
 extern int platform_get_irq_byname(struct platform_device *, const char *);
+extern int platform_get_irq_byname_optional(struct platform_device *dev,
+                                           const char *name);
 extern int platform_add_devices(struct platform_device **, int);
 
 struct platform_device_info {
index 2c2e56b..67a1d86 100644 (file)
@@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout);
 extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
index e7d3b1a..7914fda 100644 (file)
@@ -3510,8 +3510,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+                 int mac_len);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
 int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -4160,15 +4161,12 @@ static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
 #endif /* CONFIG_SKB_EXTENSIONS */
 
-static inline void nf_reset(struct sk_buff *skb)
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb_nfct(skb));
        skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
-#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
index ab2b98a..4d2a2fa 100644 (file)
@@ -493,6 +493,10 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
  *
+ * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
+ * bytes. For a power-of-two @size, the alignment is also guaranteed to be
+ * at least @size.
+ *
  * The @flags argument may be one of the GFP flags defined at
  * include/linux/gfp.h and described at
  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
index b2f9df7..b6ccdc2 100644 (file)
@@ -227,7 +227,26 @@ static inline bool strstarts(const char *str, const char *prefix)
 }
 
 size_t memweight(const void *ptr, size_t bytes);
-void memzero_explicit(void *s, size_t count);
+
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ *                   keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * Note: usually using memset() is just fine (!), but in cases
+ * where clearing out _local_ data at the end of a scope is
+ * necessary, memzero_explicit() should be used instead in
+ * order to prevent the compiler from optimising away zeroing.
+ *
+ * memzero_explicit() doesn't need an arch-specific version, as
+ * it simply calls through to the regular memset() implementation.
+ */
+static inline void memzero_explicit(void *s, size_t count)
+{
+       memset(s, 0, count);
+       barrier_data(s);
+}
 
 /**
  * kbasename - return the last part of a pathname.
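A hedged sketch of the typical memzero_explicit() pattern now visible inline above: scrubbing stack-local key material before it goes out of scope. derive_key() is a hypothetical helper, not a kernel API.

    static int example_use_key(const u8 *ikm, size_t len)
    {
            u8 key[32];
            int err;

            err = derive_key(key, sizeof(key), ikm, len);   /* hypothetical */
            /* ... use key ... */

            /* Unlike a plain memset(), this zeroing cannot be optimised away. */
            memzero_explicit(key, sizeof(key));
            return err;
    }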
index 7638dbe..a940de0 100644 (file)
@@ -61,6 +61,7 @@ struct sock_xprt {
        struct mutex            recv_mutex;
        struct sockaddr_storage srcaddr;
        unsigned short          srcport;
+       int                     xprt_err;
 
        /*
         * UDP socket buffer size parameters
index 99617e5..668e25a 100644 (file)
@@ -393,7 +393,7 @@ struct tcp_sock {
        /* fastopen_rsk points to request_sock that resulted in this big
         * socket. Used to retransmit SYNACKs etc.
         */
-       struct request_sock *fastopen_rsk;
+       struct request_sock __rcu *fastopen_rsk;
        u32     *saved_syn;
 };
 
@@ -447,8 +447,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
 
 static inline bool tcp_passive_fastopen(const struct sock *sk)
 {
-       return (sk->sk_state == TCP_SYN_RECV &&
-               tcp_sk(sk)->fastopen_rsk != NULL);
+       return sk->sk_state == TCP_SYN_RECV &&
+              rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
 }
 
 static inline void fastopen_queue_tune(struct sock *sk, int backlog)
index 63238c8..131ea1b 100644 (file)
@@ -152,7 +152,7 @@ struct tcg_algorithm_info {
  * total. Once we've done this we know the offset of the data length field,
  * and can calculate the total size of the event.
  *
- * Return: size of the event on success, <0 on failure
+ * Return: size of the event on success, 0 on failure
  */
 
 static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
@@ -170,6 +170,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        u16 halg;
        int i;
        int j;
+       u32 count, event_type;
 
        marker = event;
        marker_start = marker;
@@ -190,16 +191,22 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        }
 
        event = (struct tcg_pcr_event2_head *)mapping;
+       /*
+        * The loop below will unmap these fields if the log is larger than
+        * one page, so save them here for reference:
+        */
+       count = READ_ONCE(event->count);
+       event_type = READ_ONCE(event->event_type);
 
        efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
 
        /* Check if event is malformed. */
-       if (event->count > efispecid->num_algs) {
+       if (count > efispecid->num_algs) {
                size = 0;
                goto out;
        }
 
-       for (i = 0; i < event->count; i++) {
+       for (i = 0; i < count; i++) {
                halg_size = sizeof(event->digests[i].alg_id);
 
                /* Map the digest's algorithm identifier */
@@ -256,8 +263,9 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
                + event_field->event_size;
        size = marker - marker_start;
 
-       if ((event->event_type == 0) && (event_field->event_size == 0))
+       if (event_type == 0 && event_field->event_size == 0)
                size = 0;
+
 out:
        if (do_mapping)
                TPM_MEMUNMAP(mapping, mapping_size);
index 70bbdc3..d4ee6e9 100644 (file)
@@ -231,6 +231,76 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 
 #endif         /* ARCH_HAS_NOCACHE_UACCESS */
 
+extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+
+/**
+ * copy_struct_from_user: copy a struct from userspace
+ * @dst:   Destination address, in kernel space. This buffer must be @ksize
+ *         bytes long.
+ * @ksize: Size of @dst struct.
+ * @src:   Source address, in userspace.
+ * @usize: (Alleged) size of @src struct.
+ *
+ * Copies a struct from userspace to kernel space, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
+ *   {
+ *      int err;
+ *      struct foo karg = {};
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *
+ *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * There are three cases to consider:
+ *  * If @usize == @ksize, then it's copied verbatim.
+ *  * If @usize < @ksize, then the userspace has passed an old struct to a
+ *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
+ *    are to be zero-filled.
+ *  * If @usize > @ksize, then the userspace has passed a new struct to an
+ *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
+ *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
+ *
+ * Returns (in all cases, some data may have been copied):
+ *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
+ *  * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+                     size_t usize)
+{
+       size_t size = min(ksize, usize);
+       size_t rest = max(ksize, usize) - size;
+
+       /* Deal with trailing bytes. */
+       if (usize < ksize) {
+               memset(dst + size, 0, rest);
+       } else if (usize > ksize) {
+               int ret = check_zeroed_user(src + size, rest);
+               if (ret <= 0)
+                       return ret ?: -E2BIG;
+       }
+       /* Copy the interoperable parts of the struct. */
+       if (copy_from_user(dst, src, size))
+               return -EFAULT;
+       return 0;
+}
+
 /*
  * probe_kernel_read(): safely attempt to read from a location
  * @dst: pointer to the buffer that shall take the data
@@ -285,8 +355,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif
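For reference, a hedged sketch of the calling pattern the unsafe_*() accessors (now routed through unsafe_op_wrap() in the fallback case) expect; the function and label names are assumptions.

    static int example_put_value(u32 __user *uptr, u32 val)
    {
            if (!user_access_begin(uptr, sizeof(*uptr)))
                    return -EFAULT;
            unsafe_put_user(val, uptr, efault);     /* jumps to efault on fault */
            user_access_end();
            return 0;

    efault:
            user_access_end();
            return -EFAULT;
    }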
index 5921599..86eecbd 100644 (file)
@@ -230,8 +230,8 @@ static inline int xa_err(void *entry)
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
  * Two common ranges are predefined for you:
- *  * xa_limit_32b     - [0 - UINT_MAX]
- *  * xa_limit_31b     - [0 - INT_MAX]
+ * * xa_limit_32b      - [0 - UINT_MAX]
+ * * xa_limit_31b      - [0 - INT_MAX]
  */
 struct xa_limit {
        u32 max;
index ff45c3e..4ab2c49 100644 (file)
@@ -5550,6 +5550,14 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
 const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
 
 /**
+ * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in some regdomains (notably ETSI).
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
+
+/**
  * DOC: Internal regulatory db functions
  *
  */
index df528a6..ea985aa 100644 (file)
@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
index f8712bb..4c2cd93 100644 (file)
@@ -52,6 +52,9 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
+       /* The first cache line is often dirtied.
+        * Do not place read-mostly fields here.
+        */
        refcount_t              passive;        /* To decide when the network
                                                 * namespace should be freed.
                                                 */
@@ -60,7 +63,13 @@ struct net {
                                                 */
        spinlock_t              rules_mod_lock;
 
-       u32                     hash_mix;
+       unsigned int            dev_unreg_count;
+
+       unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
+       int                     ifindex;
+
+       spinlock_t              nsid_lock;
+       atomic_t                fnhe_genid;
 
        struct list_head        list;           /* list of network namespaces */
        struct list_head        exit_list;      /* To linked to call pernet exit
@@ -76,11 +85,11 @@ struct net {
 #endif
        struct user_namespace   *user_ns;       /* Owning user namespace */
        struct ucounts          *ucounts;
-       spinlock_t              nsid_lock;
        struct idr              netns_ids;
 
        struct ns_common        ns;
 
+       struct list_head        dev_base_head;
        struct proc_dir_entry   *proc_net;
        struct proc_dir_entry   *proc_net_stat;
 
@@ -93,17 +102,18 @@ struct net {
 
        struct uevent_sock      *uevent_sock;           /* uevent socket */
 
-       struct list_head        dev_base_head;
        struct hlist_head       *dev_name_head;
        struct hlist_head       *dev_index_head;
-       unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
-       int                     ifindex;
-       unsigned int            dev_unreg_count;
+       /* Note that @hash_mix can be read millions of times per second;
+        * it is critical that it sits on a read-mostly cache line.
+        */
+       u32                     hash_mix;
+
+       struct net_device       *loopback_dev;          /* The loopback */
 
        /* core fib_rules */
        struct list_head        rules_ops;
 
-       struct net_device       *loopback_dev;          /* The loopback */
        struct netns_core       core;
        struct netns_mib        mib;
        struct netns_packet     packet;
@@ -171,7 +181,6 @@ struct net {
        struct sock             *crypto_nlsk;
 #endif
        struct sock             *diag_nlsk;
-       atomic_t                fnhe_genid;
 } __randomize_layout;
 
 #include <linux/seq_file_net.h>
index fd178d5..cf8b332 100644 (file)
@@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 
 static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
-       return queue->rskq_accept_head == NULL;
+       return READ_ONCE(queue->rskq_accept_head) == NULL;
 }
 
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
@@ -197,7 +197,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
        req = queue->rskq_accept_head;
        if (req) {
                sk_acceptq_removed(parent);
-               queue->rskq_accept_head = req->dl_next;
+               WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
                if (queue->rskq_accept_head == NULL)
                        queue->rskq_accept_tail = NULL;
        }
index 5d60f13..3ab5c6b 100644 (file)
@@ -610,4 +610,9 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
        return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
 }
 
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+       return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
 #endif /* __net_sctp_h__ */
index 2c53f1a..f69b58b 100644 (file)
@@ -878,12 +878,17 @@ static inline bool sk_acceptq_is_full(const struct sock *sk)
  */
 static inline int sk_stream_min_wspace(const struct sock *sk)
 {
-       return sk->sk_wmem_queued >> 1;
+       return READ_ONCE(sk->sk_wmem_queued) >> 1;
 }
 
 static inline int sk_stream_wspace(const struct sock *sk)
 {
-       return sk->sk_sndbuf - sk->sk_wmem_queued;
+       return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
+}
+
+static inline void sk_wmem_queued_add(struct sock *sk, int val)
+{
+       WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
 }
 
 void sk_stream_write_space(struct sock *sk);
@@ -1207,7 +1212,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
 {
-       if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+       if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
                return false;
 
        return sk->sk_prot->stream_memory_free ?
@@ -1467,7 +1472,7 @@ DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
-       sk->sk_wmem_queued -= skb->truesize;
+       sk_wmem_queued_add(sk, -skb->truesize);
        sk_mem_uncharge(sk, skb->truesize);
        if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
            !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
@@ -2014,7 +2019,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
        skb->len             += copy;
        skb->data_len        += copy;
        skb->truesize        += copy;
-       sk->sk_wmem_queued   += copy;
+       sk_wmem_queued_add(sk, copy);
        sk_mem_charge(sk, copy);
        return 0;
 }
@@ -2220,10 +2225,14 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band)
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
-       if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
-               sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-               sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
-       }
+       u32 val;
+
+       if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+               return;
+
+       val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+       WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
@@ -2251,7 +2260,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-       return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+       return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
 }
 
 static inline gfp_t gfp_any(void)
@@ -2271,7 +2280,9 @@ static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
 
 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
 {
-       return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+       int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
+
+       return v ?: 1;
 }
 
 /* Alas, with timeout socket operations are not restartable.
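A hedged sketch (an assumption, not from this patch) of the reader side these annotations serve: lockless paths sample sk_sndbuf/sk_wmem_queued with READ_ONCE() while locked writers publish through WRITE_ONCE()/sk_wmem_queued_add(), keeping the data races intentional and tool-visible.

    static bool example_stream_has_space(const struct sock *sk)
    {
            /* May run without the socket lock; pairs with WRITE_ONCE() writers. */
            return READ_ONCE(sk->sk_wmem_queued) <
                   (READ_ONCE(sk->sk_sndbuf) >> 1);
    }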
index c9a3f96..ab4eb5e 100644 (file)
@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;
 
-       return tcp_memory_pressure;
+       return READ_ONCE(tcp_memory_pressure);
 }
 /*
  * The next routines deal with comparing 32 bit unsigned ints
@@ -1380,13 +1380,14 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
-       return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
+       return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+                                 READ_ONCE(sk->sk_backlog.len) -
                                  atomic_read(&sk->sk_rmem_alloc));
 }
 
 static inline int tcp_full_space(const struct sock *sk)
 {
-       return tcp_win_from_space(sk, sk->sk_rcvbuf);
+       return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1916,7 +1917,8 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+       u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+                           READ_ONCE(tp->snd_nxt);
 
        return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
 }
index 3810b34..6bd5ed6 100644 (file)
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 struct scsi_eh_save {
        /* saved state */
        int result;
+       unsigned int resid_len;
        int eh_eflags;
        enum dma_data_direction data_direction;
        unsigned underflow;
index 0fd3929..057d2a2 100644 (file)
@@ -264,6 +264,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_REG_ML_LOUTPAY             0x20
 #define AZX_REG_ML_LINPAY              0x30
 
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define ML_LOSIDV_STREAM_MASK          0xFFFE
+
 #define ML_LCTL_SCF_MASK                       0xF
 #define AZX_MLCTL_SPA                          (0x1 << 16)
 #define AZX_MLCTL_CPA                          (0x1 << 23)
index a13a62d..191fe44 100644 (file)
@@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local,
            );
 
 TRACE_EVENT(rxrpc_peer,
-           TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+           TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
                     int usage, const void *where),
 
-           TP_ARGS(peer, op, usage, where),
+           TP_ARGS(peer_debug_id, op, usage, where),
 
            TP_STRUCT__entry(
                    __field(unsigned int,       peer            )
@@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer,
                             ),
 
            TP_fast_assign(
-                   __entry->peer = peer->debug_id;
+                   __entry->peer = peer_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -546,10 +546,10 @@ TRACE_EVENT(rxrpc_peer,
            );
 
 TRACE_EVENT(rxrpc_conn,
-           TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+           TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
                     int usage, const void *where),
 
-           TP_ARGS(conn, op, usage, where),
+           TP_ARGS(conn_debug_id, op, usage, where),
 
            TP_STRUCT__entry(
                    __field(unsigned int,       conn            )
@@ -559,7 +559,7 @@ TRACE_EVENT(rxrpc_conn,
                             ),
 
            TP_fast_assign(
-                   __entry->conn = conn->debug_id;
+                   __entry->conn = conn_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client,
            );
 
 TRACE_EVENT(rxrpc_call,
-           TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+           TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
                     int usage, const void *where, const void *aux),
 
-           TP_ARGS(call, op, usage, where, aux),
+           TP_ARGS(call_debug_id, op, usage, where, aux),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call            )
@@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call->debug_id;
+                   __entry->call = call_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -1068,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call->debug_id;
+                   __entry->call = call ? call->debug_id : 0;
                    __entry->why = why;
                    __entry->seq = seq;
                    __entry->offset = offset;
index a0c4b8a..51fe9f6 100644 (file)
@@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full,
        TP_fast_assign(
                __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
                __entry->truesize   = skb->truesize;
-               __entry->sk_rcvbuf  = sk->sk_rcvbuf;
+               __entry->sk_rcvbuf  = READ_ONCE(sk->sk_rcvbuf);
        ),
 
        TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
@@ -115,7 +115,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
                __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
                __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
                __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
-               __entry->wmem_queued = sk->sk_wmem_queued;
+               __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
                __entry->kind = kind;
        ),
 
index c99b4f2..4fe35d6 100644 (file)
@@ -1003,6 +1003,8 @@ struct drm_amdgpu_info_device {
        __u64 high_va_max;
        /* gfx10 pa_sc_tile_steering_override */
        __u32 pa_sc_tile_steering_override;
+       /* disabled TCCs */
+       __u64 tcc_disabled_mask;
 };
 
 struct drm_amdgpu_info_hw_ip {
index 1c215ea..e168dc5 100644 (file)
@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
        __u32   result;
 };
 
+struct nvme_passthru_cmd64 {
+       __u8    opcode;
+       __u8    flags;
+       __u16   rsvd1;
+       __u32   nsid;
+       __u32   cdw2;
+       __u32   cdw3;
+       __u64   metadata;
+       __u64   addr;
+       __u32   metadata_len;
+       __u32   data_len;
+       __u32   cdw10;
+       __u32   cdw11;
+       __u32   cdw12;
+       __u32   cdw13;
+       __u32   cdw14;
+       __u32   cdw15;
+       __u32   timeout_ms;
+       __u64   result;
+};
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID          _IO('N', 0x40)
@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
 #define NVME_IOCTL_RESET       _IO('N', 0x44)
 #define NVME_IOCTL_SUBSYS_RESET        _IO('N', 0x45)
 #define NVME_IOCTL_RESCAN      _IO('N', 0x46)
+#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD    _IOWR('N', 0x48, struct nvme_passthru_cmd64)
 
 #endif /* _UAPI_LINUX_NVME_IOCTL_H */
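A hedged user-space sketch of the new 64-bit-result passthrough ioctl; the device handling, opcode choice (Identify controller) and buffer size are illustrative assumptions.

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/nvme_ioctl.h>

    static int example_identify_ctrl(const char *dev)
    {
            static char idbuf[4096];
            struct nvme_passthru_cmd64 cmd;
            int fd = open(dev, O_RDWR);

            if (fd < 0)
                    return -1;

            memset(&cmd, 0, sizeof(cmd));
            cmd.opcode   = 0x06;                    /* Identify (admin) */
            cmd.cdw10    = 1;                       /* CNS=1: controller data */
            cmd.addr     = (uint64_t)(uintptr_t)idbuf;
            cmd.data_len = sizeof(idbuf);

            if (ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd) < 0)
                    perror("NVME_IOCTL_ADMIN64_CMD");
            else
                    printf("result: %llu\n", (unsigned long long)cmd.result);

            close(fd);
            return 0;
    }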
index 29d6e93..acb7d2b 100644 (file)
@@ -34,6 +34,7 @@
  * of which the first 64 bytes are standardized as follows:
  */
 #define PCI_STD_HEADER_SIZEOF  64
+#define PCI_STD_NUM_BARS       6       /* Number of standard BARs */
 #define PCI_VENDOR_ID          0x00    /* 16 bits */
 #define PCI_DEVICE_ID          0x02    /* 16 bits */
 #define PCI_COMMAND            0x04    /* 16 bits */
 #define  PCI_EXP_LNKCTL2_TLS_8_0GT     0x0003 /* Supported Speed 8GT/s */
 #define  PCI_EXP_LNKCTL2_TLS_16_0GT    0x0004 /* Supported Speed 16GT/s */
 #define  PCI_EXP_LNKCTL2_TLS_32_0GT    0x0005 /* Supported Speed 32GT/s */
+#define  PCI_EXP_LNKCTL2_ENTER_COMP    0x0010 /* Enter Compliance */
+#define  PCI_EXP_LNKCTL2_TX_MARGIN     0x0380 /* Transmit Margin */
 #define PCI_EXP_LNKSTA2                50      /* Link Status 2 */
 #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52      /* v2 endpoints with link end here */
 #define PCI_EXP_SLTCAP2                52      /* Slot Capabilities 2 */
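A hedged sketch of the kind of loop where the new PCI_STD_NUM_BARS constant replaces an open-coded literal 6; the dumping helper itself is an assumption.

    static void example_dump_bars(struct pci_dev *pdev)
    {
            int bar;

            for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                    if (!pci_resource_len(pdev, bar))
                            continue;
                    pci_info(pdev, "BAR%d: %pR\n", bar, &pdev->resource[bar]);
            }
    }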
index 364c350..62b6f69 100644 (file)
@@ -35,6 +35,9 @@
 
 */
 
+#ifndef _UAPI_LINUX_PG_H
+#define _UAPI_LINUX_PG_H
+
 #define PG_MAGIC       'P'
 #define PG_RESET       'Z'
 #define PG_COMMAND     'C'
@@ -61,4 +64,4 @@ struct pg_read_hdr {
 
 };
 
-/* end of pg.h */
+#endif /* _UAPI_LINUX_PG_H */
index b3105ac..99335e1 100644 (file)
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags:       Flags for the new process as listed above.
+ *               All flags are valid except for CSIGNAL and
+ *               CLONE_DETACHED.
+ * @pidfd:       If CLONE_PIDFD is set, a pidfd will be
+ *               returned in this argument.
+ * @child_tid:   If CLONE_CHILD_SETTID is set, the TID of the
+ *               child process will be returned in the child's
+ *               memory.
+ * @parent_tid:  If CLONE_PARENT_SETTID is set, the TID of
+ *               the child process will be returned in the
+ *               parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ *               sent when the child exits.
+ * @stack:       Specify the location of the stack for the
+ *               child process.
+ * @stack_size:  The size of the stack for the child process.
+ * @tls:         If CLONE_SETTLS is set, the tls descriptor
+ *               is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
  */
 struct clone_args {
        __aligned_u64 flags;
@@ -46,6 +69,9 @@ struct clone_args {
        __aligned_u64 stack_size;
        __aligned_u64 tls;
 };
+#endif
+
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
 
 /*
  * Scheduling policies
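A hedged user-space sketch of calling clone3() with the size-versioned struct clone_args documented above; error handling is trimmed and the wrapper name is an assumption.

    #define _GNU_SOURCE
    #include <signal.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <linux/sched.h>        /* struct clone_args, CLONE_ARGS_SIZE_VER0 */

    static pid_t example_clone3(void)
    {
            struct clone_args args;

            memset(&args, 0, sizeof(args));
            args.exit_signal = SIGCHLD;

            /* The kernel versions the struct by the size passed here. */
            return syscall(__NR_clone3, &args, sizeof(args));
    }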
index 0f4f87a..e7fe550 100644 (file)
 #define PORT_SUNIX     121
 
 /* Freescale Linflex UART */
-#define PORT_LINFLEXUART       121
+#define PORT_LINFLEXUART       122
 
 #endif /* _UAPILINUX_SERIAL_CORE_H */
index 98b30c1..d89969a 100644 (file)
@@ -212,30 +212,7 @@ int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
 
 bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
 
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
-efi_status_t xen_efi_set_time(efi_time_t *tm);
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
-                                    efi_time_t *tm);
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm);
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                 u32 *attr, unsigned long *data_size,
-                                 void *data);
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
-                                      efi_char16_t *name, efi_guid_t *vendor);
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                 u32 attr, unsigned long data_size,
-                                 void *data);
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
-                                        u64 *remaining_space,
-                                        u64 *max_variable_size);
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count);
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
-                                   unsigned long count, unsigned long sg_list);
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
-                                       unsigned long count, u64 *max_size,
-                                       int *reset_type);
-void xen_efi_reset_system(int reset_type, efi_status_t status,
-                         unsigned long data_size, efi_char16_t *data);
+void xen_efi_runtime_setup(void);
 
 
 #ifdef CONFIG_PREEMPT
index ca4e5d4..c00b925 100644 (file)
@@ -87,9 +87,9 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
  */
 void dma_common_free_remap(void *cpu_addr, size_t size)
 {
-       struct page **pages = dma_common_find_pages(cpu_addr);
+       struct vm_struct *area = find_vm_area(cpu_addr);
 
-       if (!pages) {
+       if (!area || area->flags != VM_DMA_COHERENT) {
                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
                return;
        }
index 4655adb..9ec0b0b 100644 (file)
@@ -3779,11 +3779,23 @@ static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
        perf_event_groups_insert(&ctx->flexible_groups, event);
 }
 
+/* pick an event from the flexible_groups to rotate */
 static inline struct perf_event *
-ctx_first_active(struct perf_event_context *ctx)
+ctx_event_to_rotate(struct perf_event_context *ctx)
 {
-       return list_first_entry_or_null(&ctx->flexible_active,
-                                       struct perf_event, active_list);
+       struct perf_event *event;
+
+       /* pick the first active flexible event */
+       event = list_first_entry_or_null(&ctx->flexible_active,
+                                        struct perf_event, active_list);
+
+       /* if no active flexible event, pick the first event */
+       if (!event) {
+               event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
+                                     typeof(*event), group_node);
+       }
+
+       return event;
 }
 
 static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
@@ -3808,9 +3820,9 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
        perf_pmu_disable(cpuctx->ctx.pmu);
 
        if (task_rotate)
-               task_event = ctx_first_active(task_ctx);
+               task_event = ctx_event_to_rotate(task_ctx);
        if (cpu_rotate)
-               cpu_event = ctx_first_active(&cpuctx->ctx);
+               cpu_event = ctx_event_to_rotate(&cpuctx->ctx);
 
        /*
         * As per the order given at ctx_resched() first 'pop' task flexible
@@ -5668,7 +5680,8 @@ again:
         * undo the VM accounting.
         */
 
-       atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+       atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
+                       &mmap_user->locked_vm);
        atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
        free_uid(mmap_user);
 
@@ -5812,8 +5825,20 @@ accounting:
 
        user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-       if (user_locked > user_lock_limit)
+       if (user_locked <= user_lock_limit) {
+               /* charge all to locked_vm */
+       } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
+               /* charge all to pinned_vm */
+               extra = user_extra;
+               user_extra = 0;
+       } else {
+               /*
+                * charge locked_vm until it hits user_lock_limit;
+                * charge the rest from pinned_vm
+                */
                extra = user_locked - user_lock_limit;
+               user_extra -= extra;
+       }
 
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
@@ -10586,55 +10611,26 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        u32 size;
        int ret;
 
-       if (!access_ok(uattr, PERF_ATTR_SIZE_VER0))
-               return -EFAULT;
-
-       /*
-        * zero the full structure, so that a short copy will be nice.
-        */
+       /* Zero the full structure, so that a short copy will be nice. */
        memset(attr, 0, sizeof(*attr));
 
        ret = get_user(size, &uattr->size);
        if (ret)
                return ret;
 
-       if (size > PAGE_SIZE)   /* silly large */
-               goto err_size;
-
-       if (!size)              /* abi compat */
+       /* ABI compatibility quirk: */
+       if (!size)
                size = PERF_ATTR_SIZE_VER0;
-
-       if (size < PERF_ATTR_SIZE_VER0)
+       if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
                goto err_size;
 
-       /*
-        * If we're handed a bigger struct than we know of,
-        * ensure all the unknown bits are 0 - i.e. new
-        * user-space does not rely on any kernel feature
-        * extensions we dont know about yet.
-        */
-       if (size > sizeof(*attr)) {
-               unsigned char __user *addr;
-               unsigned char __user *end;
-               unsigned char val;
-
-               addr = (void __user *)uattr + sizeof(*attr);
-               end  = (void __user *)uattr + size;
-
-               for (; addr < end; addr++) {
-                       ret = get_user(val, addr);
-                       if (ret)
-                               return ret;
-                       if (val)
-                               goto err_size;
-               }
-               size = sizeof(*attr);
+       ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+       if (ret) {
+               if (ret == -E2BIG)
+                       goto err_size;
+               return ret;
        }
 
-       ret = copy_from_user(attr, uattr, size);
-       if (ret)
-               return -EFAULT;
-
        attr->size = size;
 
        if (attr->__reserved_1)
@@ -11891,6 +11887,10 @@ static int inherit_group(struct perf_event *parent_event,
                                            child, leader, child_ctx);
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);
+
+               if (sub->aux_event == parent_event &&
+                   !perf_get_aux_event(child_ctr, leader))
+                       return -EINVAL;
        }
        return 0;
 }
index 94d38a3..c747610 100644 (file)
@@ -474,14 +474,17 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        struct vm_area_struct *vma;
        int ret, is_register, ref_ctr_updated = 0;
        bool orig_page_huge = false;
+       unsigned int gup_flags = FOLL_FORCE;
 
        is_register = is_swbp_insn(&opcode);
        uprobe = container_of(auprobe, struct uprobe, arch);
 
 retry:
+       if (is_register)
+               gup_flags |= FOLL_SPLIT_PMD;
        /* Read the page with vaddr into memory */
-       ret = get_user_pages_remote(NULL, mm, vaddr, 1,
-                       FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
+       ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+                                   &old_page, &vma, NULL);
        if (ret <= 0)
                return ret;
 
@@ -489,6 +492,12 @@ retry:
        if (ret <= 0)
                goto put_old;
 
+       if (WARN(!is_register && PageCompound(old_page),
+                "uprobe unregister should never work on compound page\n")) {
+               ret = -EINVAL;
+               goto put_old;
+       }
+
        /* We are going to replace instruction, update ref_ctr. */
        if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
                ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
index f9572f4..bcdf531 100644 (file)
@@ -2525,39 +2525,19 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #ifdef __ARCH_WANT_SYS_CLONE3
 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                                              struct clone_args __user *uargs,
-                                             size_t size)
+                                             size_t usize)
 {
+       int err;
        struct clone_args args;
 
-       if (unlikely(size > PAGE_SIZE))
+       if (unlikely(usize > PAGE_SIZE))
                return -E2BIG;
-
-       if (unlikely(size < sizeof(struct clone_args)))
+       if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
                return -EINVAL;
 
-       if (unlikely(!access_ok(uargs, size)))
-               return -EFAULT;
-
-       if (size > sizeof(struct clone_args)) {
-               unsigned char __user *addr;
-               unsigned char __user *end;
-               unsigned char val;
-
-               addr = (void __user *)uargs + sizeof(struct clone_args);
-               end = (void __user *)uargs + size;
-
-               for (; addr < end; addr++) {
-                       if (get_user(val, addr))
-                               return -EFAULT;
-                       if (val)
-                               return -E2BIG;
-               }
-
-               size = sizeof(struct clone_args);
-       }
-
-       if (copy_from_user(&args, uargs, size))
-               return -EFAULT;
+       err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
+       if (err)
+               return err;
 
        /*
         * Verify that higher 32bits of exit_signal are unset and that
@@ -2604,6 +2584,17 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
        return true;
 }
 
+/**
+ * clone3 - create a new process with specific properties
+ * @uargs: argument structure
+ * @size:  size of @uargs
+ *
+ * clone3() is the extensible successor to clone()/clone2().
+ * It takes a struct as argument that is versioned by its size.
+ *
+ * Return: On success, a positive PID for the child process.
+ *         On error, a negative errno number.
+ */
 SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
 {
        int err;
@@ -2934,7 +2925,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
        struct ctl_table t;
        int ret;
        int threads = max_threads;
-       int min = MIN_THREADS;
+       int min = 1;
        int max = MAX_THREADS;
 
        t = *table;
@@ -2946,7 +2937,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
        if (ret || !write)
                return ret;
 
-       set_max_threads(threads);
+       max_threads = threads;
 
        return 0;
 }
index c073842..dc520f0 100644 (file)
@@ -22,12 +22,6 @@ EXPORT_SYMBOL(system_freezing_cnt);
 bool pm_freezing;
 bool pm_nosig_freezing;
 
-/*
- * Temporary export for the deadlock workaround in ata_scsi_hotplug().
- * Remove once the hack becomes unnecessary.
- */
-EXPORT_SYMBOL_GPL(pm_freezing);
-
 /* protects freezing and frozen transitions */
 static DEFINE_SPINLOCK(freezer_lock);
 
index 9ff4498..5a0fc0b 100755 (executable)
@@ -71,7 +71,13 @@ done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
 find $cpio_dir -type f -print0 |
        xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
 
-tar -Jcf $tarfile -C $cpio_dir/ . > /dev/null
+# Create archive and try to normalize metadata for reproducibility.
+# For compatibility with older versions of tar, files are fed to tar
+# pre-sorted, as --sort=name might not be available.
+find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+    tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+    --owner=0 --group=0 --numeric-owner --no-recursion \
+    -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
 
 echo "$src_files_md5" >  kernel/kheaders.md5
 echo "$obj_files_md5" >> kernel/kheaders.md5
index 621467c..b262f47 100644 (file)
@@ -866,9 +866,9 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
-void __kthread_queue_delayed_work(struct kthread_worker *worker,
-                                 struct kthread_delayed_work *dwork,
-                                 unsigned long delay)
+static void __kthread_queue_delayed_work(struct kthread_worker *worker,
+                                        struct kthread_delayed_work *dwork,
+                                        unsigned long delay)
 {
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;
index 47e8ebc..f470a03 100644 (file)
@@ -180,6 +180,7 @@ void panic(const char *fmt, ...)
         * after setting panic_cpu) from invoking panic() again.
         */
        local_irq_disable();
+       preempt_disable_notrace();
 
        /*
         * It's possible to come here directly from a panic-assertion and
index e8710d1..e26de7a 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
 
 #include "power.h"
 
index 7880f4f..dd05a37 100644 (file)
@@ -5106,9 +5106,6 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
        u32 size;
        int ret;
 
-       if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0))
-               return -EFAULT;
-
        /* Zero the full structure, so that a short copy will be nice: */
        memset(attr, 0, sizeof(*attr));
 
@@ -5116,45 +5113,19 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
        if (ret)
                return ret;
 
-       /* Bail out on silly large: */
-       if (size > PAGE_SIZE)
-               goto err_size;
-
        /* ABI compatibility quirk: */
        if (!size)
                size = SCHED_ATTR_SIZE_VER0;
-
-       if (size < SCHED_ATTR_SIZE_VER0)
+       if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
                goto err_size;
 
-       /*
-        * If we're handed a bigger struct than we know of,
-        * ensure all the unknown bits are 0 - i.e. new
-        * user-space does not rely on any kernel feature
-        * extensions we dont know about yet.
-        */
-       if (size > sizeof(*attr)) {
-               unsigned char __user *addr;
-               unsigned char __user *end;
-               unsigned char val;
-
-               addr = (void __user *)uattr + sizeof(*attr);
-               end  = (void __user *)uattr + size;
-
-               for (; addr < end; addr++) {
-                       ret = get_user(val, addr);
-                       if (ret)
-                               return ret;
-                       if (val)
-                               goto err_size;
-               }
-               size = sizeof(*attr);
+       ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+       if (ret) {
+               if (ret == -E2BIG)
+                       goto err_size;
+               return ret;
        }
 
-       ret = copy_from_user(attr, uattr, size);
-       if (ret)
-               return -EFAULT;
-
        if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
            size < SCHED_ATTR_SIZE_VER1)
                return -EINVAL;
@@ -5354,7 +5325,7 @@ sched_attr_copy_to_user(struct sched_attr __user *uattr,
  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
- * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
+ * @usize: sizeof(attr) for fwd/bwd comp.
  * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
index 2305ce8..46ed4e1 100644 (file)
@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
 
        write_seqcount_begin(&vtime->seqcount);
        /* We might have scheduled out from guest path */
-       if (current->flags & PF_VCPU)
+       if (tsk->flags & PF_VCPU)
                vtime_account_guest(tsk, vtime);
        else
                __vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
         */
        write_seqcount_begin(&vtime->seqcount);
        __vtime_account_system(tsk, vtime);
-       current->flags |= PF_VCPU;
+       tsk->flags |= PF_VCPU;
        write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 
        write_seqcount_begin(&vtime->seqcount);
        vtime_account_guest(tsk, vtime);
-       current->flags &= ~PF_VCPU;
+       tsk->flags &= ~PF_VCPU;
        write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
index 83ab35e..682a754 100644 (file)
@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
                if (++count > 3) {
                        u64 new, old = ktime_to_ns(cfs_b->period);
 
-                       new = (old * 147) / 128; /* ~115% */
-                       new = min(new, max_cfs_quota_period);
-
-                       cfs_b->period = ns_to_ktime(new);
-
-                       /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
-                       cfs_b->quota *= new;
-                       cfs_b->quota = div64_u64(cfs_b->quota, old);
-
-                       pr_warn_ratelimited(
-       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
-                               smp_processor_id(),
-                               div_u64(new, NSEC_PER_USEC),
-                               div_u64(cfs_b->quota, NSEC_PER_USEC));
+                       /*
+                        * Grow period by a factor of 2 to avoid losing precision.
+                        * Precision loss in the quota/period ratio can cause __cfs_schedulable
+                        * to fail.
+                        */
+                       new = old * 2;
+                       if (new < max_cfs_quota_period) {
+                               cfs_b->period = ns_to_ktime(new);
+                               cfs_b->quota *= 2;
+
+                               pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+                                       smp_processor_id(),
+                                       div_u64(new, NSEC_PER_USEC),
+                                       div_u64(cfs_b->quota, NSEC_PER_USEC));
+                       } else {
+                               pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+                                       smp_processor_id(),
+                                       div_u64(old, NSEC_PER_USEC),
+                                       div_u64(cfs_b->quota, NSEC_PER_USEC));
+                       }
 
                        /* reset count so we don't come right back in here */
                        count = 0;
index a39bed2..168479a 100644 (file)
@@ -174,7 +174,6 @@ static int membarrier_private_expedited(int flags)
                 */
                if (cpu == raw_smp_processor_id())
                        continue;
-               rcu_read_lock();
                p = rcu_dereference(cpu_rq(cpu)->curr);
                if (p && p->mm == mm)
                        __cpumask_set_cpu(cpu, tmpmask);
index c7031a2..998d50e 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2010          SUSE Linux Products GmbH
  * Copyright (C) 2010          Tejun Heo <tj@kernel.org>
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -167,7 +168,7 @@ static void set_state(struct multi_stop_data *msdata,
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
-       msdata->state = newstate;
+       WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
@@ -186,7 +187,7 @@ void __weak stop_machine_yield(const struct cpumask *cpumask)
 static int multi_cpu_stop(void *data)
 {
        struct multi_stop_data *msdata = data;
-       enum multi_stop_state curstate = MULTI_STOP_NONE;
+       enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        const struct cpumask *cpumask;
        unsigned long flags;
@@ -210,8 +211,9 @@ static int multi_cpu_stop(void *data)
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                stop_machine_yield(cpumask);
-               if (msdata->state != curstate) {
-                       curstate = msdata->state;
+               newstate = READ_ONCE(msdata->state);
+               if (newstate != curstate) {
+                       curstate = newstate;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
index 00fcea2..b6f2f35 100644 (file)
@@ -163,7 +163,7 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
 #ifdef CONFIG_SPARC
 #endif
 
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
 extern int pwrsw_enabled;
 #endif
 
@@ -620,7 +620,7 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = proc_dointvec,
        },
 #endif
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
        {
                .procname       = "soft-power",
                .data           = &pwrsw_enabled,
index 0d4dc24..6560553 100644 (file)
@@ -164,7 +164,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
        struct hrtimer_clock_base *base;
 
        for (;;) {
-               base = timer->base;
+               base = READ_ONCE(timer->base);
                if (likely(base != &migration_base)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
@@ -244,7 +244,7 @@ again:
                        return base;
 
                /* See the comment in lock_hrtimer_base() */
-               timer->base = &migration_base;
+               WRITE_ONCE(timer->base, &migration_base);
                raw_spin_unlock(&base->cpu_base->lock);
                raw_spin_lock(&new_base->cpu_base->lock);
 
@@ -253,10 +253,10 @@ again:
                        raw_spin_unlock(&new_base->cpu_base->lock);
                        raw_spin_lock(&base->cpu_base->lock);
                        new_cpu_base = this_cpu_base;
-                       timer->base = base;
+                       WRITE_ONCE(timer->base, base);
                        goto again;
                }
-               timer->base = new_base;
+               WRITE_ONCE(timer->base, new_base);
        } else {
                if (new_cpu_base != this_cpu_base &&
                    hrtimer_check_target(timer, new_base)) {
index c1f5bb5..b5a65e2 100644 (file)
@@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-       int bc_moved;
        /*
-        * We try to cancel the timer first. If the callback is on
-        * flight on some other cpu then we let it handle it. If we
-        * were able to cancel the timer nothing can rearm it as we
-        * own broadcast_lock.
+        * This is called either from enter/exit idle code or from the
+        * broadcast handler. In all cases tick_broadcast_lock is held.
         *
-        * However we can also be called from the event handler of
-        * ce_broadcast_hrtimer itself when it expires. We cannot
-        * restart the timer because we are in the callback, but we
-        * can set the expiry time and let the callback return
-        * HRTIMER_RESTART.
+        * hrtimer_cancel() cannot be called here, neither from the
+        * broadcast handler nor from the enter/exit idle code. The idle
+        * code can run into the problem described in bc_shutdown() and the
+        * broadcast handler cannot wait for itself to complete for obvious
+        * reasons.
         *
-        * Since we are in the idle loop at this point and because
-        * hrtimer_{start/cancel} functions call into tracing,
-        * calls to these functions must be bound within RCU_NONIDLE.
+        * Each caller tries to arm the hrtimer on its own CPU, but if the
+        * hrtimer callback function is currently running, then
+        * hrtimer_start() cannot move it and the timer stays on the CPU on
+        * which it is assigned at the moment.
+        *
+        * As this can be called from idle code, the hrtimer_start()
+        * invocation has to be wrapped with RCU_NONIDLE() as
+        * hrtimer_start() can call into tracing.
         */
-       RCU_NONIDLE(
-               {
-                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-                       if (bc_moved) {
-                               hrtimer_start(&bctimer, expires,
-                                             HRTIMER_MODE_ABS_PINNED_HARD);
-                       }
-               }
-       );
-
-       if (bc_moved) {
-               /* Bind the "device" to the cpu */
-               bc->bound_on = smp_processor_id();
-       } else if (bc->bound_on == smp_processor_id()) {
-               hrtimer_set_expires(&bctimer, expires);
-       }
+       RCU_NONIDLE( {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
+               /*
+                * The core tick broadcast mode expects bc->bound_on to be set
+                * correctly to prevent a CPU which has the broadcast hrtimer
+                * armed from going deep idle.
+                *
+                * As tick_broadcast_lock is held, nothing can change the cpu
+                * base which was just established in hrtimer_start() above. So
+                * the below access is safe even without holding the hrtimer
+                * base lock.
+                */
+               bc->bound_on = bctimer.base->cpu_base->cpu;
+       } );
        return 0;
 }
 
@@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-               if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-                       return HRTIMER_RESTART;
-
        return HRTIMER_NORESTART;
 }
 
index 62a50bf..f296d89 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/clocksource.h>
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
@@ -3486,6 +3487,11 @@ static int
 ftrace_avail_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
@@ -3505,6 +3511,15 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
 
+       /*
+        * This shows us what functions are currently being
+        * traced and by what. Not sure if we want lockdown
+        * to hide such critical information for an admin.
+        * Then again, it may show information we don't want
+        * people to see; but if something is tracing something,
+        * we probably want to know about it.
+        */
+
        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
        if (!iter)
                return -ENOMEM;
@@ -3540,21 +3555,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        struct list_head *mod_head;
        struct trace_array *tr = ops->private;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        ftrace_ops_init(ops);
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       if (tracing_check_open_get_tr(tr))
+               return -ENODEV;
+
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
-               return -ENOMEM;
+               goto out;
 
-       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-               kfree(iter);
-               return -ENOMEM;
-       }
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+               goto out;
 
        iter->ops = ops;
        iter->flags = flag;
@@ -3584,13 +3600,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
-                       ret = -ENOMEM;
                        goto out_unlock;
                }
        } else
                iter->hash = hash;
 
+       ret = 0;
+
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
 
@@ -3602,7 +3618,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                        /* Failed */
                        free_ftrace_hash(iter->hash);
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
                }
        } else
                file->private_data = iter;
@@ -3610,6 +3625,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
        mutex_unlock(&ops->func_hash->regex_lock);
 
+ out:
+       if (ret) {
+               kfree(iter);
+               if (tr)
+                       trace_array_put(tr);
+       }
+
        return ret;
 }
 
@@ -3618,6 +3640,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops,
                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
                        inode, file);
@@ -3628,6 +3651,7 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
                                 inode, file);
 }
@@ -5037,6 +5061,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
+       if (iter->tr)
+               trace_array_put(iter->tr);
        kfree(iter);
 
        return 0;
@@ -5194,9 +5220,13 @@ static int
 __ftrace_graph_open(struct inode *inode, struct file *file,
                    struct ftrace_graph_data *fgd)
 {
-       int ret = 0;
+       int ret;
        struct ftrace_hash *new_hash = NULL;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if (file->f_mode & FMODE_WRITE) {
                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
 
@@ -6537,8 +6567,9 @@ ftrace_pid_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret = 0;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
index 252f79c..6a0ee91 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
@@ -304,6 +305,23 @@ void trace_array_put(struct trace_array *this_tr)
        mutex_unlock(&trace_types_lock);
 }
 
+int tracing_check_open_get_tr(struct trace_array *tr)
+{
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
+       if (tracing_disabled)
+               return -ENODEV;
+
+       if (tr && trace_array_get(tr) < 0)
+               return -ENODEV;
+
+       return 0;
+}
+
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
@@ -4140,8 +4158,11 @@ release:
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        filp->private_data = inode->i_private;
        return 0;
@@ -4156,15 +4177,14 @@ bool tracing_is_disabled(void)
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
  */
-static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+int tracing_open_generic_tr(struct inode *inode, struct file *filp)
 {
        struct trace_array *tr = inode->i_private;
+       int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        filp->private_data = inode->i_private;
 
@@ -4233,10 +4253,11 @@ static int tracing_open(struct inode *inode, struct file *file)
 {
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
-       int ret = 0;
+       int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        /* If this file was open for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
@@ -4352,12 +4373,15 @@ static int show_traces_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = seq_open(file, &show_traces_seq_ops);
-       if (ret)
+       if (ret) {
+               trace_array_put(tr);
                return ret;
+       }
 
        m = file->private_data;
        m->private = tr;
@@ -4365,6 +4389,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+       struct trace_array *tr = inode->i_private;
+
+       trace_array_put(tr);
+       return seq_release(inode, file);
+}
+
 static ssize_t
 tracing_write_stub(struct file *filp, const char __user *ubuf,
                   size_t count, loff_t *ppos)
@@ -4395,8 +4427,8 @@ static const struct file_operations tracing_fops = {
 static const struct file_operations show_traces_fops = {
        .open           = show_traces_open,
        .read           = seq_read,
-       .release        = seq_release,
        .llseek         = seq_lseek,
+       .release        = show_traces_release,
 };
 
 static ssize_t
@@ -4697,11 +4729,9 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = single_open(file, tracing_trace_options_show, inode->i_private);
        if (ret < 0)
@@ -5038,8 +5068,11 @@ static const struct seq_operations tracing_saved_tgids_seq_ops = {
 
 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        return seq_open(filp, &tracing_saved_tgids_seq_ops);
 }
@@ -5115,8 +5148,11 @@ static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
 
 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
 }
@@ -5280,8 +5316,11 @@ static const struct seq_operations tracing_eval_map_seq_ops = {
 
 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        return seq_open(filp, &tracing_eval_map_seq_ops);
 }
@@ -5804,13 +5843,11 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
-       int ret = 0;
-
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        mutex_lock(&trace_types_lock);
 
@@ -5999,6 +6036,7 @@ waitagain:
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
        cpumask_clear(iter->started);
+       trace_seq_init(&iter->seq);
        iter->pos = -1;
 
        trace_event_read_lock();
@@ -6547,11 +6585,9 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr))
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = single_open(file, tracing_clock_show, inode->i_private);
        if (ret < 0)
@@ -6581,11 +6617,9 @@ static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr))
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
        if (ret < 0)
@@ -6638,10 +6672,11 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
        struct seq_file *m;
-       int ret = 0;
+       int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if (file->f_mode & FMODE_READ) {
                iter = __tracing_open(inode, file, true);
@@ -6786,6 +6821,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
        struct ftrace_buffer_info *info;
        int ret;
 
+       /* The following checks for tracefs lockdown */
        ret = tracing_buffers_open(inode, filp);
        if (ret < 0)
                return ret;
@@ -7105,8 +7141,9 @@ static int tracing_err_log_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret = 0;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        /* If this file was opened for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
@@ -7157,11 +7194,9 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
        struct ftrace_buffer_info *info;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
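
Taken together, the trace.c hunks converge on one shape: every tracefs open() path starts with the new tracing_check_open_get_tr(), which performs the lockdown check, the tracing_disabled check, and the trace_array reference acquisition in one place, and every matching release() drops that reference. A hedged sketch of the resulting handler pair; example_seq_ops is a placeholder for whatever seq_operations the file actually uses:

static int example_open(struct inode *inode, struct file *filp)
{
        struct trace_array *tr = inode->i_private;
        int ret;

        ret = tracing_check_open_get_tr(tr);    /* lockdown + disabled + get */
        if (ret)
                return ret;

        ret = seq_open(filp, &example_seq_ops);
        if (ret)
                trace_array_put(tr);            /* drop the reference on error */
        return ret;
}

static int example_release(struct inode *inode, struct file *filp)
{
        trace_array_put(inode->i_private);      /* balance example_open() */
        return seq_release(inode, filp);
}
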
index f801d15..d685c61 100644 (file)
@@ -338,6 +338,7 @@ extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
 extern void trace_array_put(struct trace_array *tr);
+extern int tracing_check_open_get_tr(struct trace_array *tr);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -681,6 +682,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);
 bool tracer_tracing_is_on(struct trace_array *tr);
 void tracer_tracing_on(struct trace_array *tr);
index a41fed4..89779eb 100644 (file)
@@ -174,6 +174,10 @@ static int dyn_event_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(NULL);
                if (ret < 0)
index b89cdfe..fba87d1 100644 (file)
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) fmt
 
 #include <linux/workqueue.h>
+#include <linux/security.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
@@ -1294,6 +1295,8 @@ static int trace_format_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
+       /* Do we want to hide event format files on tracefs lockdown? */
+
        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;
@@ -1440,28 +1443,17 @@ static int system_tr_open(struct inode *inode, struct file *filp)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_is_disabled())
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
-
        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
-       if (!dir) {
-               trace_array_put(tr);
+       if (!dir)
                return -ENOMEM;
-       }
 
-       dir->tr = tr;
-
-       ret = tracing_open_generic(inode, filp);
+       ret = tracing_open_generic_tr(inode, filp);
        if (ret < 0) {
-               trace_array_put(tr);
                kfree(dir);
                return ret;
        }
-
+       dir->tr = tr;
        filp->private_data = dir;
 
        return 0;
@@ -1771,6 +1763,10 @@ ftrace_event_open(struct inode *inode, struct file *file,
        struct seq_file *m;
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        ret = seq_open(file, seq_ops);
        if (ret < 0)
                return ret;
@@ -1795,6 +1791,7 @@ ftrace_event_avail_open(struct inode *inode, struct file *file)
 {
        const struct seq_operations *seq_ops = &show_event_seq_ops;
 
+       /* Checks for tracefs lockdown */
        return ftrace_event_open(inode, file, seq_ops);
 }
 
@@ -1805,8 +1802,9 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
@@ -1825,8 +1823,9 @@ ftrace_event_set_pid_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
index 9468bd8..57648c5 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
@@ -1448,6 +1449,10 @@ static int synth_events_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&synth_event_ops);
                if (ret < 0)
@@ -1680,7 +1685,7 @@ static int save_hist_vars(struct hist_trigger_data *hist_data)
        if (var_data)
                return 0;
 
-       if (trace_array_get(tr) < 0)
+       if (tracing_check_open_get_tr(tr))
                return -ENODEV;
 
        var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
@@ -5515,6 +5520,12 @@ static int hist_show(struct seq_file *m, void *v)
 
 static int event_hist_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return single_open(file, hist_show, file);
 }
 
index 2a2912c..2cd53ca 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  */
 
+#include <linux/security.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/mutex.h>
@@ -173,7 +174,11 @@ static const struct seq_operations event_triggers_seq_ops = {
 
 static int event_trigger_regex_open(struct inode *inode, struct file *file)
 {
-       int ret = 0;
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
 
        mutex_lock(&event_mutex);
 
@@ -292,6 +297,7 @@ event_trigger_write(struct file *filp, const char __user *ubuf,
 static int
 event_trigger_open(struct inode *inode, struct file *filp)
 {
+       /* Checks for tracefs lockdown */
        return event_trigger_regex_open(inode, filp);
 }
 
index fa95139..862f4b0 100644 (file)
@@ -150,7 +150,7 @@ void trace_hwlat_callback(bool enter)
                if (enter)
                        nmi_ts_start = time_get();
                else
-                       nmi_total_ts = time_get() - nmi_ts_start;
+                       nmi_total_ts += time_get() - nmi_ts_start;
        }
 
        if (enter)
@@ -256,6 +256,8 @@ static int get_sample(void)
                /* Keep a running maximum ever recorded hardware latency */
                if (sample > tr->max_latency)
                        tr->max_latency = sample;
+               if (outer_sample > tr->max_latency)
+                       tr->max_latency = outer_sample;
        }
 
 out:
index 324ffbe..1552a95 100644 (file)
@@ -7,11 +7,11 @@
  */
 #define pr_fmt(fmt)    "trace_kprobe: " fmt
 
+#include <linux/security.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/rculist.h>
 #include <linux/error-injection.h>
-#include <linux/security.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 
@@ -936,6 +936,10 @@ static int probes_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_kprobe_ops);
                if (ret < 0)
@@ -988,6 +992,12 @@ static const struct seq_operations profile_seq_op = {
 
 static int profile_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &profile_seq_op);
 }
 
index c3fd849..d4e31e9 100644 (file)
@@ -6,6 +6,7 @@
  *
  */
 #include <linux/seq_file.h>
+#include <linux/security.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/ftrace.h>
@@ -348,6 +349,12 @@ static const struct seq_operations show_format_seq_ops = {
 static int
 ftrace_formats_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &show_format_seq_ops);
 }
 
index ec9a34a..4df9a20 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
+#include <linux/security.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
@@ -470,6 +471,12 @@ static const struct seq_operations stack_trace_seq_ops = {
 
 static int stack_trace_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &stack_trace_seq_ops);
 }
 
@@ -487,6 +494,7 @@ stack_trace_filter_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
                                 inode, file);
 }
index 75bf1bc..9ab0a1a 100644 (file)
@@ -9,7 +9,7 @@
  *
  */
 
-
+#include <linux/security.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
@@ -238,6 +238,10 @@ static int tracing_stat_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        struct stat_session *session = inode->i_private;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        ret = stat_seq_init(session);
        if (ret)
                return ret;
index dd88434..352073d 100644 (file)
@@ -7,6 +7,7 @@
  */
 #define pr_fmt(fmt)    "trace_uprobe: " fmt
 
+#include <linux/security.h>
 #include <linux/ctype.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -769,6 +770,10 @@ static int probes_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_uprobe_ops);
                if (ret)
@@ -818,6 +823,12 @@ static const struct seq_operations profile_seq_op = {
 
 static int profile_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &profile_seq_op);
 }
 
index 6a0e9bd..ab75d73 100644 (file)
@@ -262,7 +262,7 @@ EXPORT_SYMBOL(devm_ioport_unmap);
 /*
  * PCI iomap devres
  */
-#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+#define PCIM_IOMAP_MAX PCI_STD_NUM_BARS
 
 struct pcim_iomap_devres {
        void __iomem *table[PCIM_IOMAP_MAX];
index ae25e2f..f25eb11 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/generic-radix-tree.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 
 #define GENRADIX_ARY           (PAGE_SIZE / sizeof(struct genradix_node *))
 #define GENRADIX_ARY_SHIFT     ilog2(GENRADIX_ARY)
@@ -75,6 +76,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
 }
 EXPORT_SYMBOL(__genradix_ptr);
 
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+       struct genradix_node *node;
+
+       node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+       /*
+        * We're using pages (not slab allocations) directly for kernel data
+        * structures, so we need to explicitly inform kmemleak of them in order
+        * to avoid false positive memory leak reports.
+        */
+       kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+       return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+       kmemleak_free(node);
+       free_page((unsigned long)node);
+}
+
 /*
  * Returns pointer to the specified byte @offset within @radix, allocating it if
  * necessary - newly allocated slots are always zeroed out:
@@ -97,8 +119,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
                        break;
 
                if (!new_node) {
-                       new_node = (void *)
-                               __get_free_page(gfp_mask|__GFP_ZERO);
+                       new_node = genradix_alloc_node(gfp_mask);
                        if (!new_node)
                                return NULL;
                }
@@ -121,8 +142,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
                n = READ_ONCE(*p);
                if (!n) {
                        if (!new_node) {
-                               new_node = (void *)
-                                       __get_free_page(gfp_mask|__GFP_ZERO);
+                               new_node = genradix_alloc_node(gfp_mask);
                                if (!new_node)
                                        return NULL;
                        }
@@ -133,7 +153,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
        }
 
        if (new_node)
-               free_page((unsigned long) new_node);
+               genradix_free_node(new_node);
 
        return &n->data[offset];
 }
@@ -191,7 +211,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
                                genradix_free_recurse(n->children[i], level - 1);
        }
 
-       free_page((unsigned long) n);
+       genradix_free_node(n);
 }
 
 int __genradix_prealloc(struct __genradix *radix, size_t size,
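
The generic-radix-tree change is the usual recipe for page-backed data structures: kmemleak does not track raw page allocations on its own, so a node obtained with __get_free_page() has to be registered by hand and unregistered before it is freed, otherwise objects referenced only from such pages look leaked. A stripped-down sketch of that pairing, with a made-up structure name:

#include <linux/gfp.h>
#include <linux/kmemleak.h>

struct my_node;   /* hypothetical page-sized node */

static struct my_node *my_node_alloc(gfp_t gfp)
{
        struct my_node *node =
                (struct my_node *)__get_free_page(gfp | __GFP_ZERO);

        if (node)   /* inform kmemleak so the page is tracked and scanned */
                kmemleak_alloc(node, PAGE_SIZE, 1, gfp);
        return node;
}

static void my_node_free(struct my_node *node)
{
        kmemleak_free(node);                    /* unregister before freeing */
        free_page((unsigned long)node);
}
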
index cd7a10c..08ec58c 100644 (file)
@@ -748,27 +748,6 @@ void *memset(void *s, int c, size_t count)
 EXPORT_SYMBOL(memset);
 #endif
 
-/**
- * memzero_explicit - Fill a region of memory (e.g. sensitive
- *                   keying data) with 0s.
- * @s: Pointer to the start of the area.
- * @count: The size of the area.
- *
- * Note: usually using memset() is just fine (!), but in cases
- * where clearing out _local_ data at the end of a scope is
- * necessary, memzero_explicit() should be used instead in
- * order to prevent the compiler from optimising away zeroing.
- *
- * memzero_explicit() doesn't need an arch-specific version as
- * it just invokes the one of memset() implicitly.
- */
-void memzero_explicit(void *s, size_t count)
-{
-       memset(s, 0, count);
-       barrier_data(s);
-}
-EXPORT_SYMBOL(memzero_explicit);
-
 #ifndef __HAVE_ARCH_MEMSET16
 /**
  * memset16() - Fill a memory area with a uint16_t
index 28ff554..6c0005d 100644 (file)
@@ -3,16 +3,10 @@
 #include <linux/export.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
+#include <linux/bitops.h>
 
 #include <asm/word-at-a-time.h>
 
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-#  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
-#else
-#  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
 /*
  * Do a strnlen, return length of string *with* final '\0'.
  * 'count' is the user-supplied count, while 'max' is the
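
For reference, aligned_byte_mask(n), which this hunk stops open-coding in favour of the definition now pulled in through <linux/bitops.h>, selects the first n bytes of a word as it is loaded from memory. A few worked values, assuming a 64-bit little-endian build:

	aligned_byte_mask(1) == 0x00000000000000ff
	aligned_byte_mask(3) == 0x0000000000ffffff
	aligned_byte_mask(7) == 0x00ffffffffffffff

check_zeroed_user() further down uses exactly this to ignore the bytes before an unaligned start and the bytes past the end of the range.
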
index 9729f27..9742e5c 100644 (file)
@@ -297,6 +297,32 @@ out:
        return 1;
 }
 
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+       struct kmem_cache *c;
+       int i, iter, maxiter = 1024;
+       int num, bytes;
+       bool fail = false;
+       void *objects[10];
+
+       c = kmem_cache_create("test_cache", size, size, 0, NULL);
+       for (iter = 0; (iter < maxiter) && !fail; iter++) {
+               num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+                                           objects);
+               for (i = 0; i < num; i++) {
+                       bytes = count_nonzero_bytes(objects[i], size);
+                       if (bytes)
+                               fail = true;
+                       fill_with_garbage(objects[i], size);
+               }
+
+               if (num)
+                       kmem_cache_free_bulk(c, num, objects);
+       }
+       *total_failures += fail;
+       return 1;
+}
+
 /*
  * Test kmem_cache allocation by creating caches of different sizes, with and
  * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
@@ -318,6 +344,7 @@ static int __init test_kmemcache(int *total_failures)
                        num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
                                                        &failures);
                }
+               num_tests += do_kmem_cache_size_bulk(size, &failures);
        }
        REPORT_FAILURES_IN_FN();
        *total_failures += failures;
index 67bcd5d..5ff04d8 100644 (file)
 # define TEST_U64
 #endif
 
-#define test(condition, msg)           \
-({                                     \
-       int cond = (condition);         \
-       if (cond)                       \
-               pr_warn("%s\n", msg);   \
-       cond;                           \
+#define test(condition, msg, ...)                                      \
+({                                                                     \
+       int cond = (condition);                                         \
+       if (cond)                                                       \
+               pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);     \
+       cond;                                                           \
 })
 
+static bool is_zeroed(void *from, size_t size)
+{
+       return memchr_inv(from, 0x0, size) == NULL;
+}
+
+static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
+{
+       int ret = 0;
+       size_t start, end, i, zero_start, zero_end;
+
+       if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+               return -EINVAL;
+
+       /*
+        * We want to cross a page boundary to exercise the code more
+        * effectively. We also don't want to make the size we scan too large,
+        * otherwise the test can take a long time and cause soft lockups. So
+        * scan a 1024 byte region across the page boundary.
+        */
+       size = 1024;
+       start = PAGE_SIZE - (size / 2);
+
+       kmem += start;
+       umem += start;
+
+       zero_start = size / 4;
+       zero_end = size - zero_start;
+
+       /*
+        * We conduct a series of check_nonzero_user() tests on a block of
+        * memory with the following byte-pattern (trying every possible
+        * [start,end] pair):
+        *
+        *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+        *
+        * And we verify that check_nonzero_user() acts identically to
+        * memchr_inv().
+        */
+
+       memset(kmem, 0x0, size);
+       for (i = 1; i < zero_start; i += 2)
+               kmem[i] = 0xff;
+       for (i = zero_end; i < size; i += 2)
+               kmem[i] = 0xff;
+
+       ret |= test(copy_to_user(umem, kmem, size),
+                   "legitimate copy_to_user failed");
+
+       for (start = 0; start <= size; start++) {
+               for (end = start; end <= size; end++) {
+                       size_t len = end - start;
+                       int retval = check_zeroed_user(umem + start, len);
+                       int expected = is_zeroed(kmem + start, len);
+
+                       ret |= test(retval != expected,
+                                   "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+                                   retval, expected, start, end);
+               }
+       }
+
+       return ret;
+}
+
+static int test_copy_struct_from_user(char *kmem, char __user *umem,
+                                     size_t size)
+{
+       int ret = 0;
+       char *umem_src = NULL, *expected = NULL;
+       size_t ksize, usize;
+
+       umem_src = kmalloc(size, GFP_KERNEL);
+       ret = test(umem_src == NULL, "kmalloc failed");
+       if (ret)
+               goto out_free;
+
+       expected = kmalloc(size, GFP_KERNEL);
+       ret = test(expected == NULL, "kmalloc failed");
+       if (ret)
+               goto out_free;
+
+       /* Fill umem with a fixed byte pattern. */
+       memset(umem_src, 0x3e, size);
+       ret |= test(copy_to_user(umem, umem_src, size),
+                   "legitimate copy_to_user failed");
+
+       /* Check basic case -- (usize == ksize). */
+       ksize = size;
+       usize = size;
+
+       memcpy(expected, umem_src, ksize);
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                   "copy_struct_from_user(usize == ksize) failed");
+       ret |= test(memcmp(kmem, expected, ksize),
+                   "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+       /* Old userspace case -- (usize < ksize). */
+       ksize = size;
+       usize = size / 2;
+
+       memcpy(expected, umem_src, usize);
+       memset(expected + usize, 0x0, ksize - usize);
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                   "copy_struct_from_user(usize < ksize) failed");
+       ret |= test(memcmp(kmem, expected, ksize),
+                   "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+       /* New userspace (-E2BIG) case -- (usize > ksize). */
+       ksize = size / 2;
+       usize = size;
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
+                   "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+       /* New userspace (success) case -- (usize > ksize). */
+       ksize = size / 2;
+       usize = size;
+
+       memcpy(expected, umem_src, ksize);
+       ret |= test(clear_user(umem + ksize, usize - ksize),
+                   "legitimate clear_user failed");
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                   "copy_struct_from_user(usize > ksize) failed");
+       ret |= test(memcmp(kmem, expected, ksize),
+                   "copy_struct_from_user(usize > ksize) gives unexpected copy");
+
+out_free:
+       kfree(expected);
+       kfree(umem_src);
+       return ret;
+}
+
 static int __init test_user_copy_init(void)
 {
        int ret = 0;
@@ -106,6 +244,11 @@ static int __init test_user_copy_init(void)
 #endif
 #undef test_legit
 
+       /* Test usage of check_nonzero_user(). */
+       ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+       /* Test usage of copy_struct_from_user(). */
+       ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
+
        /*
         * Invalid usage: none of these copies should succeed.
         */
index 4f16eec..f68dea8 100644 (file)
@@ -89,9 +89,9 @@
  *       goto errout;
  *   }
  *
- *   pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
+ *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
  *   if (pos != UINT_MAX)
- *       panic("Oh my god, dancing chickens at \%d\n", pos);
+ *       panic("Oh my god, dancing chickens at %d\n", pos);
  *
  *   textsearch_destroy(conf);
  */
index c2bfbca..cbb4d9e 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/uaccess.h>
+#include <linux/bitops.h>
 
 /* out-of-line parts */
 
@@ -31,3 +32,57 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 }
 EXPORT_SYMBOL(_copy_to_user);
 #endif
+
+/**
+ * check_zeroed_user: check if a userspace buffer only contains zero bytes
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
+ * userspace addresses (and is more efficient because we don't care where the
+ * first non-zero byte is).
+ *
+ * Returns:
+ *  * 0: There were non-zero bytes present in the buffer.
+ *  * 1: The buffer was full of zero bytes.
+ *  * -EFAULT: access to userspace failed.
+ */
+int check_zeroed_user(const void __user *from, size_t size)
+{
+       unsigned long val;
+       uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+       if (unlikely(size == 0))
+               return 1;
+
+       from -= align;
+       size += align;
+
+       if (!user_access_begin(from, size))
+               return -EFAULT;
+
+       unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+       if (align)
+               val &= ~aligned_byte_mask(align);
+
+       while (size > sizeof(unsigned long)) {
+               if (unlikely(val))
+                       goto done;
+
+               from += sizeof(unsigned long);
+               size -= sizeof(unsigned long);
+
+               unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+       }
+
+       if (size < sizeof(unsigned long))
+               val &= aligned_byte_mask(size);
+
+done:
+       user_access_end();
+       return (val == 0);
+err_fault:
+       user_access_end();
+       return -EFAULT;
+}
+EXPORT_SYMBOL(check_zeroed_user);
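
check_zeroed_user() exists to support extensible struct ABIs: newer userspace may pass a larger struct than the kernel understands, and the call is accepted only if the unknown tail bytes are all zero. The lib/test_user_copy.c hunk earlier in this diff exercises copy_struct_from_user(), which wraps this logic; below is a hedged sketch of using the helper directly, where the syscall, struct, and field names are invented:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical extensible argument struct: v1 of the ABI. */
struct my_args {
        __u64 flags;
        __u64 value;
};

static long my_extensible_call(const void __user *uarg, size_t usize)
{
        struct my_args args = {};       /* unknown-to-userspace fields stay 0 */
        int ret;

        if (usize > sizeof(args)) {
                /* Trailing fields we don't know about must be zero. */
                ret = check_zeroed_user(uarg + sizeof(args),
                                        usize - sizeof(args));
                if (ret < 0)
                        return ret;     /* -EFAULT */
                if (ret == 0)
                        return -E2BIG;  /* userspace set bits we can't honour */
                usize = sizeof(args);
        }
        if (copy_from_user(&args, uarg, usize))
                return -EFAULT;

        /* ... act on args.flags / args.value ... */
        return 0;
}
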
index cc00364..9fe698f 100644 (file)
@@ -24,13 +24,4 @@ config GENERIC_COMPAT_VDSO
        help
          This config option enables the compat VDSO layer.
 
-config CROSS_COMPILE_COMPAT_VDSO
-       string "32 bit Toolchain prefix for compat vDSO"
-       default ""
-       depends on GENERIC_COMPAT_VDSO
-       help
-         Defines the cross-compiler prefix for compiling compat vDSO.
-         If a 64 bit compiler (i.e. x86_64) can compile the VDSO for
-         32 bit, it does not need to define this parameter.
-
 endif
index d9daa3e..c360f6a 100644 (file)
@@ -239,8 +239,8 @@ static int __init default_bdi_init(void)
 {
        int err;
 
-       bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
-                                             WQ_UNBOUND | WQ_SYSFS, 0);
+       bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
+                                WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;
 
index ce08b39..672d3c7 100644 (file)
@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 
        /* Ensure the start of the pageblock or zone is online and valid */
        block_pfn = pageblock_start_pfn(pfn);
-       block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+       block_pfn = max(block_pfn, zone->zone_start_pfn);
+       block_page = pfn_to_online_page(block_pfn);
        if (block_page) {
                page = block_page;
                pfn = block_pfn;
        }
 
        /* Ensure the end of the pageblock or zone is online and valid */
-       block_pfn += pageblock_nr_pages;
+       block_pfn = pageblock_end_pfn(pfn) - 1;
        block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
        end_page = pfn_to_online_page(block_pfn);
        if (!end_page)
@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 
                page += (1 << PAGE_ALLOC_COSTLY_ORDER);
                pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
-       } while (page < end_page);
+       } while (page <= end_page);
 
        return false;
 }
index 1146fcf..85b7d08 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/rmap.h>
 #include <linux/delayacct.h>
 #include <linux/psi.h>
+#include <linux/ramfs.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
index 23a9f9c..8f236a3 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1973,7 +1973,8 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 }
 
 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-                      unsigned long end, int write, struct page **pages, int *nr)
+                      unsigned long end, unsigned int flags,
+                      struct page **pages, int *nr)
 {
        unsigned long pte_end;
        struct page *head, *page;
@@ -1986,7 +1987,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 
        pte = READ_ONCE(*ptep);
 
-       if (!pte_access_permitted(pte, write))
+       if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                return 0;
 
        /* hugepages are never "special" */
@@ -2023,7 +2024,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 }
 
 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-               unsigned int pdshift, unsigned long end, int write,
+               unsigned int pdshift, unsigned long end, unsigned int flags,
                struct page **pages, int *nr)
 {
        pte_t *ptep;
@@ -2033,7 +2034,7 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
-               if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+               if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);
 
@@ -2041,7 +2042,7 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
 }
 #else
 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-               unsigned pdshift, unsigned long end, int write,
+               unsigned int pdshift, unsigned long end, unsigned int flags,
                struct page **pages, int *nr)
 {
        return 0;
@@ -2049,7 +2050,8 @@ static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
 #endif /* CONFIG_ARCH_HAS_HUGEPD */
 
 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
-               unsigned long end, unsigned int flags, struct page **pages, int *nr)
+                       unsigned long end, unsigned int flags,
+                       struct page **pages, int *nr)
 {
        struct page *head, *page;
        int refs;
index c5cb6dc..13cc937 100644 (file)
@@ -2789,8 +2789,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        ds_queue->split_queue_len--;
                        list_del(page_deferred_list(head));
                }
-               if (mapping)
-                       __dec_node_page_state(page, NR_SHMEM_THPS);
+               if (mapping) {
+                       if (PageSwapBacked(page))
+                               __dec_node_page_state(page, NR_SHMEM_THPS);
+                       else
+                               __dec_node_page_state(page, NR_FILE_THPS);
+               }
+
                spin_unlock(&ds_queue->split_queue_lock);
                __split_huge_page(page, list, end, flags);
                if (PageSwapCache(head)) {
index ef37c85..b45a953 100644 (file)
@@ -1084,11 +1084,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
        struct page *page;
 
        for (i = start_pfn; i < end_pfn; i++) {
-               if (!pfn_valid(i))
+               page = pfn_to_online_page(i);
+               if (!page)
                        return false;
 
-               page = pfn_to_page(i);
-
                if (page_zone(page) != z)
                        return false;
 
index fb1e150..1960330 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
+#include <linux/mman.h>
 
 #include <linux/atomic.h>
 #include <linux/user_namespace.h>
index 03a8d84..2446076 100644 (file)
@@ -527,6 +527,16 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 }
 
 /*
+ * Remove an object from the object_tree_root and object_list. Must be called
+ * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ */
+static void __remove_object(struct kmemleak_object *object)
+{
+       rb_erase(&object->rb_node, &object_tree_root);
+       list_del_rcu(&object->object_list);
+}
+
+/*
  * Look up an object in the object search tree and remove it from both
  * object_tree_root and object_list. The returned object's use_count should be
  * at least 1, as initially set by create_object().
@@ -538,10 +548,8 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
 
        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
-       if (object) {
-               rb_erase(&object->rb_node, &object_tree_root);
-               list_del_rcu(&object->object_list);
-       }
+       if (object)
+               __remove_object(object);
        write_unlock_irqrestore(&kmemleak_lock, flags);
 
        return object;
@@ -1834,12 +1842,16 @@ static const struct file_operations kmemleak_fops = {
 
 static void __kmemleak_do_cleanup(void)
 {
-       struct kmemleak_object *object;
+       struct kmemleak_object *object, *tmp;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(object, &object_list, object_list)
-               delete_object_full(object->pointer);
-       rcu_read_unlock();
+       /*
+        * Kmemleak has already been disabled; there is no need for RCU list
+        * traversal or for holding kmemleak_lock.
+        */
+       list_for_each_entry_safe(object, tmp, &object_list, object_list) {
+               __remove_object(object);
+               __delete_object(object);
+       }
 }
 
 /*
index 7d4f61a..c4b16ca 100644 (file)
@@ -1356,9 +1356,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                align = SMP_CACHE_BYTES;
        }
 
-       if (end > memblock.current_limit)
-               end = memblock.current_limit;
-
 again:
        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
@@ -1469,6 +1466,9 @@ static void * __init memblock_alloc_internal(
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);
 
+       if (max_addr > memblock.current_limit)
+               max_addr = memblock.current_limit;
+
        alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
 
        /* retry allocation without lower limit */
index c313c49..3631065 100644 (file)
@@ -1567,6 +1567,11 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
        return max;
 }
 
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+       return page_counter_read(&memcg->memory);
+}
+
 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                     int order)
 {
@@ -5415,6 +5420,8 @@ static int mem_cgroup_move_account(struct page *page,
                                   struct mem_cgroup *from,
                                   struct mem_cgroup *to)
 {
+       struct lruvec *from_vec, *to_vec;
+       struct pglist_data *pgdat;
        unsigned long flags;
        unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
        int ret;
@@ -5438,11 +5445,15 @@ static int mem_cgroup_move_account(struct page *page,
 
        anon = PageAnon(page);
 
+       pgdat = page_pgdat(page);
+       from_vec = mem_cgroup_lruvec(pgdat, from);
+       to_vec = mem_cgroup_lruvec(pgdat, to);
+
        spin_lock_irqsave(&from->move_lock, flags);
 
        if (!anon && page_mapped(page)) {
-               __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
-               __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
+               __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+               __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
        }
 
        /*
@@ -5454,14 +5465,14 @@ static int mem_cgroup_move_account(struct page *page,
                struct address_space *mapping = page_mapping(page);
 
                if (mapping_cap_account_dirty(mapping)) {
-                       __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
-                       __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
+                       __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
+                       __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
                }
        }
 
        if (PageWriteback(page)) {
-               __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
-               __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
+               __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
+               __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 7ef849d..3151c87 100644 (file)
@@ -199,7 +199,6 @@ struct to_kill {
        struct task_struct *tsk;
        unsigned long addr;
        short size_shift;
-       char addr_valid;
 };
 
 /*
@@ -324,22 +323,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
                }
        }
        tk->addr = page_address_in_vma(p, vma);
-       tk->addr_valid = 1;
        if (is_zone_device_page(p))
                tk->size_shift = dev_pagemap_mapping_shift(p, vma);
        else
                tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
        /*
-        * In theory we don't have to kill when the page was
-        * munmaped. But it could be also a mremap. Since that's
-        * likely very rare kill anyways just out of paranoia, but use
-        * a SIGKILL because the error is not contained anymore.
+        * Send SIGKILL if "tk->addr == -EFAULT". Also, since
+        * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+        * "tk->size_shift == 0" effectively checks for no mapping on
+        * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+        * to a process' address space, it's possible not all N VMAs
+        * contain mappings for the page, but at least one VMA does.
+        * Only deliver SIGBUS with payload derived from the VMA that
+        * has a mapping for the page.
         */
-       if (tk->addr == -EFAULT || tk->size_shift == 0) {
+       if (tk->addr == -EFAULT) {
                pr_info("Memory failure: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
-               tk->addr_valid = 0;
+       } else if (tk->size_shift == 0) {
+               kfree(tk);
+               return;
        }
        get_task_struct(tsk);
        tk->tsk = tsk;
@@ -366,7 +370,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
                         * make sure the process doesn't catch the
                         * signal and then access the memory. Just kill it.
                         */
-                       if (fail || tk->addr_valid == 0) {
+                       if (fail || tk->addr == -EFAULT) {
                                pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
                                do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
@@ -1253,17 +1257,19 @@ int memory_failure(unsigned long pfn, int flags)
        if (!sysctl_memory_failure_recovery)
                panic("Memory failure on page %lx", pfn);
 
-       if (!pfn_valid(pfn)) {
+       p = pfn_to_online_page(pfn);
+       if (!p) {
+               if (pfn_valid(pfn)) {
+                       pgmap = get_dev_pagemap(pfn, NULL);
+                       if (pgmap)
+                               return memory_failure_dev_pagemap(pfn, flags,
+                                                                 pgmap);
+               }
                pr_err("Memory failure: %#lx: memory outside kernel control\n",
                        pfn);
                return -ENXIO;
        }
 
-       pgmap = get_dev_pagemap(pfn, NULL);
-       if (pgmap)
-               return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
-       p = pfn_to_page(pfn);
        if (PageHuge(p))
                return memory_failure_hugetlb(pfn, flags);
        if (TestSetPageHWPoison(p)) {
index b1be791..df570e5 100644 (file)
@@ -436,67 +436,25 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
        zone_span_writeunlock(zone);
 }
 
-static void shrink_pgdat_span(struct pglist_data *pgdat,
-                             unsigned long start_pfn, unsigned long end_pfn)
+static void update_pgdat_span(struct pglist_data *pgdat)
 {
-       unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
-       unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
-       unsigned long pgdat_end_pfn = p;
-       unsigned long pfn;
-       int nid = pgdat->node_id;
-
-       if (pgdat_start_pfn == start_pfn) {
-               /*
-                * If the section is smallest section in the pgdat, it need
-                * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
-                * In this case, we find second smallest valid mem_section
-                * for shrinking zone.
-                */
-               pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
-                                               pgdat_end_pfn);
-               if (pfn) {
-                       pgdat->node_start_pfn = pfn;
-                       pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
-               }
-       } else if (pgdat_end_pfn == end_pfn) {
-               /*
-                * If the section is biggest section in the pgdat, it need
-                * shrink pgdat->node_spanned_pages.
-                * In this case, we find second biggest valid mem_section for
-                * shrinking zone.
-                */
-               pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
-                                              start_pfn);
-               if (pfn)
-                       pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
-       }
-
-       /*
-        * If the section is not biggest or smallest mem_section in the pgdat,
-        * it only creates a hole in the pgdat. So in this case, we need not
-        * change the pgdat.
-        * But perhaps, the pgdat has only hole data. Thus it check the pgdat
-        * has only hole or not.
-        */
-       pfn = pgdat_start_pfn;
-       for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-               if (unlikely(!pfn_valid(pfn)))
-                       continue;
-
-               if (pfn_to_nid(pfn) != nid)
-                       continue;
+       unsigned long node_start_pfn = 0, node_end_pfn = 0;
+       struct zone *zone;
 
-               /* Skip range to be removed */
-               if (pfn >= start_pfn && pfn < end_pfn)
-                       continue;
+       for (zone = pgdat->node_zones;
+            zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
+               unsigned long zone_end_pfn = zone->zone_start_pfn +
+                                            zone->spanned_pages;
 
-               /* If we find valid section, we have nothing to do */
-               return;
+               /* No need to lock the zones, they can't change. */
+               if (zone_end_pfn > node_end_pfn)
+                       node_end_pfn = zone_end_pfn;
+               if (zone->zone_start_pfn < node_start_pfn)
+                       node_start_pfn = zone->zone_start_pfn;
        }
 
-       /* The pgdat has no valid section */
-       pgdat->node_start_pfn = 0;
-       pgdat->node_spanned_pages = 0;
+       pgdat->node_start_pfn = node_start_pfn;
+       pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
 static void __remove_zone(struct zone *zone, unsigned long start_pfn,
@@ -507,7 +465,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
 
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
-       shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
+       update_pgdat_span(pgdat);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
 }
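
update_pgdat_span() above recomputes the node span as a min/max reduction over the zone spans instead of walking individual sections. The user-space sketch below shows the same reduction; it differs from the hunk in that it skips empty zones and seeds both bounds from the first populated zone, since with node_start_pfn seeded to 0 the "zone_start_pfn < node_start_pfn" test can never raise the computed start. The zone layout used here is made up.

#include <stdio.h>

/* Hypothetical zone descriptor: just a start pfn and a span. */
struct toy_zone {
	unsigned long start_pfn;
	unsigned long spanned_pages;
};

/* Node span = min start / max end over populated zones; an all-empty
 * node collapses to a zero span. */
static void toy_update_node_span(const struct toy_zone *zones, int nr,
				 unsigned long *node_start,
				 unsigned long *node_spanned)
{
	unsigned long start = 0, end = 0;

	for (int i = 0; i < nr; i++) {
		unsigned long zend = zones[i].start_pfn + zones[i].spanned_pages;

		if (!zones[i].spanned_pages)
			continue;
		if (!end) {			/* first populated zone */
			start = zones[i].start_pfn;
			end = zend;
			continue;
		}
		if (zend > end)
			end = zend;
		if (zones[i].start_pfn < start)
			start = zones[i].start_pfn;
	}
	*node_start = start;
	*node_spanned = end - start;
}

int main(void)
{
	struct toy_zone zones[] = { { 0, 0 }, { 4096, 8192 }, { 16384, 4096 } };
	unsigned long start, spanned;

	toy_update_node_span(zones, 3, &start, &spanned);
	printf("node spans pfn %lu..%lu (%lu pages)\n",
	       start, start + spanned, spanned);
	return 0;
}
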
 
index 32c79b5..03ccbdf 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/xarray.h>
 
 static DEFINE_XARRAY(pgmap_array);
-#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
-#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
@@ -105,6 +103,7 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
        struct resource *res = &pgmap->res;
+       struct page *first_page;
        unsigned long pfn;
        int nid;
 
@@ -113,14 +112,16 @@ void memunmap_pages(struct dev_pagemap *pgmap)
                put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);
 
+       /* make sure to access a memmap that was actually initialized */
+       first_page = pfn_to_page(pfn_first(pgmap));
+
        /* pages are dead and unused, undo the arch mapping */
-       nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
+       nid = page_to_nid(first_page);
 
        mem_hotplug_begin();
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               pfn = PHYS_PFN(res->start);
-               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
-                                PHYS_PFN(resource_size(res)), NULL);
+               __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+                              PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
                                pgmap_altmap(pgmap));
index 15c2050..ecc3dba 100644 (file)
@@ -1175,11 +1175,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
-       arch_free_page(page, order);
        if (want_init_on_free())
                kernel_init_free_pages(page, 1 << order);
 
        kernel_poison_pages(page, 1 << order, 0);
+       /*
+        * arch_free_page() can make the page's contents inaccessible.  s390
+        * does this.  So nothing which can access the page's contents should
+        * happen after this.
+        */
+       arch_free_page(page, order);
+
        if (debug_pagealloc_enabled())
                kernel_map_pages(page, 1 << order, 0);
 
@@ -4467,12 +4473,14 @@ retry_cpuset:
                if (page)
                        goto got_pg;
 
-                if (order >= pageblock_order && (gfp_mask & __GFP_IO)) {
+                if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
+                    !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
                        /*
                         * If allocating entire pageblock(s) and compaction
                         * failed because all zones are below low watermarks
                         * or is prohibited because it recently failed at this
-                        * order, fail immediately.
+                        * order, fail immediately unless the allocator has
+                        * requested compaction and reclaim retry.
                         *
                         * Reclaim is
                         *  - potentially very expensive because zones are far
index 5f5769c..4ade843 100644 (file)
@@ -67,8 +67,9 @@ static struct page_ext_operations *page_ext_ops[] = {
 #endif
 };
 
+unsigned long page_ext_size = sizeof(struct page_ext);
+
 static unsigned long total_usage;
-static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks(void)
 
        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
-                       page_ext_ops[i]->offset = sizeof(struct page_ext) +
-                                               extra_mem;
-                       extra_mem += page_ext_ops[i]->size;
+                       page_ext_ops[i]->offset = page_ext_size;
+                       page_ext_size += page_ext_ops[i]->size;
                        need = true;
                }
        }
@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks(void)
        }
 }
 
-static unsigned long get_entry_size(void)
-{
-       return sizeof(struct page_ext) + extra_mem;
-}
-
 static inline struct page_ext *get_entry(void *base, unsigned long index)
 {
-       return base + get_entry_size() * index;
+       return base + page_ext_size * index;
 }
 
 #if !defined(CONFIG_SPARSEMEM)
@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(int nid)
                !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
                nr_pages += MAX_ORDER_NR_PAGES;
 
-       table_size = get_entry_size() * nr_pages;
+       table_size = page_ext_size * nr_pages;
 
        base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -234,7 +229,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
        if (section->page_ext)
                return 0;
 
-       table_size = get_entry_size() * PAGES_PER_SECTION;
+       table_size = page_ext_size * PAGES_PER_SECTION;
        base = alloc_page_ext(table_size, nid);
 
        /*
@@ -254,7 +249,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
         * we need to apply a mask.
         */
        pfn &= PAGE_SECTION_MASK;
-       section->page_ext = (void *)base - get_entry_size() * pfn;
+       section->page_ext = (void *)base - page_ext_size * pfn;
        total_usage += table_size;
        return 0;
 }
@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
                struct page *page = virt_to_page(addr);
                size_t table_size;
 
-               table_size = get_entry_size() * PAGES_PER_SECTION;
+               table_size = page_ext_size * PAGES_PER_SECTION;
 
                BUG_ON(PageReserved(page));
                kmemleak_free(addr);
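
With page_ext_size exported and the per-client extra space folded into it, every lookup reduces to base + page_ext_size * index. A toy sketch of the same variable-size-record layout, with invented client sizes, shows how each client's offset is reserved at init time:

#include <stdio.h>
#include <stdlib.h>

/* Toy page_ext: a fixed header followed by whatever extra space each
 * enabled client asked for.  Clients and sizes are made up. */
struct toy_page_ext { unsigned long flags; };

static size_t page_ext_size = sizeof(struct toy_page_ext);

/* Reserve 'size' bytes per entry and return the offset of that region. */
static size_t reserve_ext(size_t size)
{
	size_t offset = page_ext_size;

	page_ext_size += size;
	return offset;
}

static struct toy_page_ext *get_entry(void *base, unsigned long index)
{
	return (struct toy_page_ext *)((char *)base + page_ext_size * index);
}

int main(void)
{
	size_t owner_off = reserve_ext(24);	/* e.g. a page_owner-like client */
	size_t tag_off = reserve_ext(8);	/* another hypothetical client */
	unsigned long nr_entries = 4;
	void *table = calloc(nr_entries, page_ext_size);

	printf("entry size %zu, owner data at +%zu, tag data at +%zu\n",
	       page_ext_size, owner_off, tag_off);
	printf("entry 3 lives at offset %zu from base\n",
	       (size_t)((char *)get_entry(table, 3) - (char *)table));
	free(table);
	return 0;
}
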
index dee9311..18ecde9 100644 (file)
@@ -24,12 +24,10 @@ struct page_owner {
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
-#ifdef CONFIG_DEBUG_PAGEALLOC
        depot_stack_handle_t free_handle;
-#endif
 };
 
-static bool page_owner_disabled = true;
+static bool page_owner_enabled = false;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
 static depot_stack_handle_t dummy_handle;
@@ -44,7 +42,7 @@ static int __init early_page_owner_param(char *buf)
                return -EINVAL;
 
        if (strcmp(buf, "on") == 0)
-               page_owner_disabled = false;
+               page_owner_enabled = true;
 
        return 0;
 }
@@ -52,10 +50,7 @@ early_param("page_owner", early_page_owner_param);
 
 static bool need_page_owner(void)
 {
-       if (page_owner_disabled)
-               return false;
-
-       return true;
+       return page_owner_enabled;
 }
 
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
@@ -84,7 +79,7 @@ static noinline void register_early_stack(void)
 
 static void init_page_owner(void)
 {
-       if (page_owner_disabled)
+       if (!page_owner_enabled)
                return;
 
        register_dummy_stack();
@@ -148,25 +143,19 @@ void __reset_page_owner(struct page *page, unsigned int order)
 {
        int i;
        struct page_ext *page_ext;
-#ifdef CONFIG_DEBUG_PAGEALLOC
        depot_stack_handle_t handle = 0;
        struct page_owner *page_owner;
 
-       if (debug_pagealloc_enabled())
-               handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
-#endif
+       handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 
+       page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
        for (i = 0; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
-               if (unlikely(!page_ext))
-                       continue;
-               __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
-#ifdef CONFIG_DEBUG_PAGEALLOC
-               if (debug_pagealloc_enabled()) {
-                       page_owner = get_page_owner(page_ext);
-                       page_owner->free_handle = handle;
-               }
-#endif
+               __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+               page_owner = get_page_owner(page_ext);
+               page_owner->free_handle = handle;
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -184,9 +173,9 @@ static inline void __set_page_owner_handle(struct page *page,
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
-               __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+               __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
 
-               page_ext = lookup_page_ext(page + i);
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -224,12 +213,10 @@ void __split_page_owner(struct page *page, unsigned int order)
        if (unlikely(!page_ext))
                return;
 
-       page_owner = get_page_owner(page_ext);
-       page_owner->order = 0;
-       for (i = 1; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
+       for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -260,7 +247,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
-       __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
+       __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
 }
 
 void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -284,7 +271,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
-               if (!pfn_valid(pfn)) {
+               page = pfn_to_online_page(pfn);
+               if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }
@@ -292,13 +280,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               page = pfn_to_page(pfn);
                pageblock_mt = get_pageblock_migratetype(page);
 
                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;
 
+                       /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);
 
                        if (page_zone(page) != zone)
@@ -320,7 +308,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                        if (unlikely(!page_ext))
                                continue;
 
-                       if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+                       if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                continue;
 
                        page_owner = get_page_owner(page_ext);
@@ -435,7 +423,7 @@ void __dump_page_owner(struct page *page)
                return;
        }
 
-       if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+       if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");
@@ -451,7 +439,6 @@ void __dump_page_owner(struct page *page)
                stack_trace_print(entries, nr_entries, 0);
        }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
@@ -460,7 +447,6 @@ void __dump_page_owner(struct page *page)
                pr_alert("page last free stack trace:\n");
                stack_trace_print(entries, nr_entries, 0);
        }
-#endif
 
        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
@@ -527,7 +513,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
-               if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+               if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        continue;
 
                page_owner = get_page_owner(page_ext);
index d9a23bb..0c7b2a9 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -61,6 +61,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
index cd570cc..220be9f 100644 (file)
@@ -3482,6 +3482,12 @@ static int shmem_parse_options(struct fs_context *fc, void *data)
 {
        char *options = data;
 
+       if (options) {
+               int err = security_sb_eat_lsm_opts(options, &fc->security);
+               if (err)
+                       return err;
+       }
+
        while (options != NULL) {
                char *this_char = options;
                for (;;) {
index 3ce1248..b3fe97f 100644 (file)
@@ -33,7 +33,7 @@ __meminit void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 }
 
 static bool shuffle_param;
-extern int shuffle_show(char *buffer, const struct kernel_param *kp)
+static int shuffle_show(char *buffer, const struct kernel_param *kp)
 {
        return sprintf(buffer, "%c\n", test_bit(SHUFFLE_ENABLE, &shuffle_state)
                        ? 'Y' : 'N');
index 9df3705..66e5d80 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4206,9 +4206,12 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 
 /**
  * __ksize -- Uninstrumented ksize.
+ * @objp: pointer to the object
  *
  * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
  * safety checks as ksize() with KASAN instrumentation enabled.
+ *
+ * Return: size of the actual memory used by @objp in bytes
  */
 size_t __ksize(const void *objp)
 {
index 6491c3a..f9fb27b 100644 (file)
@@ -178,10 +178,13 @@ static int init_memcg_params(struct kmem_cache *s,
 
 static void destroy_memcg_params(struct kmem_cache *s)
 {
-       if (is_root_cache(s))
+       if (is_root_cache(s)) {
                kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
-       else
+       } else {
+               mem_cgroup_put(s->memcg_params.memcg);
+               WRITE_ONCE(s->memcg_params.memcg, NULL);
                percpu_ref_exit(&s->memcg_params.refcnt);
+       }
 }
 
 static void free_memcg_params(struct rcu_head *rcu)
@@ -253,8 +256,6 @@ static void memcg_unlink_cache(struct kmem_cache *s)
        } else {
                list_del(&s->memcg_params.children_node);
                list_del(&s->memcg_params.kmem_caches_node);
-               mem_cgroup_put(s->memcg_params.memcg);
-               WRITE_ONCE(s->memcg_params.memcg, NULL);
        }
 }
 #else
@@ -1030,10 +1031,19 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
                unsigned int useroffset, unsigned int usersize)
 {
        int err;
+       unsigned int align = ARCH_KMALLOC_MINALIGN;
 
        s->name = name;
        s->size = s->object_size = size;
-       s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+
+       /*
+        * For power of two sizes, guarantee natural alignment for kmalloc
+        * caches, regardless of SL*B debugging options.
+        */
+       if (is_power_of_2(size))
+               align = max(align, size);
+       s->align = calculate_alignment(flags, align, size);
+
        s->useroffset = useroffset;
        s->usersize = usersize;
 
@@ -1287,12 +1297,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-       void *ret;
+       void *ret = NULL;
        struct page *page;
 
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
-       ret = page ? page_address(page) : NULL;
+       if (likely(page)) {
+               ret = page_address(page);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   1 << order);
+       }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ret, size, 1, flags);
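
The create_boot_cache() change gives power-of-two kmalloc caches their natural alignment even when slab debugging inflates the metadata. A minimal sketch of the selection rule, with ARCH_KMALLOC_MINALIGN assumed to be 8 and calculate_alignment() reduced to a plain max():

#include <stdbool.h>
#include <stdio.h>

#define ARCH_KMALLOC_MINALIGN 8		/* assumed value for illustration */

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Toy stand-in for the align selection: power-of-two sizes get their
 * natural alignment, everything else keeps the architecture minimum. */
static unsigned int kmalloc_cache_align(unsigned int size)
{
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	if (is_power_of_2(size) && size > align)
		align = size;
	return align;
}

int main(void)
{
	unsigned int sizes[] = { 8, 64, 96, 192, 256, 1024 };

	for (int i = 0; i < 6; i++)
		printf("kmalloc-%u -> align %u\n",
		       sizes[i], kmalloc_cache_align(sizes[i]));
	return 0;
}
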
index cf377be..fa53e9f 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-       void *page;
+       struct page *page;
 
 #ifdef CONFIG_NUMA
        if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
        if (!page)
                return NULL;
 
+       mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                           1 << order);
        return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
+       struct page *sp = virt_to_page(b);
+
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
-       free_pages((unsigned long)b, order);
+
+       mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                           -(1 << order));
+       __free_pages(sp, order);
 }
 
 /*
@@ -217,6 +224,7 @@ static void slob_free_pages(void *b, int order)
  * @sp: Page to look in.
  * @size: Size of the allocation.
  * @align: Allocation alignment.
+ * @align_offset: Offset in the allocated block that will be aligned.
  * @page_removed_from_list: Return parameter.
  *
  * Tries to find a chunk of memory at least @size bytes big within @page.
@@ -227,7 +235,7 @@ static void slob_free_pages(void *b, int order)
  *         true (set to false otherwise).
  */
 static void *slob_page_alloc(struct page *sp, size_t size, int align,
-                            bool *page_removed_from_list)
+                             int align_offset, bool *page_removed_from_list)
 {
        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);
@@ -236,8 +244,17 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
        for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);
 
+               /*
+                * 'aligned' will hold the address of the slob block so that the
+                * address 'aligned'+'align_offset' is aligned according to the
+                * 'align' parameter. This is for kmalloc() which prepends the
+                * allocated block with its size, so that the block itself is
+                * aligned when needed.
+                */
                if (align) {
-                       aligned = (slob_t *)ALIGN((unsigned long)cur, align);
+                       aligned = (slob_t *)
+                               (ALIGN((unsigned long)cur + align_offset, align)
+                                - align_offset);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
@@ -281,7 +298,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
 /*
  * slob_alloc: entry point into the slob allocator.
  */
-static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
+                                                       int align_offset)
 {
        struct page *sp;
        struct list_head *slob_list;
@@ -312,7 +330,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                if (sp->units < SLOB_UNITS(size))
                        continue;
 
-               b = slob_page_alloc(sp, size, align, &page_removed_from_list);
+               b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
                if (!b)
                        continue;
 
@@ -349,7 +367,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                INIT_LIST_HEAD(&sp->slab_list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
-               b = slob_page_alloc(sp, size, align, &_unused);
+               b = slob_page_alloc(sp, size, align, align_offset, &_unused);
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
@@ -451,7 +469,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
        unsigned int *m;
-       int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+       int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        void *ret;
 
        gfp &= gfp_allowed_mask;
@@ -459,19 +477,28 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
        fs_reclaim_acquire(gfp);
        fs_reclaim_release(gfp);
 
-       if (size < PAGE_SIZE - align) {
+       if (size < PAGE_SIZE - minalign) {
+               int align = minalign;
+
+               /*
+                * For power of two sizes, guarantee natural alignment for
+                * kmalloc()'d objects.
+                */
+               if (is_power_of_2(size))
+                       align = max(minalign, (int) size);
+
                if (!size)
                        return ZERO_SIZE_PTR;
 
-               m = slob_alloc(size + align, gfp, align, node);
+               m = slob_alloc(size + minalign, gfp, align, node, minalign);
 
                if (!m)
                        return NULL;
                *m = size;
-               ret = (void *)m + align;
+               ret = (void *)m + minalign;
 
                trace_kmalloc_node(caller, ret,
-                                  size, size + align, gfp, node);
+                                  size, size + minalign, gfp, node);
        } else {
                unsigned int order = get_order(size);
 
@@ -521,8 +548,13 @@ void kfree(const void *block)
                int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
-       } else
-               __free_pages(sp, compound_order(sp));
+       } else {
+               unsigned int order = compound_order(sp);
+               mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                                   -(1 << order));
+               __free_pages(sp, order);
+
+       }
 }
 EXPORT_SYMBOL(kfree);
 
@@ -567,7 +599,7 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
        fs_reclaim_release(flags);
 
        if (c->size < PAGE_SIZE) {
-               b = slob_alloc(c->size, flags, c->align, node);
+               b = slob_alloc(c->size, flags, c->align, node, 0);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                            SLOB_UNITS(c->size) * SLOB_UNIT,
                                            flags, node);
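
The new align_offset parameter lets SLOB align the kmalloc payload rather than the block that carries the size header: the block is placed so that block + align_offset falls on the requested boundary. A worked example of that arithmetic, assuming a minalign of 8 and an arbitrary chunk address:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Place a block so that (block + align_offset) is 'align'-aligned; this is
 * the expression slob_page_alloc() now uses for its 'aligned' candidate. */
static unsigned long place_block(unsigned long cur, unsigned long align,
				 unsigned long align_offset)
{
	return ALIGN_UP(cur + align_offset, align) - align_offset;
}

int main(void)
{
	unsigned long cur = 0x1234;	/* arbitrary free chunk address */
	unsigned long minalign = 8;	/* assumed ARCH_KMALLOC_MINALIGN */
	unsigned long align = 64;	/* natural alignment for a 64-byte kmalloc */
	unsigned long block = place_block(cur, align, minalign);

	printf("block at %#lx, payload at %#lx (payload %% %lu = %lu)\n",
	       block, block + minalign, align, (block + minalign) % align);
	return 0;
}
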
index 42c1b3a..b25c807 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2672,6 +2672,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 }
 
 /*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+                                                  void *obj)
+{
+       if (unlikely(slab_want_init_on_free(s)) && obj)
+               memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
+/*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
  * overhead for requests that can be satisfied on the fastpath.
@@ -2759,12 +2770,8 @@ redo:
                prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
-       /*
-        * If the object has been wiped upon free, make sure it's fully
-        * initialized by zeroing out freelist pointer.
-        */
-       if (unlikely(slab_want_init_on_free(s)) && object)
-               memset(object + s->offset, 0, sizeof(void *));
+
+       maybe_wipe_obj_freeptr(s, object);
 
        if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
                memset(object, 0, s->object_size);
@@ -3178,10 +3185,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                goto error;
 
                        c = this_cpu_ptr(s->cpu_slab);
+                       maybe_wipe_obj_freeptr(s, p[i]);
+
                        continue; /* goto for-loop */
                }
                c->freelist = get_freepointer(s, object);
                p[i] = object;
+               maybe_wipe_obj_freeptr(s, p[i]);
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();
@@ -3821,11 +3831,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
        struct page *page;
        void *ptr = NULL;
+       unsigned int order = get_order(size);
 
        flags |= __GFP_COMP;
-       page = alloc_pages_node(node, flags, get_order(size));
-       if (page)
+       page = alloc_pages_node(node, flags, order);
+       if (page) {
                ptr = page_address(page);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   1 << order);
+       }
 
        return kmalloc_large_node_hook(ptr, size, flags);
 }
@@ -3951,9 +3965,13 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
+               unsigned int order = compound_order(page);
+
                BUG_ON(!PageCompound(page));
                kfree_hook(object);
-               __free_pages(page, compound_order(page));
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   -(1 << order));
+               __free_pages(page, order);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
@@ -4838,7 +4856,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                }
        }
 
-       get_online_mems();
+       /*
+        * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
+        * already held, as that would conflict with an existing lock order:
+        *
+        * mem_hotplug_lock->slab_mutex->kernfs_mutex
+        *
+        * We don't really need mem_hotplug_lock (to hold off
+        * slab_mem_going_offline_callback) here because slab's memory hot
+        * unplug code doesn't destroy the kmem_cache->node[] data.
+        */
+
 #ifdef CONFIG_SLUB_DEBUG
        if (flags & SO_ALL) {
                struct kmem_cache_node *n;
@@ -4879,7 +4907,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        x += sprintf(buf + x, " N%d=%lu",
                                        node, nodes[node]);
 #endif
-       put_online_mems();
        kfree(nodes);
        return x + sprintf(buf + x, "\n");
 }
index bf32de9..f6891c1 100644 (file)
@@ -219,7 +219,7 @@ static inline unsigned long first_present_section_nr(void)
        return next_present_section_nr(-1);
 }
 
-void subsection_mask_set(unsigned long *map, unsigned long pfn,
+static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
 {
        int idx = subsection_map_index(pfn);
index 8563339..dd9ebc1 100644 (file)
@@ -592,6 +592,16 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        unlock_page(page);
                                        continue;
                                }
+
+                               /* Take a pin outside pagevec */
+                               get_page(page);
+
+                               /*
+                                * Drop extra pins before trying to invalidate
+                                * the huge page.
+                                */
+                               pagevec_remove_exceptionals(&pvec);
+                               pagevec_release(&pvec);
                        }
 
                        ret = invalidate_inode_page(page);
@@ -602,6 +612,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                         */
                        if (!ret)
                                deactivate_file_page(page);
+                       if (PageTransHuge(page))
+                               put_page(page);
                        count += ret;
                }
                pagevec_remove_exceptionals(&pvec);
index f3b5081..4bac22f 100644 (file)
@@ -355,6 +355,9 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
  * "hierarchy" or "local").
  *
  * To be used as memcg event method.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure, or -EINVAL if @args
+ * could not be parsed.
  */
 int vmpressure_register_event(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args)
@@ -362,7 +365,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;
        enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
-       enum vmpressure_levels level = -1;
+       enum vmpressure_levels level;
        char *spec, *spec_orig;
        char *token;
        int ret = 0;
@@ -375,20 +378,18 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 
        /* Find required level */
        token = strsep(&spec, ",");
-       level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
-       if (level < 0) {
-               ret = level;
+       ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+       if (ret < 0)
                goto out;
-       }
+       level = ret;
 
        /* Find optional mode */
        token = strsep(&spec, ",");
        if (token) {
-               mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
-               if (mode < 0) {
-                       ret = mode;
+               ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+               if (ret < 0)
                        goto out;
-               }
+               mode = ret;
        }
 
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -404,6 +405,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);
+       ret = 0;
 out:
        kfree(spec_orig);
        return ret;
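
The vmpressure fix is the usual match_string() pattern: the helper returns an index or a negative errno, so the result has to pass through a signed int before being stored in the enum. A small stand-alone sketch of the corrected flow, with a hypothetical two-entry mode table and -EINVAL hard-coded as -22:

#include <stdio.h>
#include <string.h>

enum toy_mode { MODE_HIER, MODE_LOCAL, NR_MODES };
static const char *const toy_mode_names[NR_MODES] = { "hierarchy", "local" };

/* Minimal stand-in for match_string(): index on success, -EINVAL (-22)
 * when the token is not in the table. */
static int toy_match_string(const char *const *array, int n, const char *s)
{
	for (int i = 0; i < n; i++)
		if (array[i] && strcmp(array[i], s) == 0)
			return i;
	return -22;
}

static int parse_mode(const char *token, enum toy_mode *mode)
{
	int ret = toy_match_string(toy_mode_names, NR_MODES, token);

	if (ret < 0)
		return ret;	/* never stored into the enum */
	*mode = ret;
	return 0;
}

int main(void)
{
	enum toy_mode mode;

	printf("local -> %d\n", parse_mode("local", &mode));
	printf("bogus -> %d\n", parse_mode("bogus", &mode));
	return 0;
}
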
index e5d52d6..ee4eecc 100644 (file)
@@ -351,12 +351,13 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
  */
 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 {
-       unsigned long lru_size;
+       unsigned long lru_size = 0;
        int zid;
 
-       if (!mem_cgroup_disabled())
-               lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
-       else
+       if (!mem_cgroup_disabled()) {
+               for (zid = 0; zid < MAX_NR_ZONES; zid++)
+                       lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+       } else
                lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 
        for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
@@ -932,10 +933,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under the i_pages lock, then this ordering is not required.
         */
-       if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
-               refcount = 1 + HPAGE_PMD_NR;
-       else
-               refcount = 2;
+       refcount = 1 + compound_nr(page);
        if (!page_ref_freeze(page, refcount))
                goto cannot_free;
        /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
@@ -2459,17 +2457,70 @@ out:
        *lru_pages = 0;
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
-               unsigned long size;
+               unsigned long lruvec_size;
                unsigned long scan;
+               unsigned long protection;
+
+               lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
+               protection = mem_cgroup_protection(memcg,
+                                                  sc->memcg_low_reclaim);
+
+               if (protection) {
+                       /*
+                        * Scale a cgroup's reclaim pressure by proportioning
+                        * its current usage to its memory.low or memory.min
+                        * setting.
+                        *
+                        * This is important, as otherwise scanning aggression
+                        * becomes extremely binary -- from nothing as we
+                        * approach the memory protection threshold, to totally
+                        * nominal as we exceed it.  This results in requiring
+                        * setting extremely liberal protection thresholds. It
+                        * also means we simply get no protection at all if we
+                        * set it too low, which is not ideal.
+                        *
+                        * If there is any protection in place, we reduce scan
+                        * pressure by how much of the total memory used is
+                        * within protection thresholds.
+                        *
+                        * There is one special case: in the first reclaim pass,
+                        * we skip over all groups that are within their low
+                        * protection. If that fails to reclaim enough pages to
+                        * satisfy the reclaim goal, we come back and override
+                        * the best-effort low protection. However, we still
+                        * ideally want to honor how well-behaved groups are in
+                        * that case instead of simply punishing them all
+                        * equally. As such, we reclaim them based on how much
+                        * memory they are using, reducing the scan pressure
+                        * again by how much of the total memory used is under
+                        * hard protection.
+                        */
+                       unsigned long cgroup_size = mem_cgroup_size(memcg);
+
+                       /* Avoid TOCTOU with earlier protection check */
+                       cgroup_size = max(cgroup_size, protection);
+
+                       scan = lruvec_size - lruvec_size * protection /
+                               cgroup_size;
+
+                       /*
+                        * Minimally target SWAP_CLUSTER_MAX pages to keep
+                        * reclaim moving forwards, avoiding decrementing
+                        * sc->priority further than desirable.
+                        */
+                       scan = max(scan, SWAP_CLUSTER_MAX);
+               } else {
+                       scan = lruvec_size;
+               }
+
+               scan >>= sc->priority;
 
-               size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               scan = size >> sc->priority;
                /*
                 * If the cgroup's already been deleted, make sure to
                 * scrape out the remaining cache.
                 */
                if (!scan && !mem_cgroup_online(memcg))
-                       scan = min(size, SWAP_CLUSTER_MAX);
+                       scan = min(lruvec_size, SWAP_CLUSTER_MAX);
 
                switch (scan_balance) {
                case SCAN_EQUAL:
@@ -2489,7 +2540,7 @@ out:
                case SCAN_ANON:
                        /* Scan one type exclusively */
                        if ((scan_balance == SCAN_FILE) != file) {
-                               size = 0;
+                               lruvec_size = 0;
                                scan = 0;
                        }
                        break;
@@ -2498,7 +2549,7 @@ out:
                        BUG();
                }
 
-               *lru_pages += size;
+               *lru_pages += lruvec_size;
                nr[lru] = scan;
        }
 }
@@ -2742,6 +2793,13 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                                memcg_memory_event(memcg, MEMCG_LOW);
                                break;
                        case MEMCG_PROT_NONE:
+                               /*
+                                * All protection thresholds breached. We may
+                                * still choose to vary the scan pressure
+                                * applied based on by how much the cgroup in
+                                * question has exceeded its protection
+                                * thresholds (see get_scan_count).
+                                */
                                break;
                        }
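
The get_scan_count() change above scales a cgroup's scan target by how much of its usage sits under memory.min/memory.low: scan = lruvec_size - lruvec_size * protection / cgroup_size, clamped up to SWAP_CLUSTER_MAX and then shifted right by the reclaim priority. A worked example with assumed numbers (1 GiB of LRU pages, three quarters of the cgroup protected):

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL	/* kernel default, assumed here */

static unsigned long scan_target(unsigned long lruvec_size,
				 unsigned long protection,
				 unsigned long cgroup_size,
				 int priority)
{
	unsigned long scan = lruvec_size;

	if (protection) {
		/* Avoid the TOCTOU the patch mentions: usage may have dropped
		 * below the protection value since it was sampled. */
		if (cgroup_size < protection)
			cgroup_size = protection;
		scan = lruvec_size - lruvec_size * protection / cgroup_size;
		if (scan < SWAP_CLUSTER_MAX)
			scan = SWAP_CLUSTER_MAX;
	}
	return scan >> priority;
}

int main(void)
{
	/* 262144 LRU pages (1 GiB), memory.low covers 3/4 of the cgroup. */
	unsigned long lruvec_size = 262144, protection = 196608,
		      cgroup_size = 262144;

	for (int prio = 12; prio >= 8; prio -= 2)
		printf("priority %2d -> scan %lu pages\n",
		       prio, scan_target(lruvec_size, protection,
					 cgroup_size, prio));
	return 0;
}
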
 
index 05bdf90..6d3d3f6 100644 (file)
@@ -998,9 +998,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;
+       bool page_claimed;
 
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);
+       page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
 
        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* if a headless page is under reclaim, just leave.
@@ -1008,7 +1010,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                 * has not been set before, we release this page
                 * immediately so we don't care about its value any more.
                 */
-               if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+               if (!page_claimed) {
                        spin_lock(&pool->lock);
                        list_del(&page->lru);
                        spin_unlock(&pool->lock);
@@ -1044,13 +1046,15 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                atomic64_dec(&pool->pages_nr);
                return;
        }
-       if (test_bit(PAGE_CLAIMED, &page->private)) {
+       if (page_claimed) {
+               /* the page has not been claimed by us */
                z3fold_page_unlock(zhdr);
                return;
        }
        if (unlikely(PageIsolated(page)) ||
            test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
+               clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1060,10 +1064,12 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
+               clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
+       clear_bit(PAGE_CLAIMED, &page->private);
        z3fold_page_unlock(zhdr);
 }
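
The z3fold change samples PAGE_CLAIMED exactly once with test_and_set_bit(), so only one of the free and reclaim paths wins the page, and every non-freeing exit clears the bit again. A stand-alone sketch of that claim-once handshake using C11 atomics in place of the kernel bit ops:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy page with a single "claimed" bit, standing in for PAGE_CLAIMED. */
struct toy_page { atomic_bool claimed; };

/* Returns true if the caller won the claim (the bit was clear before). */
static bool claim_page(struct toy_page *p)
{
	return !atomic_exchange(&p->claimed, true);
}

static void release_claim(struct toy_page *p)
{
	atomic_store(&p->claimed, false);
}

int main(void)
{
	struct toy_page page = { .claimed = false };

	bool free_path = claim_page(&page);	/* first caller wins */
	bool reclaim_path = claim_page(&page);	/* second caller backs off */

	printf("free path claimed: %d, reclaim path claimed: %d\n",
	       free_path, reclaim_path);
	if (free_path)
		release_claim(&page);	/* mirrors clear_bit() on exit paths */
	return 0;
}
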
 
index a1146cb..9cbed6f 100644 (file)
@@ -436,7 +436,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
        /* clean the netfilter state now that the batman-adv header has been
         * removed
         */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                goto dropped;
index 8842798..506d614 100644 (file)
@@ -33,6 +33,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 {
        int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
        unsigned int hlen, ll_rs, mtu;
+       ktime_t tstamp = skb->tstamp;
        struct ip_frag_state state;
        struct iphdr *iph;
        int err;
@@ -80,6 +81,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
                        if (iter.frag)
                                ip_fraglist_prepare(skb, &iter);
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, data, skb);
                        if (err || !iter.frag)
                                break;
@@ -104,6 +106,7 @@ slow_path:
                        goto blackhole;
                }
 
+               skb2->tstamp = tstamp;
                err = output(net, sk, data, skb2);
                if (err)
                        goto blackhole;
index 4cc8dc5..c210fc1 100644 (file)
@@ -640,7 +640,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
                skb->len += copied;
                skb->truesize += truesize;
                if (sk && sk->sk_type == SOCK_STREAM) {
-                       sk->sk_wmem_queued += truesize;
+                       sk_wmem_queued_add(sk, truesize);
                        sk_mem_charge(sk, truesize);
                } else {
                        refcount_add(truesize, &skb->sk->sk_wmem_alloc);
index e48680e..f80151e 100644 (file)
@@ -3172,7 +3172,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI);
-                       if (err) {
+                       if (err && err != -EOPNOTSUPP) {
                                mutex_unlock(&devlink->lock);
                                goto out;
                        }
@@ -3432,7 +3432,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
                                                NETLINK_CB(cb->skb).portid,
                                                cb->nlh->nlmsg_seq,
                                                NLM_F_MULTI);
-                               if (err) {
+                               if (err && err != -EOPNOTSUPP) {
                                        mutex_unlock(&devlink->lock);
                                        goto out;
                                }
@@ -4088,7 +4088,7 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           cb->extack);
                mutex_unlock(&devlink->lock);
-               if (err)
+               if (err && err != -EOPNOTSUPP)
                        break;
                idx++;
        }
index ed65636..3fed575 100644 (file)
@@ -4252,12 +4252,14 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                case SO_RCVBUF:
                        val = min_t(u32, val, sysctl_rmem_max);
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-                       sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+                       WRITE_ONCE(sk->sk_rcvbuf,
+                                  max_t(int, val * 2, SOCK_MIN_RCVBUF));
                        break;
                case SO_SNDBUF:
                        val = min_t(u32, val, sysctl_wmem_max);
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-                       sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+                       WRITE_ONCE(sk->sk_sndbuf,
+                                  max_t(int, val * 2, SOCK_MIN_SNDBUF));
                        break;
                case SO_MAX_PACING_RATE: /* 32bit version */
                        if (val != ~0U)
@@ -4274,7 +4276,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                case SO_RCVLOWAT:
                        if (val < 0)
                                val = INT_MAX;
-                       sk->sk_rcvlowat = val ? : 1;
+                       WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
                        break;
                case SO_MARK:
                        if (sk->sk_mark != val) {
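
The WRITE_ONCE() conversions here, paired with the READ_ONCE() readers elsewhere in this series, annotate socket fields such as sk_sndbuf, sk_rcvbuf and sk_rcvlowat that are written under the socket lock but read locklessly. A user-space sketch of the intent, modelling the two macros as volatile accesses so the compiler cannot tear, fuse or re-read the value; this is an illustration, not the kernel's definition:

#include <stdio.h>

/* Simplified models of the kernel macros: force a single, full-width access
 * through a volatile-qualified lvalue. */
#define READ_ONCE(x)		(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int sk_sndbuf;	/* written with the lock held, read locklessly */

static void setsockopt_sndbuf(int val)
{
	/* writer side: publish the new value with a single store */
	WRITE_ONCE(sk_sndbuf, val * 2);
}

static int wmem_has_space(int wmem_alloc)
{
	/* lockless reader: sample the field exactly once */
	return wmem_alloc < READ_ONCE(sk_sndbuf);
}

int main(void)
{
	setsockopt_sndbuf(4096);
	printf("sndbuf=%d, has space for 4096 queued bytes: %d\n",
	       READ_ONCE(sk_sndbuf), wmem_has_space(4096));
	return 0;
}
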
index a0e0d29..6d3e482 100644 (file)
@@ -245,7 +245,8 @@ static int __peernet2id(struct net *net, struct net *peer)
        return __peernet2id_alloc(net, peer, &no);
 }
 
-static void rtnl_net_notifyid(struct net *net, int cmd, int id);
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+                             struct nlmsghdr *nlh);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
@@ -268,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
-               rtnl_net_notifyid(net, RTM_NEWNSID, id);
+               rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL);
        if (alive)
                put_net(peer);
        return id;
@@ -532,7 +533,7 @@ static void unhash_nsid(struct net *net, struct net *last)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
-                       rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL);
                if (tmp == last)
                        break;
        }
@@ -764,7 +765,8 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = alloc_netid(net, peer, nsid);
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
-               rtnl_net_notifyid(net, RTM_NEWNSID, err);
+               rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
+                                 nlh);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
@@ -1051,9 +1053,12 @@ end:
        return err < 0 ? err : skb->len;
 }
 
-static void rtnl_net_notifyid(struct net *net, int cmd, int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+                             struct nlmsghdr *nlh)
 {
        struct net_fill_args fillargs = {
+               .portid = portid,
+               .seq = nlh ? nlh->nlmsg_seq : 0,
                .cmd = cmd,
                .nsid = id,
        };
@@ -1068,7 +1073,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id)
        if (err < 0)
                goto err_out;
 
-       rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
+       rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, 0);
        return;
 
 err_out:
index c9bb000..f35c2e9 100644 (file)
@@ -96,7 +96,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 
        fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
-       tcp_sk(sk)->fastopen_rsk = NULL;
+       RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
        spin_lock_bh(&fastopenq->lock);
        fastopenq->qlen--;
        tcp_rsk(req)->tfo_listener = false;
index 01d6520..867e61d 100644 (file)
@@ -4415,7 +4415,7 @@ static void skb_set_err_queue(struct sk_buff *skb)
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned int)sk->sk_rcvbuf)
+           (unsigned int)READ_ONCE(sk->sk_rcvbuf))
                return -ENOMEM;
 
        skb_orphan(skb);
@@ -5120,7 +5120,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb_ext_reset(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV
@@ -5477,12 +5477,14 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
  * @skb: buffer
  * @mpls_lse: MPLS label stack entry to push
  * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
+ * @mac_len: length of the MAC header
  *
  * Expects skb->data at mac header.
  *
  * Returns 0 on success, -errno otherwise.
  */
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+                 int mac_len)
 {
        struct mpls_shim_hdr *lse;
        int err;
@@ -5499,15 +5501,15 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
                return err;
 
        if (!skb->inner_protocol) {
-               skb_set_inner_network_header(skb, skb->mac_len);
+               skb_set_inner_network_header(skb, mac_len);
                skb_set_inner_protocol(skb, skb->protocol);
        }
 
        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
-               skb->mac_len);
+               mac_len);
        skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb->mac_len);
+       skb_set_network_header(skb, mac_len);
 
        lse = mpls_hdr(skb);
        lse->label_stack_entry = mpls_lse;
@@ -5526,29 +5528,30 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
  *
  * @skb: buffer
  * @next_proto: ethertype of header after popped MPLS header
+ * @mac_len: length of the MAC header
  *
  * Expects skb->data at mac header.
  *
  * Returns 0 on success, -errno otherwise.
  */
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
 {
        int err;
 
        if (unlikely(!eth_p_mpls(skb->protocol)))
-               return -EINVAL;
+               return 0;
 
-       err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
+       err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;
 
        skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
-               skb->mac_len);
+               mac_len);
 
        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb->mac_len);
+       skb_set_network_header(skb, mac_len);
 
        if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
                struct ethhdr *hdr;
index 07863ed..a515392 100644 (file)
@@ -522,7 +522,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
                rc = sk_backlog_rcv(sk, skb);
 
                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+       } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
@@ -785,7 +785,8 @@ set_sndbuf:
                 */
                val = min_t(int, val, INT_MAX / 2);
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-               sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+               WRITE_ONCE(sk->sk_sndbuf,
+                          max_t(int, val * 2, SOCK_MIN_SNDBUF));
                /* Wake up sending tasks if we upped the value. */
                sk->sk_write_space(sk);
                break;
@@ -831,7 +832,8 @@ set_rcvbuf:
                 * returning the value we actually used in getsockopt
                 * is the most desirable behavior.
                 */
-               sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+               WRITE_ONCE(sk->sk_rcvbuf,
+                          max_t(int, val * 2, SOCK_MIN_RCVBUF));
                break;
 
        case SO_RCVBUFFORCE:
@@ -974,7 +976,7 @@ set_rcvbuf:
                if (sock->ops->set_rcvlowat)
                        ret = sock->ops->set_rcvlowat(sk, val);
                else
-                       sk->sk_rcvlowat = val ? : 1;
+                       WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
                break;
 
        case SO_RCVTIMEO_OLD:
@@ -1700,8 +1702,6 @@ static void __sk_destruct(struct rcu_head *head)
                sk_filter_uncharge(sk, filter);
                RCU_INIT_POINTER(sk->sk_filter, NULL);
        }
-       if (rcu_access_pointer(sk->sk_reuseport_cb))
-               reuseport_detach_sock(sk);
 
        sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
@@ -1728,7 +1728,14 @@ static void __sk_destruct(struct rcu_head *head)
 
 void sk_destruct(struct sock *sk)
 {
-       if (sock_flag(sk, SOCK_RCU_FREE))
+       bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+
+       if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+               reuseport_detach_sock(sk);
+               use_call_rcu = true;
+       }
+
+       if (use_call_rcu)
                call_rcu(&sk->sk_rcu, __sk_destruct);
        else
                __sk_destruct(&sk->sk_rcu);
@@ -2083,8 +2090,10 @@ EXPORT_SYMBOL(sock_i_ino);
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
 {
-       if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+       if (force ||
+           refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
                struct sk_buff *skb = alloc_skb(size, priority);
+
                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
@@ -2185,7 +2194,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+               if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
@@ -2220,7 +2229,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;
 
-               if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+               if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
                        break;
 
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -2329,8 +2338,8 @@ static void sk_leave_memory_pressure(struct sock *sk)
        } else {
                unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
 
-               if (memory_pressure && *memory_pressure)
-                       *memory_pressure = 0;
+               if (memory_pressure && READ_ONCE(*memory_pressure))
+                       WRITE_ONCE(*memory_pressure, 0);
        }
 }
 
@@ -2801,7 +2810,7 @@ static void sock_def_write_space(struct sock *sk)
        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
-       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
@@ -3199,13 +3208,13 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
        memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
 
        mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
-       mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+       mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
        mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
-       mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+       mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
        mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
-       mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+       mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
        mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
-       mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+       mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
        mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
 }
 
@@ -3492,7 +3501,7 @@ static long sock_prot_memory_allocated(struct proto *proto)
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
 }
 
-static char *sock_prot_memory_pressure(struct proto *proto)
+static const char *sock_prot_memory_pressure(struct proto *proto)
 {
        return proto->memory_pressure != NULL ?
        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
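
Most of the net/core/sock.c changes above wrap loads and stores of sk_sndbuf, sk_rcvbuf, sk_rcvlowat and the memory-pressure flag in READ_ONCE()/WRITE_ONCE(), documenting that these fields are read without the socket lock. Below is a rough userspace approximation of the pattern, with the kernel macros mimicked by volatile casts (the real macros do more, e.g. handle accesses wider than a word):

#include <pthread.h>
#include <stdio.h>

/* Crude stand-ins for the kernel macros, for illustration only. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static int sndbuf = 16 * 1024;          /* stand-in for sk->sk_sndbuf */

static void *tuner(void *arg)
{
        (void)arg;
        /* setsockopt(SO_SNDBUF)-style update, published with one store. */
        WRITE_ONCE(sndbuf, 64 * 1024);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, tuner, NULL);
        /* Lockless reader: loads the value exactly once, old or new. */
        printf("sndbuf seen as %d\n", READ_ONCE(sndbuf));
        pthread_join(t, NULL);
        return 0;
}

The annotations do not add ordering on their own; they keep the compiler from tearing or re-reading the value and mark the access as an intentional lockless one.
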
index b685bc8..d9b4200 100644 (file)
@@ -871,7 +871,7 @@ lookup:
 
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
 
index 7300202..716d265 100644 (file)
@@ -46,7 +46,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
        dst->index = index;
 
        INIT_LIST_HEAD(&dst->list);
-       list_add_tail(&dsa_tree_list, &dst->list);
+       list_add_tail(&dst->list, &dsa_tree_list);
 
        kref_init(&dst->refcount);
 
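
The one-line dsa2 fix above swaps the arguments so the new switch tree is appended to the global dsa_tree_list rather than the other way around: list_add_tail(new, head) inserts new before head, i.e. at the tail of the list that head anchors. A self-contained sketch of that argument order with a minimal circular list (illustration only, not the kernel's <linux/list.h>):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

/* Same argument order as the kernel helper: new entry first, list head second. */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

struct tree { int index; struct list_head list; };

int main(void)
{
        struct list_head trees = { &trees, &trees };    /* empty list head */
        struct tree a = { .index = 0 }, b = { .index = 1 };

        list_add_tail(&a.list, &trees);
        list_add_tail(&b.list, &trees);

        for (struct list_head *p = trees.next; p != &trees; p = p->next) {
                struct tree *t = (struct tree *)((char *)p -
                                                 offsetof(struct tree, list));
                printf("tree %d\n", t->index);          /* prints 0 then 1 */
        }
        return 0;
}
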
index 9c9aff3..63ef2a1 100644 (file)
@@ -156,7 +156,11 @@ static struct sk_buff
        /* Step 1: A timestampable frame was received.
         * Buffer it until we get its meta frame.
         */
-       if (is_link_local && sp->data->hwts_rx_en) {
+       if (is_link_local) {
+               if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+                       /* Do normal processing. */
+                       return skb;
+
                spin_lock(&sp->data->meta_lock);
                /* Was this a link-local frame instead of the meta
                 * that we were expecting?
@@ -187,6 +191,12 @@ static struct sk_buff
        } else if (is_meta) {
                struct sk_buff *stampable_skb;
 
+               /* Drop the meta frame if we're not in the right state
+                * to process it.
+                */
+               if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+                       return NULL;
+
                spin_lock(&sp->data->meta_lock);
 
                stampable_skb = sp->data->stampable_skb;
index a918354..eb30fc1 100644 (file)
@@ -906,7 +906,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
        percpu_counter_inc(sk->sk_prot->orphan_count);
 
        if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-               BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+               BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
                BUG_ON(sk != req->rsk_listener);
 
                /* Paranoid, to prevent race condition if
@@ -915,7 +915,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
                 * Also to satisfy an assertion in
                 * tcp_v4_destroy_sock().
                 */
-               tcp_sk(child)->fastopen_rsk = NULL;
+               RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
        }
        inet_csk_destroy_sock(child);
 }
@@ -934,7 +934,7 @@ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                req->sk = child;
                req->dl_next = NULL;
                if (queue->rskq_accept_head == NULL)
-                       queue->rskq_accept_head = req;
+                       WRITE_ONCE(queue->rskq_accept_head, req);
                else
                        queue->rskq_accept_tail->dl_next = req;
                queue->rskq_accept_tail = req;
index bbb005e..7dc79b9 100644 (file)
@@ -193,7 +193,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
                struct inet_diag_meminfo minfo = {
                        .idiag_rmem = sk_rmem_alloc_get(sk),
-                       .idiag_wmem = sk->sk_wmem_queued,
+                       .idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
                        .idiag_fmem = sk->sk_forward_alloc,
                        .idiag_tmem = sk_wmem_alloc_get(sk),
                };
index a53a543..52690bb 100644 (file)
@@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev)
        struct ip_tunnel *t = netdev_priv(dev);
 
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index 1e2392b..c59a78a 100644 (file)
@@ -199,7 +199,7 @@ resubmit:
                                kfree_skb(skb);
                                return;
                        }
-                       nf_reset(skb);
+                       nf_reset_ct(skb);
                }
                ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
                                      skb);
index 28fca40..814b9b8 100644 (file)
@@ -771,6 +771,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
        struct rtable *rt = skb_rtable(skb);
        unsigned int mtu, hlen, ll_rs;
        struct ip_fraglist_iter iter;
+       ktime_t tstamp = skb->tstamp;
        struct ip_frag_state state;
        int err = 0;
 
@@ -846,6 +847,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                ip_fraglist_prepare(skb, &iter);
                        }
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, skb);
 
                        if (!err)
@@ -900,6 +902,7 @@ slow_path:
                /*
                 *      Put this fragment into the sending queue.
                 */
+               skb2->tstamp = tstamp;
                err = output(net, sk, skb2);
                if (err)
                        goto fail;
index 313470f..716d547 100644 (file)
@@ -1794,7 +1794,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
        ip_send_check(iph);
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-       nf_reset(skb);
+       nf_reset_ct(skb);
 }
 
 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
@@ -2140,7 +2140,7 @@ int ip_mr_input(struct sk_buff *skb)
 
                        mroute_sk = rcu_dereference(mrt->mroute_sk);
                        if (mroute_sk) {
-                               nf_reset(skb);
+                               nf_reset_ct(skb);
                                raw_rcv(mroute_sk, skb);
                                return 0;
                        }
index af3fbf7..6cc5743 100644 (file)
@@ -65,7 +65,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Avoid counting cloned packets towards the original connection. */
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
        /*
index 80da5a6..3183413 100644 (file)
@@ -332,7 +332,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
                kfree_skb(skb);
                return NET_RX_DROP;
        }
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb_push(skb, skb->data - skb_network_header(skb));
 
index 7dcce72..621f834 100644 (file)
@@ -916,16 +916,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
        if (peer->rate_tokens == 0 ||
            time_after(jiffies,
                       (peer->rate_last +
-                       (ip_rt_redirect_load << peer->rate_tokens)))) {
+                       (ip_rt_redirect_load << peer->n_redirects)))) {
                __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
                peer->rate_last = jiffies;
-               ++peer->rate_tokens;
                ++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
-                   peer->rate_tokens == ip_rt_redirect_number)
+                   peer->n_redirects == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &gw);
@@ -1483,7 +1482,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
        prev = cmpxchg(p, orig, rt);
        if (prev == orig) {
                if (orig) {
-                       dst_dev_put(&orig->dst);
+                       rt_add_uncached_list(orig);
                        dst_release(&orig->dst);
                }
        } else {
@@ -2471,14 +2470,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
        int orig_oif = fl4->flowi4_oif;
        unsigned int flags = 0;
        struct rtable *rth;
-       int err = -ENETUNREACH;
+       int err;
 
        if (fl4->saddr) {
-               rth = ERR_PTR(-EINVAL);
                if (ipv4_is_multicast(fl4->saddr) ||
                    ipv4_is_lbcast(fl4->saddr) ||
-                   ipv4_is_zeronet(fl4->saddr))
+                   ipv4_is_zeronet(fl4->saddr)) {
+                       rth = ERR_PTR(-EINVAL);
                        goto out;
+               }
+
+               rth = ERR_PTR(-ENETUNREACH);
 
                /* I removed check for oif == dev_out->oif here.
                   It was wrong for two reasons:
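
In ip_rt_send_redirect() the back-off shift above now uses peer->n_redirects consistently (rate_tokens is no longer incremented here), so the gap before the next ICMP redirect doubles with each redirect actually sent. A simplified sketch of that comparison; the early exit on rate_tokens == 0, the jiffies types and time_after() wrapping are glossed over, and the numbers are made up:

#include <stdbool.h>
#include <stdio.h>

static bool may_send_redirect(unsigned long now, unsigned long rate_last,
                              unsigned long redirect_load,
                              unsigned int n_redirects)
{
        /* Doubling back-off: load << n_redirects grows with every redirect. */
        return now > rate_last + (redirect_load << n_redirects);
}

int main(void)
{
        unsigned long now = 1000, last = 900, load = 10;

        for (unsigned int n = 0; n < 5; n++)
                printf("n_redirects=%u -> %s\n", n,
                       may_send_redirect(now, last, load, n) ? "send" : "hold");
        return 0;
}

With the illustrative values above the fourth redirect is held back, since 900 + (10 << 4) already exceeds the current time.
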
index 79c325a..42187a3 100644 (file)
@@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (tcp_memory_pressure)
+       if (READ_ONCE(tcp_memory_pressure))
                return;
        val = jiffies;
 
@@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (!tcp_memory_pressure)
+       if (!READ_ONCE(tcp_memory_pressure))
                return;
        val = xchg(&tcp_memory_pressure, 0);
        if (val)
@@ -450,8 +450,8 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
-       sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
+       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
+       WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 
        sk_sockets_allocated_inc(sk);
        sk->sk_route_forced_caps = NETIF_F_GSO;
@@ -477,7 +477,7 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                                          int target, struct sock *sk)
 {
-       return (tp->rcv_nxt - tp->copied_seq >= target) ||
+       return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
                (sk->sk_prot->stream_memory_read ?
                sk->sk_prot->stream_memory_read(sk) : false);
 }
@@ -543,10 +543,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Connected or passive Fast Open socket? */
        if (state != TCP_SYN_SENT &&
-           (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
+           (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
-               if (tp->urg_seq == tp->copied_seq &&
+               if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
                        target++;
@@ -607,7 +607,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                unlock_sock_fast(sk, slow);
                break;
        case SIOCATMARK:
-               answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
+               answ = tp->urg_data &&
+                      READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
@@ -616,7 +617,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
-                       answ = tp->write_seq - tp->snd_una;
+                       answ = READ_ONCE(tp->write_seq) - tp->snd_una;
                break;
        case SIOCOUTQNSD:
                if (sk->sk_state == TCP_LISTEN)
@@ -625,7 +626,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
-                       answ = tp->write_seq - tp->snd_nxt;
+                       answ = READ_ONCE(tp->write_seq) -
+                              READ_ONCE(tp->snd_nxt);
                break;
        default:
                return -ENOIOCTLCMD;
@@ -657,7 +659,7 @@ static void skb_entail(struct sock *sk, struct sk_buff *skb)
        tcb->sacked  = 0;
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
-       sk->sk_wmem_queued += skb->truesize;
+       sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
@@ -1032,10 +1034,10 @@ new_segment:
                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
-               sk->sk_wmem_queued += copy;
+               sk_wmem_queued_add(sk, copy);
                sk_mem_charge(sk, copy);
                skb->ip_summed = CHECKSUM_PARTIAL;
-               tp->write_seq += copy;
+               WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
                TCP_SKB_CB(skb)->end_seq += copy;
                tcp_skb_pcount_set(skb, 0);
 
@@ -1362,7 +1364,7 @@ new_segment:
                if (!copied)
                        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
-               tp->write_seq += copy;
+               WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
                TCP_SKB_CB(skb)->end_seq += copy;
                tcp_skb_pcount_set(skb, 0);
 
@@ -1668,9 +1670,9 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                sk_eat_skb(sk, skb);
                if (!desc->count)
                        break;
-               tp->copied_seq = seq;
+               WRITE_ONCE(tp->copied_seq, seq);
        }
-       tp->copied_seq = seq;
+       WRITE_ONCE(tp->copied_seq, seq);
 
        tcp_rcv_space_adjust(sk);
 
@@ -1699,7 +1701,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
        else
                cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
        val = min(val, cap);
-       sk->sk_rcvlowat = val ? : 1;
+       WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 
        /* Check if we need to signal EPOLLIN right now */
        tcp_data_ready(sk);
@@ -1709,7 +1711,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
 
        val <<= 1;
        if (val > sk->sk_rcvbuf) {
-               sk->sk_rcvbuf = val;
+               WRITE_ONCE(sk->sk_rcvbuf, val);
                tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
        }
        return 0;
@@ -1798,13 +1800,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
                }
                if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
                        int remaining = zc->recv_skip_hint;
-                       int size = skb_frag_size(frags);
 
-                       while (remaining && (size != PAGE_SIZE ||
+                       while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
                                             skb_frag_off(frags))) {
-                               remaining -= size;
+                               remaining -= skb_frag_size(frags);
                                frags++;
-                               size = skb_frag_size(frags);
                        }
                        zc->recv_skip_hint -= remaining;
                        break;
@@ -1821,7 +1821,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 out:
        up_read(&current->mm->mmap_sem);
        if (length) {
-               tp->copied_seq = seq;
+               WRITE_ONCE(tp->copied_seq, seq);
                tcp_rcv_space_adjust(sk);
 
                /* Clean up data we have read: This will do ACK frames. */
@@ -2119,7 +2119,7 @@ found_ok_skb:
                        if (urg_offset < used) {
                                if (!urg_offset) {
                                        if (!sock_flag(sk, SOCK_URGINLINE)) {
-                                               ++*seq;
+                                               WRITE_ONCE(*seq, *seq + 1);
                                                urg_hole++;
                                                offset++;
                                                used--;
@@ -2141,7 +2141,7 @@ found_ok_skb:
                        }
                }
 
-               *seq += used;
+               WRITE_ONCE(*seq, *seq + used);
                copied += used;
                len -= used;
 
@@ -2168,7 +2168,7 @@ skip_copy:
 
 found_fin_ok:
                /* Process the FIN. */
-               ++*seq;
+               WRITE_ONCE(*seq, *seq + 1);
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
@@ -2489,7 +2489,10 @@ adjudge_to_death:
        }
 
        if (sk->sk_state == TCP_CLOSE) {
-               struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+               struct request_sock *req;
+
+               req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+                                               lockdep_sock_is_held(sk));
                /* We could get here with a non-NULL req if the socket is
                 * aborted (e.g., closed with unread data) before 3WHS
                 * finishes.
@@ -2561,6 +2564,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int old_state = sk->sk_state;
+       u32 seq;
 
        if (old_state != TCP_CLOSE)
                tcp_set_state(sk, TCP_CLOSE);
@@ -2587,7 +2591,7 @@ int tcp_disconnect(struct sock *sk, int flags)
                __kfree_skb(sk->sk_rx_skb_cache);
                sk->sk_rx_skb_cache = NULL;
        }
-       tp->copied_seq = tp->rcv_nxt;
+       WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
        tp->urg_data = 0;
        tcp_write_queue_purge(sk);
        tcp_fastopen_active_disable_ofo_check(sk);
@@ -2603,9 +2607,12 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->srtt_us = 0;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
        tp->rcv_rtt_last_tsecr = 0;
-       tp->write_seq += tp->max_window + 2;
-       if (tp->write_seq == 0)
-               tp->write_seq = 1;
+
+       seq = tp->write_seq + tp->max_window + 2;
+       if (!seq)
+               seq = 1;
+       WRITE_ONCE(tp->write_seq, seq);
+
        icsk->icsk_backoff = 0;
        tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
@@ -2932,9 +2939,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (sk->sk_state != TCP_CLOSE)
                        err = -EPERM;
                else if (tp->repair_queue == TCP_SEND_QUEUE)
-                       tp->write_seq = val;
+                       WRITE_ONCE(tp->write_seq, val);
                else if (tp->repair_queue == TCP_RECV_QUEUE)
-                       tp->rcv_nxt = val;
+                       WRITE_ONCE(tp->rcv_nxt, val);
                else
                        err = -EINVAL;
                break;
@@ -3833,7 +3840,13 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
 
 void tcp_done(struct sock *sk)
 {
-       struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+       struct request_sock *req;
+
+       /* We might be called with a new socket, after
+        * inet_csk_prepare_forced_close() has been called
+        * so we can not use lockdep_sock_is_held(sk)
+        */
+       req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
 
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
                TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
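
Several tcp.c hunks above funnel updates of tp->write_seq, copied_seq and rcv_nxt through WRITE_ONCE(); the tcp_disconnect() change additionally computes the bumped write_seq in a local so that zero, which tcp_v4_connect() treats as "pick a fresh ISN", is skipped before the single publishing store. A tiny sketch of that bump with illustrative values:

#include <stdint.h>
#include <stdio.h>

static uint32_t bump_write_seq(uint32_t write_seq, uint32_t max_window)
{
        uint32_t seq = write_seq + max_window + 2;      /* may wrap to 0 */

        if (!seq)
                seq = 1;                /* 0 is reserved: "choose a new ISN" */
        return seq;
}

int main(void)
{
        printf("%u\n", (unsigned int)bump_write_seq(0xfffffffdu, 1));   /* wraps, so 1 */
        printf("%u\n", (unsigned int)bump_write_seq(100, 1000));        /* 1102 */
        return 0;
}
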
index 81a8221..5495061 100644 (file)
@@ -26,8 +26,9 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
        } else if (sk->sk_type == SOCK_STREAM) {
                const struct tcp_sock *tp = tcp_sk(sk);
 
-               r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
-               r->idiag_wqueue = tp->write_seq - tp->snd_una;
+               r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+                                            READ_ONCE(tp->copied_seq), 0);
+               r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
        }
        if (info)
                tcp_get_info(sk, info);
index 3fd4512..a915ade 100644 (file)
@@ -253,7 +253,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
         */
        tp = tcp_sk(child);
 
-       tp->fastopen_rsk = req;
+       rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;
 
        /* RFC1323: The window in SYN & SYN/ACK segments is never
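
tp->fastopen_rsk is converted throughout this series to an RCU-managed pointer: writers publish with rcu_assign_pointer()/RCU_INIT_POINTER() and readers use rcu_dereference()/rcu_access_pointer(). The publication half boils down to release/acquire ordering, so a reader sees either NULL or a fully initialised request. A userspace approximation with C11 atomics, deliberately leaving out the grace-period machinery that RCU adds for safe freeing:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int synack_retries; };

static _Atomic(struct request *) fastopen_req;  /* stand-in for tp->fastopen_rsk */

static void publish(void)
{
        struct request *req = malloc(sizeof(*req));

        req->synack_retries = 3;                /* fully initialise first ... */
        atomic_store_explicit(&fastopen_req, req,
                              memory_order_release);    /* ... then publish */
}

int main(void)
{
        publish();
        struct request *req = atomic_load_explicit(&fastopen_req,
                                                    memory_order_acquire);
        if (req)
                printf("retries=%d\n", req->synack_retries);
        free(req);
        return 0;
}
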
index 3578357..a2e52ad 100644 (file)
@@ -359,7 +359,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
        sndmem *= nr_segs * per_mss;
 
        if (sk->sk_sndbuf < sndmem)
-               sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
+               WRITE_ONCE(sk->sk_sndbuf,
+                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -483,8 +484,9 @@ static void tcp_clamp_window(struct sock *sk)
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
-               sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
-                                   net->ipv4.sysctl_tcp_rmem[2]);
+               WRITE_ONCE(sk->sk_rcvbuf,
+                          min(atomic_read(&sk->sk_rmem_alloc),
+                              net->ipv4.sysctl_tcp_rmem[2]));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -648,7 +650,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                rcvbuf = min_t(u64, rcvwin * rcvmem,
                               sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
                if (rcvbuf > sk->sk_rcvbuf) {
-                       sk->sk_rcvbuf = rcvbuf;
+                       WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
                        /* Make the window clamp follow along.  */
                        tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
@@ -2666,7 +2668,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
 
-       if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
+       if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
            tcp_try_undo_loss(sk, false))
                return;
 
@@ -2990,7 +2992,7 @@ void tcp_rearm_rto(struct sock *sk)
        /* If the retrans timer is currently being used by Fast Open
         * for SYN-ACK retrans purpose, stay put.
         */
-       if (tp->fastopen_rsk)
+       if (rcu_access_pointer(tp->fastopen_rsk))
                return;
 
        if (!tp->packets_out) {
@@ -3362,7 +3364,7 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 
        sock_owned_by_me((struct sock *)tp);
        tp->bytes_received += delta;
-       tp->rcv_nxt = seq;
+       WRITE_ONCE(tp->rcv_nxt, seq);
 }
 
 /* Update our send window.
@@ -5356,7 +5358,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
        }
 
        tp->urg_data = TCP_URG_NOTYET;
-       tp->urg_seq = ptr;
+       WRITE_ONCE(tp->urg_seq, ptr);
 
        /* Disable header prediction. */
        tp->pred_flags = 0;
@@ -5932,7 +5934,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                /* Ok.. it's good. Set up sequence numbers and
                 * move to established.
                 */
-               tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+               WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
                tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
                /* RFC1323: The window in SYN & SYN/ACK segments is
@@ -5961,7 +5963,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
-               tp->copied_seq = tp->rcv_nxt;
+               WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
 
                smc_check_reset_syn(tp);
 
@@ -6035,8 +6037,8 @@ discard:
                        tp->tcp_header_len = sizeof(struct tcphdr);
                }
 
-               tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-               tp->copied_seq = tp->rcv_nxt;
+               WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+               WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
                tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
                /* RFC1323: The window in SYN & SYN/ACK segments is
@@ -6087,6 +6089,8 @@ reset_and_undo:
 
 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
 {
+       struct request_sock *req;
+
        tcp_try_undo_loss(sk, false);
 
        /* Reset rtx states to prevent spurious retransmits_timed_out() */
@@ -6096,7 +6100,9 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
        /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
         * we no longer need req so release it.
         */
-       reqsk_fastopen_remove(sk, tcp_sk(sk)->fastopen_rsk, false);
+       req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+                                       lockdep_sock_is_held(sk));
+       reqsk_fastopen_remove(sk, req, false);
 
        /* Re-arm the timer because data may have been sent out.
         * This is similar to the regular data transmission case
@@ -6171,7 +6177,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
        tcp_mstamp_refresh(tp);
        tp->rx_opt.saw_tstamp = 0;
-       req = tp->fastopen_rsk;
+       req = rcu_dereference_protected(tp->fastopen_rsk,
+                                       lockdep_sock_is_held(sk));
        if (req) {
                bool req_stolen;
 
@@ -6211,7 +6218,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        tcp_try_undo_spurious_syn(sk);
                        tp->retrans_stamp = 0;
                        tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
-                       tp->copied_seq = tp->rcv_nxt;
+                       WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
                }
                smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
index 2ee45e3..6be5683 100644 (file)
@@ -164,9 +164,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
                 * without appearing to create any others.
                 */
                if (likely(!tp->repair)) {
-                       tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
-                       if (tp->write_seq == 0)
-                               tp->write_seq = 1;
+                       u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
+
+                       if (!seq)
+                               seq = 1;
+                       WRITE_ONCE(tp->write_seq, seq);
                        tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                        tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                }
@@ -253,7 +255,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
-                       tp->write_seq      = 0;
+                       WRITE_ONCE(tp->write_seq, 0);
        }
 
        inet->inet_dport = usin->sin_port;
@@ -291,10 +293,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        if (likely(!tp->repair)) {
                if (!tp->write_seq)
-                       tp->write_seq = secure_tcp_seq(inet->inet_saddr,
-                                                      inet->inet_daddr,
-                                                      inet->inet_sport,
-                                                      usin->sin_port);
+                       WRITE_ONCE(tp->write_seq,
+                                  secure_tcp_seq(inet->inet_saddr,
+                                                 inet->inet_daddr,
+                                                 inet->inet_sport,
+                                                 usin->sin_port));
                tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
                                                 inet->inet_saddr,
                                                 inet->inet_daddr);
@@ -478,7 +481,7 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
-       fastopen = tp->fastopen_rsk;
+       fastopen = rcu_dereference(tp->fastopen_rsk);
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
@@ -1644,7 +1647,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-       u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+       u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ -1916,7 +1919,7 @@ process:
        if (tcp_v4_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (tcp_filter(sk, skb))
                goto discard_and_relse;
@@ -2121,7 +2124,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       BUG_ON(tp->fastopen_rsk);
+       BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
@@ -2455,12 +2458,13 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
-               rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+               rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+                                     READ_ONCE(tp->copied_seq), 0);
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
                i, src, srcp, dest, destp, state,
-               tp->write_seq - tp->snd_una,
+               READ_ONCE(tp->write_seq) - tp->snd_una,
                rx_queue,
                timer_active,
                jiffies_delta_to_clock_t(timer_expires - jiffies),
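
The get_tcp4_sock() hunk above samples rcv_nxt and copied_seq without the socket lock, so, as the comment notes, the difference can be transiently negative and is clamped at zero. A small sketch of that clamping with 32-bit sequence arithmetic:

#include <stdint.h>
#include <stdio.h>

static int rx_queue(uint32_t rcv_nxt, uint32_t copied_seq)
{
        int diff = (int)(rcv_nxt - copied_seq); /* transiently negative under races */

        return diff > 0 ? diff : 0;
}

int main(void)
{
        printf("%d\n", rx_queue(1000, 400));    /* 600 bytes not yet read */
        printf("%d\n", rx_queue(1000, 1003));   /* racy sample, clamped to 0 */
        return 0;
}
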
index bb140a5..c802bc8 100644 (file)
@@ -462,6 +462,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        struct tcp_sock *oldtp, *newtp;
+       u32 seq;
 
        if (!newsk)
                return NULL;
@@ -475,12 +476,16 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        /* Now setup tcp_sock */
        newtp->pred_flags = 0;
 
-       newtp->rcv_wup = newtp->copied_seq =
-       newtp->rcv_nxt = treq->rcv_isn + 1;
+       seq = treq->rcv_isn + 1;
+       newtp->rcv_wup = seq;
+       WRITE_ONCE(newtp->copied_seq, seq);
+       WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;
 
-       newtp->snd_sml = newtp->snd_una =
-       newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
+       seq = treq->snt_isn + 1;
+       newtp->snd_sml = newtp->snd_una = seq;
+       WRITE_ONCE(newtp->snd_nxt, seq);
+       newtp->snd_up = seq;
 
        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
@@ -495,7 +500,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        newtp->total_retrans = req->num_retrans;
 
        tcp_init_xmit_timers(newsk);
-       newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+       WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
 
        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
@@ -541,7 +546,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
-       newtp->fastopen_rsk = NULL;
+       RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
 
        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
 
index fec6d67..0488607 100644 (file)
@@ -67,7 +67,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int prior_packets = tp->packets_out;
 
-       tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+       WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
 
        __skb_unlink(skb, &sk->sk_write_queue);
        tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
@@ -1196,10 +1196,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
 
        /* Advance write_seq and place onto the write_queue. */
-       tp->write_seq = TCP_SKB_CB(skb)->end_seq;
+       WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
-       sk->sk_wmem_queued += skb->truesize;
+       sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
 }
 
@@ -1333,7 +1333,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
                return -ENOMEM; /* We'll just try again later. */
        skb_copy_decrypted(buff, skb);
 
-       sk->sk_wmem_queued += buff->truesize;
+       sk_wmem_queued_add(sk, buff->truesize);
        sk_mem_charge(sk, buff->truesize);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
@@ -1443,7 +1443,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 
        if (delta_truesize) {
                skb->truesize      -= delta_truesize;
-               sk->sk_wmem_queued -= delta_truesize;
+               sk_wmem_queued_add(sk, -delta_truesize);
                sk_mem_uncharge(sk, delta_truesize);
                sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
        }
@@ -1888,7 +1888,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
                return -ENOMEM;
        skb_copy_decrypted(buff, skb);
 
-       sk->sk_wmem_queued += buff->truesize;
+       sk_wmem_queued_add(sk, buff->truesize);
        sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;
@@ -2152,7 +2152,7 @@ static int tcp_mtu_probe(struct sock *sk)
        nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
        if (!nskb)
                return -1;
-       sk->sk_wmem_queued += nskb->truesize;
+       sk_wmem_queued_add(sk, nskb->truesize);
        sk_mem_charge(sk, nskb->truesize);
 
        skb = tcp_send_head(sk);
@@ -2482,7 +2482,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
        /* Don't do any loss probe on a Fast Open connection before 3WHS
         * finishes.
         */
-       if (tp->fastopen_rsk)
+       if (rcu_access_pointer(tp->fastopen_rsk))
                return false;
 
        early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
@@ -3142,7 +3142,7 @@ void tcp_send_fin(struct sock *sk)
                         * if FIN had been sent. This is because retransmit path
                         * does not change tp->snd_nxt.
                         */
-                       tp->snd_nxt++;
+                       WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
                        return;
                }
        } else {
@@ -3222,7 +3222,7 @@ int tcp_send_synack(struct sock *sk)
                        tcp_rtx_queue_unlink_and_free(skb, sk);
                        __skb_header_release(nskb);
                        tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
-                       sk->sk_wmem_queued += nskb->truesize;
+                       sk_wmem_queued_add(sk, nskb->truesize);
                        sk_mem_charge(sk, nskb->truesize);
                        skb = nskb;
                }
@@ -3426,14 +3426,14 @@ static void tcp_connect_init(struct sock *sk)
        tp->snd_una = tp->write_seq;
        tp->snd_sml = tp->write_seq;
        tp->snd_up = tp->write_seq;
-       tp->snd_nxt = tp->write_seq;
+       WRITE_ONCE(tp->snd_nxt, tp->write_seq);
 
        if (likely(!tp->repair))
                tp->rcv_nxt = 0;
        else
                tp->rcv_tstamp = tcp_jiffies32;
        tp->rcv_wup = tp->rcv_nxt;
-       tp->copied_seq = tp->rcv_nxt;
+       WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
 
        inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
        inet_csk(sk)->icsk_retransmits = 0;
@@ -3447,9 +3447,9 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
 
        tcb->end_seq += skb->len;
        __skb_header_release(skb);
-       sk->sk_wmem_queued += skb->truesize;
+       sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
-       tp->write_seq = tcb->end_seq;
+       WRITE_ONCE(tp->write_seq, tcb->end_seq);
        tp->packets_out += tcp_skb_pcount(skb);
 }
 
@@ -3586,11 +3586,11 @@ int tcp_connect(struct sock *sk)
        /* We change tp->snd_nxt after the tcp_transmit_skb() call
         * in order to make this packet get counted in tcpOutSegs.
         */
-       tp->snd_nxt = tp->write_seq;
+       WRITE_ONCE(tp->snd_nxt, tp->write_seq);
        tp->pushed_seq = tp->write_seq;
        buff = tcp_send_head(sk);
        if (unlikely(buff)) {
-               tp->snd_nxt     = TCP_SKB_CB(buff)->seq;
+               WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
                tp->pushed_seq  = TCP_SKB_CB(buff)->seq;
        }
        TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
index 40de2d2..dd5a631 100644 (file)
@@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk,
                return false;
 
        start_ts = tcp_sk(sk)->retrans_stamp;
-       if (likely(timeout == 0))
-               timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
+       if (likely(timeout == 0)) {
+               unsigned int rto_base = TCP_RTO_MIN;
+
+               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+                       rto_base = tcp_timeout_init(sk);
+               timeout = tcp_model_timeout(sk, boundary, rto_base);
+       }
 
        return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
 }
@@ -381,15 +386,13 @@ abort:            tcp_write_err(sk);
  *     Timer for Fast Open socket to retransmit SYNACK. Note that the
  *     sk here is the child socket, not the parent (listener) socket.
  */
-static void tcp_fastopen_synack_timer(struct sock *sk)
+static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
            sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct tcp_sock *tp = tcp_sk(sk);
-       struct request_sock *req;
 
-       req = tcp_sk(sk)->fastopen_rsk;
        req->rsk_ops->syn_ack_timeout(req);
 
        if (req->num_timeout >= max_retries) {
@@ -430,11 +433,14 @@ void tcp_retransmit_timer(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock *req;
 
-       if (tp->fastopen_rsk) {
+       req = rcu_dereference_protected(tp->fastopen_rsk,
+                                       lockdep_sock_is_held(sk));
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
-               tcp_fastopen_synack_timer(sk);
+               tcp_fastopen_synack_timer(sk, req);
                /* Before we receive ACK to our SYN-ACK don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
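
retransmits_timed_out() above now derives the back-off base from tcp_timeout_init() for SYN_SENT/SYN_RECV sockets instead of always using TCP_RTO_MIN. The quantity it feeds is a cumulative doubling timeout capped at a maximum RTO; the sketch below approximates that model with made-up base and cap values (the kernel's tcp_model_timeout() computes the same sum in closed form):

#include <stdio.h>

static unsigned int model_timeout(unsigned int boundary, unsigned int rto_base,
                                  unsigned int rto_max)
{
        unsigned int timeout = 0, rto = rto_base;

        for (unsigned int i = 0; i < boundary; i++) {
                timeout += rto;                 /* wait for this attempt */
                if (rto < rto_max)
                        rto = (2 * rto < rto_max) ? 2 * rto : rto_max;
        }
        return timeout;
}

int main(void)
{
        /* e.g. 6 retries, 1s initial RTO, 120s cap -> 1+2+4+8+16+32 = 63s */
        printf("%u\n", model_timeout(6, 1, 120));
        return 0;
}
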
index cf75515..14bc654 100644 (file)
@@ -821,6 +821,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
        int is_udplite = IS_UDPLITE(sk);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
+       int datalen = len - sizeof(*uh);
        __wsum csum = 0;
 
        /*
@@ -854,10 +855,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        return -EIO;
                }
 
-               skb_shinfo(skb)->gso_size = cork->gso_size;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
-               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
-                                                        cork->gso_size);
+               if (datalen > cork->gso_size) {
+                       skb_shinfo(skb)->gso_size = cork->gso_size;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
+               }
                goto csum_partial;
        }
 
@@ -1969,7 +1972,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
         */
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
                int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
@@ -2298,7 +2301,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        /* No socket. Drop packet silently, if checksum is wrong */
        if (udp_lib_checksum_complete(skb))
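
The udp_send_skb() change above only marks a datagram as UDP_L4 GSO when the payload is actually larger than one segment, and sizes gso_segs from the UDP payload length (datalen excludes the UDP header). A sketch of that guard using a stand-in gso_info struct; the real fields live in skb_shared_info:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct gso_info { int gso_size, gso_segs; };

static void udp_mark_gso(struct gso_info *gi, int datalen, int gso_size)
{
        if (datalen > gso_size) {
                gi->gso_size = gso_size;
                gi->gso_segs = DIV_ROUND_UP(datalen, gso_size);
        } else {
                gi->gso_size = 0;       /* single datagram, no GSO needed */
                gi->gso_segs = 0;
        }
}

int main(void)
{
        struct gso_info gi;

        udp_mark_gso(&gi, 3000, 1400);
        printf("segs=%d\n", gi.gso_segs);       /* 3 */
        udp_mark_gso(&gi, 1000, 1400);
        printf("segs=%d\n", gi.gso_segs);       /* 0: fits in one datagram */
        return 0;
}
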
index 6a576ff..34ccef1 100644 (file)
@@ -5964,13 +5964,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
        switch (event) {
        case RTM_NEWADDR:
                /*
-                * If the address was optimistic
-                * we inserted the route at the start of
-                * our DAD process, so we don't need
-                * to do it again
+                * If the address was optimistic we inserted the route at the
+                * start of our DAD process, so we don't need to do it again.
+                * If the device was taken down in the middle of the DAD
+                * cycle there is a race where we could get here without a
+                * host route, so nothing to insert. That will be fixed when
+                * the device is brought up.
                 */
-               if (!rcu_access_pointer(ifp->rt->fib6_node))
+               if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
                        ip6_ins_rt(net, ifp->rt);
+               } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
+                       pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
+                               &ifp->addr, ifp->idev->dev->name);
+               }
+
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                if (!ipv6_addr_any(&ifp->peer_addr))
index d5779d6..787d9f2 100644 (file)
@@ -2192,6 +2192,7 @@ static void ip6erspan_tap_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
+       dev->max_mtu = 0;
        dev->netdev_ops = &ip6erspan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
index d432d00..3d71c7d 100644 (file)
@@ -223,6 +223,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
        if (ipv6_addr_is_multicast(&hdr->saddr))
                goto err;
 
+       /* While RFC4291 is not explicit about v4mapped addresses
+        * in IPv6 headers, it seems clear linux dual-stack
+        * model can not deal properly with these.
+        * Security models could be fooled by ::ffff:127.0.0.1 for example.
+        *
+        * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
+        */
+       if (ipv6_addr_v4mapped(&hdr->saddr))
+               goto err;
+
        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
@@ -371,7 +381,7 @@ resubmit_final:
                        /* Free reference early: we don't need it any more,
                           and it may hold ip_conntrack module loaded
                           indefinitely. */
-                       nf_reset(skb);
+                       nf_reset_ct(skb);
 
                        skb_postpull_rcsum(skb, skb_network_header(skb),
                                           skb_network_header_len(skb));
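
ip6_rcv_core() above now drops packets whose IPv6 source is a v4-mapped address (::ffff:a.b.c.d), since, per the comment, such sources can confuse dual-stack handling and security checks. The test the kernel's ipv6_addr_v4mapped() helper performs amounts to the prefix check sketched here in userspace:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool addr_v4mapped(const struct in6_addr *a)
{
        /* first 80 bits zero, next 16 bits all-ones */
        static const unsigned char prefix[12] =
                { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff };

        return memcmp(a->s6_addr, prefix, sizeof(prefix)) == 0;
}

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "::ffff:127.0.0.1", &a);
        printf("%d\n", addr_v4mapped(&a));      /* 1: would be dropped */
        inet_pton(AF_INET6, "2001:db8::1", &a);
        printf("%d\n", addr_v4mapped(&a));      /* 0 */
        return 0;
}
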
index edadee4..71827b5 100644 (file)
@@ -768,6 +768,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                inet6_sk(skb->sk) : NULL;
        struct ip6_frag_state state;
        unsigned int mtu, hlen, nexthdr_offset;
+       ktime_t tstamp = skb->tstamp;
        int hroom, err = 0;
        __be32 frag_id;
        u8 *prevhdr, nexthdr = 0;
@@ -855,6 +856,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                        if (iter.frag)
                                ip6_fraglist_prepare(skb, &iter);
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, skb);
                        if (!err)
                                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -913,6 +915,7 @@ slow_path:
                /*
                 *      Put this fragment into the sending queue.
                 */
+               frag->tstamp = tstamp;
                err = output(net, sk, frag);
                if (err)
                        goto fail;
index a9bff55..409e79b 100644 (file)
@@ -119,6 +119,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                  struct sk_buff *))
 {
        int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
+       ktime_t tstamp = skb->tstamp;
        struct ip6_frag_state state;
        u8 *prevhdr, nexthdr = 0;
        unsigned int mtu, hlen;
@@ -183,6 +184,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                        if (iter.frag)
                                ip6_fraglist_prepare(skb, &iter);
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, data, skb);
                        if (err || !iter.frag)
                                break;
@@ -215,6 +217,7 @@ slow_path:
                        goto blackhole;
                }
 
+               skb2->tstamp = tstamp;
                err = output(net, sk, data, skb2);
                if (err)
                        goto blackhole;
index e6c9da9..a0a2de3 100644 (file)
@@ -54,7 +54,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
                return;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
        if (hooknum == NF_INET_PRE_ROUTING ||
index 6e1888e..a77f6b7 100644 (file)
@@ -215,7 +215,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 
                        /* Not releasing hash table! */
                        if (clone) {
-                               nf_reset(clone);
+                               nf_reset_ct(clone);
                                rawv6_rcv(sk, clone);
                        }
                }
index e3d9f45..4804b6d 100644 (file)
@@ -215,7 +215,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
            !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
-               tp->write_seq = 0;
+               WRITE_ONCE(tp->write_seq, 0);
        }
 
        sk->sk_v6_daddr = usin->sin6_addr;
@@ -311,10 +311,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (likely(!tp->repair)) {
                if (!tp->write_seq)
-                       tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
-                                                        sk->sk_v6_daddr.s6_addr32,
-                                                        inet->inet_sport,
-                                                        inet->inet_dport);
+                       WRITE_ONCE(tp->write_seq,
+                                  secure_tcpv6_seq(np->saddr.s6_addr32,
+                                                   sk->sk_v6_daddr.s6_addr32,
+                                                   inet->inet_sport,
+                                                   inet->inet_dport));
                tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
                                                   np->saddr.s6_addr32,
                                                   sk->sk_v6_daddr.s6_addr32);
@@ -406,7 +407,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
-       fastopen = tp->fastopen_rsk;
+       fastopen = rcu_dereference(tp->fastopen_rsk);
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
@@ -1895,7 +1896,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
-               rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+               rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+                                     READ_ONCE(tp->copied_seq), 0);
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
@@ -1906,7 +1908,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   state,
-                  tp->write_seq - tp->snd_una,
+                  READ_ONCE(tp->write_seq) - tp->snd_una,
                   rx_queue,
                   timer_active,
                   jiffies_delta_to_clock_t(timer_expires - jiffies),
index aae4938..6324d3a 100644 (file)
@@ -1109,6 +1109,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
        __wsum csum = 0;
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
+       int datalen = len - sizeof(*uh);
 
        /*
         * Create a UDP header
@@ -1141,8 +1142,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
                        return -EIO;
                }
 
-               skb_shinfo(skb)->gso_size = cork->gso_size;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+               if (datalen > cork->gso_size) {
+                       skb_shinfo(skb)->gso_size = cork->gso_size;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
+               }
                goto csum_partial;
        }
 
index 105e5a7..f82ea12 100644 (file)
@@ -1078,7 +1078,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
index bd3f393..fd5ac27 100644 (file)
@@ -151,7 +151,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        skb->ip_summed = CHECKSUM_NONE;
 
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        rcu_read_lock();
        dev = rcu_dereference(spriv->dev);
index 6228333..0d7c887 100644 (file)
@@ -193,7 +193,7 @@ pass_up:
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return sk_receive_skb(sk, skb, 1);
 
index 687e23a..802f19a 100644 (file)
@@ -206,7 +206,7 @@ pass_up:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return sk_receive_skb(sk, skb, 1);
 
index 2017b7d..c74f44d 100644 (file)
@@ -113,22 +113,26 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
  *
  *     Send data via reliable llc2 connection.
  *     Returns 0 upon success, non-zero if action did not succeed.
+ *
+ *     This function always consumes a reference to the skb.
  */
 static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
 {
        struct llc_sock* llc = llc_sk(sk);
-       int rc = 0;
 
        if (unlikely(llc_data_accept_state(llc->state) ||
                     llc->remote_busy_flag ||
                     llc->p_flag)) {
                long timeout = sock_sndtimeo(sk, noblock);
+               int rc;
 
                rc = llc_ui_wait_for_busy_core(sk, timeout);
+               if (rc) {
+                       kfree_skb(skb);
+                       return rc;
+               }
        }
-       if (unlikely(!rc))
-               rc = llc_build_and_send_pkt(sk, skb);
-       return rc;
+       return llc_build_and_send_pkt(sk, skb);
 }
 
 static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
@@ -899,7 +903,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        int flags = msg->msg_flags;
        int noblock = flags & MSG_DONTWAIT;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        size_t size = 0;
        int rc = -EINVAL, copied = 0, hdrlen;
 
@@ -908,10 +912,10 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        lock_sock(sk);
        if (addr) {
                if (msg->msg_namelen < sizeof(*addr))
-                       goto release;
+                       goto out;
        } else {
                if (llc_ui_addr_null(&llc->addr))
-                       goto release;
+                       goto out;
                addr = &llc->addr;
        }
        /* must bind connection to sap if user hasn't done it. */
@@ -919,7 +923,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                /* bind to sap with null dev, exclusive. */
                rc = llc_ui_autobind(sock, addr);
                if (rc)
-                       goto release;
+                       goto out;
        }
        hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
        size = hdrlen + len;
@@ -928,12 +932,12 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        copied = size - hdrlen;
        rc = -EINVAL;
        if (copied < 0)
-               goto release;
+               goto out;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (!skb)
-               goto release;
+               goto out;
        skb->dev      = llc->dev;
        skb->protocol = llc_proto_type(addr->sllc_arphrd);
        skb_reserve(skb, hdrlen);
@@ -943,29 +947,31 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
                llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
                                          addr->sllc_sap);
+               skb = NULL;
                goto out;
        }
        if (addr->sllc_test) {
                llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
                                            addr->sllc_sap);
+               skb = NULL;
                goto out;
        }
        if (addr->sllc_xid) {
                llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
                                           addr->sllc_sap);
+               skb = NULL;
                goto out;
        }
        rc = -ENOPROTOOPT;
        if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
                goto out;
        rc = llc_ui_send_data(sk, skb, noblock);
+       skb = NULL;
 out:
-       if (rc) {
-               kfree_skb(skb);
-release:
+       kfree_skb(skb);
+       if (rc)
                dprintk("%s: failed sending from %02X to %02X: %d\n",
                        __func__, llc->laddr.lsap, llc->daddr.lsap, rc);
-       }
        release_sock(sk);
        return rc ? : copied;
 }
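
The af_llc.c rework above settles on one ownership rule: once the skb is passed to llc_ui_send_data() or one of the llc_build_and_send_*() helpers, the callee always consumes it, and the caller clears its local pointer so the common exit path can unconditionally kfree_skb() whatever it still owns. A minimal userspace sketch of that convention, with malloc()/free() standing in for skb allocation and illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; unsigned char data[64]; };

/* Callee consumes the buffer on every path, success or failure. */
static int send_buf(struct buf *b, int simulate_error)
{
        if (simulate_error) {
                free(b);            /* error path still consumes */
                return -1;
        }
        printf("sent %zu bytes\n", b->len);
        free(b);                    /* success path consumes too */
        return 0;
}

static int do_send(int simulate_error)
{
        struct buf *b = calloc(1, sizeof(*b));
        int rc = -1;

        if (!b)
                goto out;
        b->len = 42;
        rc = send_buf(b, simulate_error);
        b = NULL;                   /* ownership transferred: forget it */
out:
        free(b);                    /* frees only what we still own (or NULL) */
        return rc;
}

int main(void)
{
        do_send(0);
        do_send(1);
        return 0;
}
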
index 4d78375..647c055 100644 (file)
@@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
        llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
+               skb_get(skb);
                llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
@@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
        llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
-               rc = llc_conn_send_pdu(sk, skb);
+               skb_get(skb);
+               llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
        return rc;
@@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
        llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
+               skb_get(skb);
                llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
@@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
        llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
-               rc = llc_conn_send_pdu(sk, skb);
+               skb_get(skb);
+               llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
        return rc;
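
In the llc_c_ac.c hunks the callers take an extra reference with skb_get() before handing the skb to llc_conn_send_pdu(), which now consumes one, because they still use the skb afterwards. A rough userspace sketch of the same idea with a hand-rolled reference count (the helper names are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

struct refbuf { int refs; int seq; };

static struct refbuf *rb_alloc(void)
{
        struct refbuf *b = calloc(1, sizeof(*b));
        if (b)
                b->refs = 1;
        return b;
}
static void rb_get(struct refbuf *b) { b->refs++; }
static void rb_put(struct refbuf *b)
{
        if (--b->refs == 0)
                free(b);
}

/* Consumes one reference, like llc_conn_send_pdu() after the change. */
static void send_pdu(struct refbuf *b)
{
        printf("queued seq %d\n", b->seq);
        rb_put(b);
}

int main(void)
{
        struct refbuf *b = rb_alloc();

        if (!b)
                return 1;
        b->seq = 7;
        rb_get(b);      /* keep our own reference across the consuming call */
        send_pdu(b);    /* drops the reference it was given */
        b->seq++;       /* still safe: we hold the last reference */
        rb_put(b);      /* now the buffer is actually freed */
        return 0;
}
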
index 4ff89cb..7b620ac 100644 (file)
@@ -30,7 +30,7 @@
 #endif
 
 static int llc_find_offset(int state, int ev_type);
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
+static void llc_conn_send_pdus(struct sock *sk);
 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
 static int llc_exec_conn_trans_actions(struct sock *sk,
                                       struct llc_conn_state_trans *trans,
@@ -55,6 +55,8 @@ int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
  *     (executing its actions and changing state), upper layer will be
  *     indicated or confirmed, if needed. Returns 0 for success, 1 for
  *     failure. The socket lock has to be held before calling this function.
+ *
+ *     This function always consumes a reference to the skb.
  */
 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
 {
@@ -62,12 +64,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
        struct llc_sock *llc = llc_sk(skb->sk);
        struct llc_conn_state_ev *ev = llc_conn_ev(skb);
 
-       /*
-        * We have to hold the skb, because llc_conn_service will kfree it in
-        * the sending path and we need to look at the skb->cb, where we encode
-        * llc_conn_state_ev.
-        */
-       skb_get(skb);
        ev->ind_prim = ev->cfm_prim = 0;
        /*
         * Send event to state machine
@@ -75,21 +71,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
        rc = llc_conn_service(skb->sk, skb);
        if (unlikely(rc != 0)) {
                printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
-               goto out_kfree_skb;
-       }
-
-       if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
-               /* indicate or confirm not required */
-               if (!skb->next)
-                       goto out_kfree_skb;
                goto out_skb_put;
        }
 
-       if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
-               skb_get(skb);
-
        switch (ev->ind_prim) {
        case LLC_DATA_PRIM:
+               skb_get(skb);
                llc_save_primitive(sk, skb, LLC_DATA_PRIM);
                if (unlikely(sock_queue_rcv_skb(sk, skb))) {
                        /*
@@ -106,6 +93,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                 * skb->sk pointing to the newly created struct sock in
                 * llc_conn_handler. -acme
                 */
+               skb_get(skb);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_state_change(sk);
                break;
@@ -121,7 +109,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                                sk->sk_state_change(sk);
                        }
                }
-               kfree_skb(skb);
                sock_put(sk);
                break;
        case LLC_RESET_PRIM:
@@ -130,14 +117,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                 * RESET is not being notified to upper layers for now
                 */
                printk(KERN_INFO "%s: received a reset ind!\n", __func__);
-               kfree_skb(skb);
                break;
        default:
-               if (ev->ind_prim) {
+               if (ev->ind_prim)
                        printk(KERN_INFO "%s: received unknown %d prim!\n",
                                __func__, ev->ind_prim);
-                       kfree_skb(skb);
-               }
                /* No indication */
                break;
        }
@@ -179,25 +163,22 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                printk(KERN_INFO "%s: received a reset conf!\n", __func__);
                break;
        default:
-               if (ev->cfm_prim) {
+               if (ev->cfm_prim)
                        printk(KERN_INFO "%s: received unknown %d prim!\n",
                                        __func__, ev->cfm_prim);
-                       break;
-               }
-               goto out_skb_put; /* No confirmation */
+               /* No confirmation */
+               break;
        }
-out_kfree_skb:
-       kfree_skb(skb);
 out_skb_put:
        kfree_skb(skb);
        return rc;
 }
 
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
 {
        /* queue PDU to send to MAC layer */
        skb_queue_tail(&sk->sk_write_queue, skb);
-       return llc_conn_send_pdus(sk, skb);
+       llc_conn_send_pdus(sk);
 }
 
 /**
@@ -255,7 +236,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
        if (howmany_resend > 0)
                llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
        /* any PDUs to re-send are queued up; start sending to MAC */
-       llc_conn_send_pdus(sk, NULL);
+       llc_conn_send_pdus(sk);
 out:;
 }
 
@@ -296,7 +277,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
        if (howmany_resend > 0)
                llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
        /* any PDUs to re-send are queued up; start sending to MAC */
-       llc_conn_send_pdus(sk, NULL);
+       llc_conn_send_pdus(sk);
 out:;
 }
 
@@ -340,16 +321,12 @@ out:
 /**
  *     llc_conn_send_pdus - Sends queued PDUs
  *     @sk: active connection
- *     @hold_skb: the skb held by caller, or NULL if does not care
  *
- *     Sends queued pdus to MAC layer for transmission. When @hold_skb is
- *     NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
- *     successfully, or 1 for failure.
+ *     Sends queued pdus to MAC layer for transmission.
  */
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
+static void llc_conn_send_pdus(struct sock *sk)
 {
        struct sk_buff *skb;
-       int ret = 0;
 
        while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
                struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -361,20 +338,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
                        skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
                        if (!skb2)
                                break;
-                       dev_queue_xmit(skb2);
-               } else {
-                       bool is_target = skb == hold_skb;
-                       int rc;
-
-                       if (is_target)
-                               skb_get(skb);
-                       rc = dev_queue_xmit(skb);
-                       if (is_target)
-                               ret = rc;
+                       skb = skb2;
                }
+               dev_queue_xmit(skb);
        }
-
-       return ret;
 }
 
 /**
@@ -846,7 +813,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
        else {
                dprintk("%s: adding to backlog...\n", __func__);
                llc_set_backlog_type(skb, LLC_PACKET);
-               if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+               if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                        goto drop_unlock;
        }
 out:
index 8db03c2..ad65477 100644 (file)
@@ -38,6 +38,8 @@
  *     closed and -EBUSY when sending data is not permitted in this state or
  *     LLC has sent an I pdu with p bit set to 1 and is waiting for its
  *     response.
+ *
+ *     This function always consumes a reference to the skb.
  */
 int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
 {
@@ -46,20 +48,22 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
        struct llc_sock *llc = llc_sk(sk);
 
        if (unlikely(llc->state == LLC_CONN_STATE_ADM))
-               goto out;
+               goto out_free;
        rc = -EBUSY;
        if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
                     llc->p_flag)) {
                llc->failed_data_req = 1;
-               goto out;
+               goto out_free;
        }
        ev = llc_conn_ev(skb);
        ev->type      = LLC_CONN_EV_TYPE_PRIM;
        ev->prim      = LLC_DATA_PRIM;
        ev->prim_type = LLC_PRIM_TYPE_REQ;
        skb->dev      = llc->dev;
-       rc = llc_conn_state_process(sk, skb);
-out:
+       return llc_conn_state_process(sk, skb);
+
+out_free:
+       kfree_skb(skb);
        return rc;
 }
 
index a94bd56..7ae4cc6 100644 (file)
@@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_ui_cmd(skb);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-       if (likely(!rc))
+       if (likely(!rc)) {
+               skb_get(skb);
                rc = dev_queue_xmit(skb);
+       }
        return rc;
 }
 
@@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-       if (likely(!rc))
+       if (likely(!rc)) {
+               skb_get(skb);
                rc = dev_queue_xmit(skb);
+       }
        return rc;
 }
 
@@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_test_cmd(skb);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-       if (likely(!rc))
+       if (likely(!rc)) {
+               skb_get(skb);
                rc = dev_queue_xmit(skb);
+       }
        return rc;
 }
 
index a7f7b8f..be41906 100644 (file)
@@ -197,29 +197,22 @@ out:
  *     After executing actions of the event, upper layer will be indicated
  *     if needed (on receiving a UI frame). sk can be null for the
  *     datalink_proto case.
+ *
+ *     This function always consumes a reference to the skb.
  */
 static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
 {
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
 
-       /*
-        * We have to hold the skb, because llc_sap_next_state
-        * will kfree it in the sending path and we need to
-        * look at the skb->cb, where we encode llc_sap_state_ev.
-        */
-       skb_get(skb);
        ev->ind_cfm_flag = 0;
        llc_sap_next_state(sap, skb);
-       if (ev->ind_cfm_flag == LLC_IND) {
-               if (skb->sk->sk_state == TCP_LISTEN)
-                       kfree_skb(skb);
-               else {
-                       llc_save_primitive(skb->sk, skb, ev->prim);
 
-                       /* queue skb to the user. */
-                       if (sock_queue_rcv_skb(skb->sk, skb))
-                               kfree_skb(skb);
-               }
+       if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
+               llc_save_primitive(skb->sk, skb, ev->prim);
+
+               /* queue skb to the user. */
+               if (sock_queue_rcv_skb(skb->sk, skb) == 0)
+                       return;
        }
        kfree_skb(skb);
 }
index b1438fd..64b544a 100644 (file)
@@ -487,9 +487,14 @@ static ssize_t ieee80211_if_fmt_aqm(
        const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
        struct ieee80211_local *local = sdata->local;
-       struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+       struct txq_info *txqi;
        int len;
 
+       if (!sdata->vif.txq)
+               return 0;
+
+       txqi = to_txq_info(sdata->vif.txq);
+
        spin_lock_bh(&local->fq.lock);
        rcu_read_lock();
 
@@ -658,7 +663,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
        DEBUGFS_ADD(hw_queues);
 
-       if (sdata->local->ops->wake_tx_queue)
+       if (sdata->local->ops->wake_tx_queue &&
+           sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_NAN)
                DEBUGFS_ADD(aqm);
 }
 
index 26a2f49..54dd884 100644 (file)
@@ -2633,7 +2633,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 
        rcu_read_lock();
        ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
-       if (WARN_ON_ONCE(ssid == NULL))
+       if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+                     "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
                ssid_len = 0;
        else
                ssid_len = ssid[1];
@@ -5233,7 +5234,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
-       if (!ssidie) {
+       if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
                rcu_read_unlock();
                kfree(assoc_data);
                return -EINVAL;
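
Both mlme.c hunks refuse an SSID element whose length byte is larger than the destination buffer (IEEE80211_MAX_SSID_LEN, or assoc_data->ssid) before anything is copied. A compact userspace sketch of that bounds check on a TLV-style element, where byte 0 is the element ID and byte 1 the payload length; the buffer size and helper name are illustrative:

#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

/* Copy the SSID payload out of an information element: ie[0] = id,
 * ie[1] = length, ie[2..] = payload. Reject anything that would overflow. */
static int copy_ssid(const unsigned char *ie, unsigned char *out, size_t outlen)
{
        if (!ie || ie[1] > outlen)
                return -1;              /* missing or oversized element */
        memcpy(out, ie + 2, ie[1]);
        return ie[1];
}

int main(void)
{
        unsigned char ok[]  = { 0x00, 4, 'w', 'l', 'a', 'n' };
        unsigned char bad[] = { 0x00, 200 };    /* length byte lies */
        unsigned char ssid[MAX_SSID_LEN];

        printf("ok:  %d\n", copy_ssid(ok, ssid, sizeof(ssid)));   /* 4  */
        printf("bad: %d\n", copy_ssid(bad, ssid, sizeof(ssid)));  /* -1 */
        return 0;
}
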
index 768d14c..0e05ff0 100644 (file)
@@ -3467,9 +3467,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
        case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
                /* process for all: mesh, mlme, ibss */
                break;
+       case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               if (is_multicast_ether_addr(mgmt->da) &&
+                   !is_broadcast_ether_addr(mgmt->da))
+                       return RX_DROP_MONITOR;
+
+               /* process only for station/IBSS */
+               if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+                   sdata->vif.type != NL80211_IFTYPE_ADHOC)
+                       return RX_DROP_MONITOR;
+               break;
        case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
        case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
-       case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
        case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
                if (is_multicast_ether_addr(mgmt->da) &&
                    !is_broadcast_ether_addr(mgmt->da))
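
The rx.c hunk drops deauth frames whose destination is multicast but not broadcast and only handles them on station/IBSS interfaces. The two address predicates it relies on are simple: an Ethernet address is multicast if the low bit of the first octet is set, and broadcast if all six octets are 0xff. A short userspace sketch (the helpers are re-implemented here, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

static bool is_multicast_ether_addr(const unsigned char *a)
{
        return a[0] & 0x01;
}

static bool is_broadcast_ether_addr(const unsigned char *a)
{
        return (a[0] & a[1] & a[2] & a[3] & a[4] & a[5]) == 0xff;
}

int main(void)
{
        unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned char ucast[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        /* Mirrors the new filter: drop multicast-but-not-broadcast deauth. */
        printf("bcast dropped? %d\n",
               is_multicast_ether_addr(bcast) && !is_broadcast_ether_addr(bcast));
        printf("mcast dropped? %d\n",
               is_multicast_ether_addr(mcast) && !is_broadcast_ether_addr(mcast));
        printf("ucast dropped? %d\n",
               is_multicast_ether_addr(ucast) && !is_broadcast_ether_addr(ucast));
        return 0;
}
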
index adf94ba..4d31d96 100644 (file)
@@ -520,10 +520,33 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
        return 0;
 }
 
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *sdata_iter;
+
+       if (!ieee80211_is_radar_required(local))
+               return true;
+
+       if (!regulatory_pre_cac_allowed(local->hw.wiphy))
+               return false;
+
+       mutex_lock(&local->iflist_mtx);
+       list_for_each_entry(sdata_iter, &local->interfaces, list) {
+               if (sdata_iter->wdev.cac_started) {
+                       mutex_unlock(&local->iflist_mtx);
+                       return false;
+               }
+       }
+       mutex_unlock(&local->iflist_mtx);
+
+       return true;
+}
+
 static bool ieee80211_can_scan(struct ieee80211_local *local,
                               struct ieee80211_sub_if_data *sdata)
 {
-       if (ieee80211_is_radar_required(local))
+       if (!__ieee80211_can_leave_ch(sdata))
                return false;
 
        if (!list_empty(&local->roc_list))
@@ -630,7 +653,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 
        lockdep_assert_held(&local->mtx);
 
-       if (local->scan_req || ieee80211_is_radar_required(local))
+       if (local->scan_req)
+               return -EBUSY;
+
+       if (!__ieee80211_can_leave_ch(sdata))
                return -EBUSY;
 
        if (!ieee80211_can_scan(local, sdata)) {
index 051a02d..32a7a53 100644 (file)
@@ -247,7 +247,8 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
        struct sta_info *sta;
        int i;
 
-       spin_lock_bh(&fq->lock);
+       local_bh_disable();
+       spin_lock(&fq->lock);
 
        if (sdata->vif.type == NL80211_IFTYPE_AP)
                ps = &sdata->bss->ps;
@@ -273,9 +274,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
                                                &txqi->flags))
                                continue;
 
-                       spin_unlock_bh(&fq->lock);
+                       spin_unlock(&fq->lock);
                        drv_wake_tx_queue(local, txqi);
-                       spin_lock_bh(&fq->lock);
+                       spin_lock(&fq->lock);
                }
        }
 
@@ -288,12 +289,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
            (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
                goto out;
 
-       spin_unlock_bh(&fq->lock);
+       spin_unlock(&fq->lock);
 
        drv_wake_tx_queue(local, txqi);
+       local_bh_enable();
        return;
 out:
-       spin_unlock_bh(&fq->lock);
+       spin_unlock(&fq->lock);
+       local_bh_enable();
 }
 
 static void
index 9c464d2..888d306 100644 (file)
@@ -613,7 +613,7 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
        if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
                ret = ip_vs_confirm_conntrack(skb);
        if (ret == NF_ACCEPT) {
-               nf_reset(skb);
+               nf_reset_ct(skb);
                skb_forward_csum(skb);
        }
        return ret;
index 0c63120..5cd610b 100644 (file)
@@ -1792,8 +1792,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
        if (nf_ct_is_confirmed(ct))
                extra_jiffies += nfct_time_stamp;
 
-       if (ct->timeout != extra_jiffies)
-               ct->timeout = extra_jiffies;
+       if (READ_ONCE(ct->timeout) != extra_jiffies)
+               WRITE_ONCE(ct->timeout, extra_jiffies);
 acct:
        if (do_acct)
                nf_ct_acct_update(ct, ctinfo, skb->len);
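
The __nf_ct_refresh_acct() hunk reads ct->timeout once and writes it back only when the value actually changes, so a hot lockless path stops dirtying a shared cache line, with the accesses annotated via READ_ONCE()/WRITE_ONCE(). A userspace sketch of that compare-before-store idea with simplified one-shot access macros:

#include <stdio.h>

#define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

struct conn { unsigned long timeout; };

static unsigned long skipped_writes;

/* Refresh the timeout, but skip the store when nothing would change. */
static void refresh_timeout(struct conn *ct, unsigned long extra_jiffies)
{
        if (READ_ONCE(ct->timeout) != extra_jiffies)
                WRITE_ONCE(ct->timeout, extra_jiffies);
        else
                skipped_writes++;
}

int main(void)
{
        struct conn ct = { .timeout = 0 };

        refresh_timeout(&ct, 1000);   /* stores 1000 */
        refresh_timeout(&ct, 1000);   /* identical value: store skipped */
        refresh_timeout(&ct, 1250);   /* stores 1250 */
        printf("timeout=%lu skipped=%lu\n", ct.timeout, skipped_writes);
        return 0;
}
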
index af1497a..69d6173 100644 (file)
@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
 static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
 {
        struct nft_connlimit *priv = nft_expr_priv(expr);
+       bool ret;
 
-       return nf_conncount_gc_list(net, &priv->list);
+       local_bh_disable();
+       ret = nf_conncount_gc_list(net, &priv->list);
+       local_bh_enable();
+
+       return ret;
 }
 
 static struct nft_expr_type nft_connlimit_type;
index 8dfea26..ccdd790 100644 (file)
@@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        llcp_sock->service_name = kmemdup(llcp_addr.service_name,
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
-
+       if (!llcp_sock->service_name) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               kfree(llcp_sock->service_name);
+               llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
                goto put_dev;
        }
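
The llcp_sock_bind() hunk adds the missing -ENOMEM check after kmemdup() and, on the later -EADDRINUSE failure, frees the just-duplicated service name and clears the pointer so nothing leaks or gets freed twice. A small userspace sketch of that unwinding shape (all names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sock_state { char *service_name; };

static int reserve_sap(const char *name)
{
        /* pretend the address is already taken for one specific name */
        return strcmp(name, "busy") == 0 ? -EADDRINUSE : 0;
}

static int bind_service(struct sock_state *s, const char *name)
{
        int ret;

        s->service_name = strdup(name);
        if (!s->service_name)
                return -ENOMEM;                 /* new check after kmemdup() */

        ret = reserve_sap(s->service_name);
        if (ret) {
                free(s->service_name);          /* undo the allocation ...    */
                s->service_name = NULL;         /* ... and forget the pointer */
                return ret;
        }
        return 0;
}

int main(void)
{
        struct sock_state s = { 0 };

        printf("bind ok:   %d\n", bind_service(&s, "printer"));
        free(s.service_name);
        s.service_name = NULL;
        printf("bind busy: %d\n", bind_service(&s, "busy"));
        return 0;
}
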
index 3572e11..1c77f52 100644 (file)
@@ -165,7 +165,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 {
        int err;
 
-       err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype);
+       err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
+                           skb->mac_len);
        if (err)
                return err;
 
@@ -178,7 +179,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 {
        int err;
 
-       err = skb_mpls_pop(skb, ethertype);
+       err = skb_mpls_pop(skb, ethertype, skb->mac_len);
        if (err)
                return err;
 
index d2437b5..21c90d3 100644 (file)
@@ -237,7 +237,7 @@ static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
        }
 
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        secpath_reset(skb);
 
        skb->pkt_type = PACKET_HOST;
index e2742b0..82a50e8 100644 (file)
@@ -1821,7 +1821,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
        skb_dst_drop(skb);
 
        /* drop conntrack reference */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        spkt = &PACKET_SKB_CB(skb)->sa.pkt;
 
@@ -2121,7 +2121,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        skb_dst_drop(skb);
 
        /* drop conntrack reference */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_packets++;
index 45acab2..9de2ae2 100644 (file)
@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
        refcount_set(&rds_ibdev->refcount, 1);
        INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
 
+       INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+       INIT_LIST_HEAD(&rds_ibdev->conn_list);
+
        rds_ibdev->max_wrs = device->attrs.max_qp_wr;
        rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
 
@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
                device->name,
                rds_ibdev->use_fastreg ? "FRMR" : "FMR");
 
-       INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
-       INIT_LIST_HEAD(&rds_ibdev->conn_list);
-
        down_write(&rds_ib_devices_lock);
        list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
        up_write(&rds_ib_devices_lock);
index 1091bf3..ecc17da 100644 (file)
@@ -556,6 +556,7 @@ struct rxrpc_call {
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
        struct rxrpc_net        *rxnet;         /* Network namespace to which call belongs */
+       const struct rxrpc_security *security;  /* applied security module */
        struct mutex            user_mutex;     /* User access mutex */
        unsigned long           ack_at;         /* When deferred ACK needs to happen */
        unsigned long           ack_lost_at;    /* When ACK is figured as lost */
index 00c095d..135bf5c 100644 (file)
@@ -84,7 +84,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                smp_store_release(&b->conn_backlog_head,
                                  (head + 1) & (size - 1));
 
-               trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage), here);
        }
 
@@ -97,7 +97,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        call->state = RXRPC_CALL_SERVER_PREALLOC;
 
-       trace_rxrpc_call(call, rxrpc_call_new_service,
+       trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
                         atomic_read(&call->usage),
                         here, (const void *)user_call_ID);
 
@@ -307,6 +307,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 
        rxrpc_see_call(call);
        call->conn = conn;
+       call->security = conn->security;
        call->peer = rxrpc_get_peer(conn->params.peer);
        call->cong_cwnd = call->peer->cong_cwnd;
        return call;
index 32d8dc6..a31c18c 100644 (file)
@@ -240,7 +240,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        if (p->intr)
                __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
        call->tx_total_len = p->tx_total_len;
-       trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+       trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
+                        atomic_read(&call->usage),
                         here, (const void *)p->user_call_ID);
 
        /* We need to protect a partially set up call against the user as we
@@ -290,8 +291,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        if (ret < 0)
                goto error;
 
-       trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
-                        here, NULL);
+       trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+                        atomic_read(&call->usage), here, NULL);
 
        rxrpc_start_call_timer(call);
 
@@ -313,8 +314,8 @@ error_dup_user_ID:
 error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
-       trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
-                        here, ERR_PTR(ret));
+       trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+                        atomic_read(&call->usage), here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
@@ -376,7 +377,8 @@ bool rxrpc_queue_call(struct rxrpc_call *call)
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
-               trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+               trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
+                                here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
@@ -391,7 +393,8 @@ bool __rxrpc_queue_call(struct rxrpc_call *call)
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
-               trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+               trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
+                                here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
@@ -406,7 +409,8 @@ void rxrpc_see_call(struct rxrpc_call *call)
        if (call) {
                int n = atomic_read(&call->usage);
 
-               trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+               trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
+                                here, NULL);
        }
 }
 
@@ -418,7 +422,7 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);
 
-       trace_rxrpc_call(call, op, n, here, NULL);
+       trace_rxrpc_call(call->debug_id, op, n, here, NULL);
 }
 
 /*
@@ -445,7 +449,8 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
 
-       trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+       trace_rxrpc_call(call->debug_id, rxrpc_call_release,
+                        atomic_read(&call->usage),
                         here, (const void *)call->flags);
 
        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -488,10 +493,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-       if (conn) {
+       if (conn)
                rxrpc_disconnect_call(call);
-               conn->security->free_call_crypto(call);
-       }
+       if (call->security)
+               call->security->free_call_crypto(call);
 
        rxrpc_cleanup_ring(call);
        _leave("");
@@ -534,12 +539,13 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = call->debug_id;
        int n;
 
        ASSERT(call != NULL);
 
        n = atomic_dec_return(&call->usage);
-       trace_rxrpc_call(call, op, n, here, NULL);
+       trace_rxrpc_call(debug_id, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
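
Several rxrpc hunks switch the tracepoints from taking the object pointer to taking a copied debug_id, and the final put paths snapshot that id into a local before dropping the reference, so the trace never dereferences memory the last put may already have freed. A stripped-down userspace sketch of the pattern (the refcount helpers and trace() are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct call { int refs; unsigned int debug_id; };

static void trace(const char *what, unsigned int debug_id, int refs)
{
        printf("%s: call %u usage=%d\n", what, debug_id, refs);
}

static void put_call(struct call *c)
{
        /* Copy the identifier while the object is certainly still alive. */
        unsigned int debug_id = c->debug_id;
        int n = --c->refs;

        trace("put", debug_id, n);      /* uses the copy, not the object */
        if (n == 0)
                free(c);                /* from here on, c must not be used */
}

int main(void)
{
        struct call *c = calloc(1, sizeof(*c));

        if (!c)
                return 1;
        c->refs = 2;
        c->debug_id = 42;
        put_call(c);    /* usage drops to 1 */
        put_call(c);    /* usage drops to 0, object freed after tracing */
        return 0;
}
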
index 3f1da1b..376370c 100644 (file)
@@ -212,7 +212,8 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
        rxrpc_get_local(conn->params.local);
        key_get(conn->params.key);
 
-       trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
+       trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
+                        atomic_read(&conn->usage),
                         __builtin_return_address(0));
        trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
        _leave(" = %p", conn);
@@ -352,6 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
 
        if (cp->exclusive) {
                call->conn = candidate;
+               call->security = candidate->security;
                call->security_ix = candidate->security_ix;
                call->service_id = candidate->service_id;
                _leave(" = 0 [exclusive %d]", candidate->debug_id);
@@ -403,6 +405,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
 candidate_published:
        set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
        call->conn = candidate;
+       call->security = candidate->security;
        call->security_ix = candidate->security_ix;
        call->service_id = candidate->service_id;
        spin_unlock(&local->client_conns_lock);
@@ -425,6 +428,7 @@ found_extant_conn:
 
        spin_lock(&conn->channel_lock);
        call->conn = conn;
+       call->security = conn->security;
        call->security_ix = conn->security_ix;
        call->service_id = conn->service_id;
        list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
@@ -985,11 +989,12 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
 void rxrpc_put_client_conn(struct rxrpc_connection *conn)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = conn->debug_id;
        int n;
 
        do {
                n = atomic_dec_return(&conn->usage);
-               trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
+               trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
                if (n > 0)
                        return;
                ASSERTCMP(n, >=, 0);
index ed05b69..38d718e 100644 (file)
@@ -269,7 +269,7 @@ bool rxrpc_queue_conn(struct rxrpc_connection *conn)
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&conn->processor))
-               trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
        else
                rxrpc_put_connection(conn);
        return true;
@@ -284,7 +284,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
        if (conn) {
                int n = atomic_read(&conn->usage);
 
-               trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
        }
 }
 
@@ -296,7 +296,7 @@ void rxrpc_get_connection(struct rxrpc_connection *conn)
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&conn->usage);
 
-       trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
+       trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
 }
 
 /*
@@ -310,7 +310,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
        if (conn) {
                int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
                if (n > 0)
-                       trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
+                       trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
                else
                        conn = NULL;
        }
@@ -333,10 +333,11 @@ static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = conn->debug_id;
        int n;
 
        n = atomic_dec_return(&conn->usage);
-       trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+       trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
        ASSERTCMP(n, >=, 0);
        if (n == 1)
                rxrpc_set_service_reap_timer(conn->params.local->rxnet,
@@ -420,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                 */
                if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
                        continue;
-               trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
 
                if (rxrpc_conn_is_client(conn))
                        BUG();
index b30e13f..123d6ce 100644 (file)
@@ -134,7 +134,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
                list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
                write_unlock(&rxnet->conn_lock);
 
-               trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage),
                                 __builtin_return_address(0));
        }
index c97ebdc..48f67a9 100644 (file)
@@ -147,10 +147,16 @@ void rxrpc_error_report(struct sock *sk)
 {
        struct sock_exterr_skb *serr;
        struct sockaddr_rxrpc srx;
-       struct rxrpc_local *local = sk->sk_user_data;
+       struct rxrpc_local *local;
        struct rxrpc_peer *peer;
        struct sk_buff *skb;
 
+       rcu_read_lock();
+       local = rcu_dereference_sk_user_data(sk);
+       if (unlikely(!local)) {
+               rcu_read_unlock();
+               return;
+       }
        _enter("%p{%d}", sk, local->debug_id);
 
        /* Clear the outstanding error value on the socket so that it doesn't
@@ -160,6 +166,7 @@ void rxrpc_error_report(struct sock *sk)
 
        skb = sock_dequeue_err_skb(sk);
        if (!skb) {
+               rcu_read_unlock();
                _leave("UDP socket errqueue empty");
                return;
        }
@@ -167,11 +174,11 @@ void rxrpc_error_report(struct sock *sk)
        serr = SKB_EXT_ERR(skb);
        if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
                _leave("UDP empty message");
+               rcu_read_unlock();
                rxrpc_free_skb(skb, rxrpc_skb_freed);
                return;
        }
 
-       rcu_read_lock();
        peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
        if (peer && !rxrpc_get_peer_maybe(peer))
                peer = NULL;
index 9c3ac96..64830d8 100644 (file)
@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
        peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
        if (peer) {
                atomic_set(&peer->usage, 1);
-               peer->local = local;
+               peer->local = rxrpc_get_local(local);
                INIT_HLIST_HEAD(&peer->error_targets);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
@@ -307,7 +307,6 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
        unsigned long hash_key;
 
        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
-       peer->local = local;
        rxrpc_init_peer(rx, peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
@@ -382,7 +381,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
        int n;
 
        n = atomic_inc_return(&peer->usage);
-       trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
+       trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
        return peer;
 }
 
@@ -396,7 +395,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
        if (peer) {
                int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
                if (n > 0)
-                       trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
+                       trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
                else
                        peer = NULL;
        }
@@ -417,6 +416,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
        list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
+       rxrpc_put_local(peer->local);
        kfree_rcu(peer, rcu);
 }
 
@@ -426,11 +426,13 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 void rxrpc_put_peer(struct rxrpc_peer *peer)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id;
        int n;
 
        if (peer) {
+               debug_id = peer->debug_id;
                n = atomic_dec_return(&peer->usage);
-               trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+               trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
                if (n == 0)
                        __rxrpc_put_peer(peer);
        }
@@ -443,13 +445,15 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = peer->debug_id;
        int n;
 
        n = atomic_dec_return(&peer->usage);
-       trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+       trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
        if (n == 0) {
                hash_del_rcu(&peer->hash_link);
                list_del_init(&peer->keepalive_link);
+               rxrpc_put_local(peer->local);
                kfree_rcu(peer, rcu);
        }
 }
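
The peer_object.c hunks make each peer take its own reference on the local endpoint at allocation time (rxrpc_get_local()) and drop it when the peer is destroyed, instead of borrowing a pointer it does not own. A tiny userspace sketch of that child-pins-parent refcounting (types and helpers are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct local { int refs; };
struct peer  { struct local *local; };

static struct local *get_local(struct local *l) { l->refs++; return l; }
static void put_local(struct local *l)
{
        if (--l->refs == 0) {
                printf("local freed\n");
                free(l);
        }
}

static struct peer *alloc_peer(struct local *l)
{
        struct peer *p = calloc(1, sizeof(*p));

        if (p)
                p->local = get_local(l);   /* peer now pins the local */
        return p;
}

static void free_peer(struct peer *p)
{
        put_local(p->local);               /* drop the pin on teardown */
        free(p);
}

int main(void)
{
        struct local *l = calloc(1, sizeof(*l));
        struct peer *p;

        if (!l)
                return 1;
        l->refs = 1;                       /* creator's reference */
        p = alloc_peer(l);
        put_local(l);                      /* creator lets go early ...       */
        if (p)
                free_peer(p);              /* ... local survives until here   */
        return 0;
}
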
index 3b0becb..a409079 100644 (file)
@@ -251,8 +251,8 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
                seq += subpacket;
        }
 
-       return call->conn->security->verify_packet(call, skb, offset, len,
-                                                  seq, cksum);
+       return call->security->verify_packet(call, skb, offset, len,
+                                            seq, cksum);
 }
 
 /*
@@ -291,7 +291,7 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        *_offset = offset;
        *_len = len;
-       call->conn->security->locate_data(call, skb, _offset, _len);
+       call->security->locate_data(call, skb, _offset, _len);
        return 0;
 }
 
index 6a1547b..813fd68 100644 (file)
@@ -419,7 +419,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                                 call->tx_winsize)
                                sp->hdr.flags |= RXRPC_MORE_PACKETS;
 
-                       ret = conn->security->secure_packet(
+                       ret = call->security->secure_packet(
                                call, skb, skb->mark, skb->head);
                        if (ret < 0)
                                goto out;
@@ -661,6 +661,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                case RXRPC_CALL_SERVER_PREALLOC:
                case RXRPC_CALL_SERVER_SECURING:
                case RXRPC_CALL_SERVER_ACCEPTING:
+                       rxrpc_put_call(call, rxrpc_call_put);
                        ret = -EBUSY;
                        goto error_release_sock;
                default:
index 2558f00..69d4676 100644 (file)
@@ -832,8 +832,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 }
 
 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
-       [TCA_ACT_KIND]          = { .type = NLA_NUL_STRING,
-                                   .len = IFNAMSIZ - 1 },
+       [TCA_ACT_KIND]          = { .type = NLA_STRING },
        [TCA_ACT_INDEX]         = { .type = NLA_U32 },
        [TCA_ACT_COOKIE]        = { .type = NLA_BINARY,
                                    .len = TC_COOKIE_MAX_SIZE },
@@ -865,8 +864,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                        NL_SET_ERR_MSG(extack, "TC action kind must be specified");
                        goto err_out;
                }
-               nla_strlcpy(act_name, kind, IFNAMSIZ);
-
+               if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
+                       NL_SET_ERR_MSG(extack, "TC action name too long");
+                       goto err_out;
+               }
                if (tb[TCA_ACT_COOKIE]) {
                        cookie = nla_memdup_cookie(tb);
                        if (!cookie) {
@@ -1352,11 +1353,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct netlink_ext_ack *extack)
 {
        size_t attr_size = 0;
-       int ret = 0;
+       int loop, ret;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
-       ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
-                             &attr_size, true, extack);
+       for (loop = 0; loop < 10; loop++) {
+               ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+                                     actions, &attr_size, true, extack);
+               if (ret != -EAGAIN)
+                       break;
+       }
+
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
@@ -1406,11 +1412,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
                 */
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
-replay:
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
                                     extack);
-               if (ret == -EAGAIN)
-                       goto replay;
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
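
The act_api.c changes validate the action kind with nla_strlcpy(), rejecting an over-long name instead of silently truncating it, and bound the -EAGAIN replay in tcf_action_add() to ten attempts rather than looping indefinitely. A userspace sketch of both ideas with a strlcpy-style copy and a stubbed init function (every name here is illustrative):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NAMESIZ 16

/* strlcpy-like: returns the length of src, so a return >= size means
 * the name did not fit and should be rejected, not truncated. */
static size_t copy_name(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;
                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

/* Pretend the first two attempts race with a module load and ask for replay. */
static int init_action(void)
{
        static int calls;
        return ++calls <= 2 ? -EAGAIN : 0;
}

static int add_action(const char *kind)
{
        char name[NAMESIZ];
        int loop, ret = -EINVAL;

        if (copy_name(name, kind, sizeof(name)) >= sizeof(name))
                return -EINVAL;                 /* "action name too long" */

        for (loop = 0; loop < 10; loop++) {     /* bounded replay, not goto */
                ret = init_action();
                if (ret != -EAGAIN)
                        break;
        }
        return ret;
}

int main(void)
{
        printf("%d\n", add_action("mirred"));                    /* 0         */
        printf("%d\n", add_action("a-very-long-action-name"));   /* -EINVAL   */
        return 0;
}
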
index 9ce073a..08923b2 100644 (file)
@@ -484,7 +484,11 @@ static int __init mirred_init_module(void)
                return err;
 
        pr_info("Mirror/redirect action on\n");
-       return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+       err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+       if (err)
+               unregister_netdevice_notifier(&mirred_device_notifier);
+
+       return err;
 }
 
 static void __exit mirred_cleanup_module(void)
index e168df0..4cf6c55 100644 (file)
@@ -55,7 +55,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_mpls *m = to_mpls(a);
        struct tcf_mpls_params *p;
        __be32 new_lse;
-       int ret;
+       int ret, mac_len;
 
        tcf_lastuse_update(&m->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
@@ -63,8 +63,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
        /* Ensure 'data' points at mac_header prior to calling mpls manipulating
         * functions.
         */
-       if (skb_at_tc_ingress(skb))
+       if (skb_at_tc_ingress(skb)) {
                skb_push_rcsum(skb, skb->mac_len);
+               mac_len = skb->mac_len;
+       } else {
+               mac_len = skb_network_header(skb) - skb_mac_header(skb);
+       }
 
        ret = READ_ONCE(m->tcf_action);
 
@@ -72,12 +76,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
 
        switch (p->tcfm_action) {
        case TCA_MPLS_ACT_POP:
-               if (skb_mpls_pop(skb, p->tcfm_proto))
+               if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
                        goto drop;
                break;
        case TCA_MPLS_ACT_PUSH:
                new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
-               if (skb_mpls_push(skb, new_lse, p->tcfm_proto))
+               if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
                        goto drop;
                break;
        case TCA_MPLS_ACT_MODIFY:
index 64584a1..8717c0b 100644 (file)
@@ -162,11 +162,22 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
        return TC_H_MAJ(first);
 }
 
+static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
+{
+       if (kind)
+               return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
+       memset(name, 0, IFNAMSIZ);
+       return false;
+}
+
 static bool tcf_proto_is_unlocked(const char *kind)
 {
        const struct tcf_proto_ops *ops;
        bool ret;
 
+       if (strlen(kind) == 0)
+               return false;
+
        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take rtnl lock. Proto lookup/create
         * functions will perform lookup again and properly handle errors.
@@ -1843,6 +1854,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
+       char name[IFNAMSIZ];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
@@ -1899,13 +1911,19 @@ replay:
        if (err)
                return err;
 
+       if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+               NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+               err = -EINVAL;
+               goto errout;
+       }
+
        /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
         * block is shared (no qdisc found), qdisc is not unlocked, classifier
         * type is not specified, classifier is not unlocked.
         */
        if (rtnl_held ||
            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
-           !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+           !tcf_proto_is_unlocked(name)) {
                rtnl_held = true;
                rtnl_lock();
        }
@@ -2063,6 +2081,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
+       char name[IFNAMSIZ];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
@@ -2102,13 +2121,18 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
        if (err)
                return err;
 
+       if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+               NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+               err = -EINVAL;
+               goto errout;
+       }
        /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
         * found), qdisc is not unlocked, classifier type is not specified,
         * classifier is not unlocked.
         */
        if (!prio ||
            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
-           !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+           !tcf_proto_is_unlocked(name)) {
                rtnl_held = true;
                rtnl_lock();
        }
@@ -2216,6 +2240,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
+       char name[IFNAMSIZ];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
@@ -2252,12 +2277,17 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
        if (err)
                return err;
 
+       if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+               NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+               err = -EINVAL;
+               goto errout;
+       }
        /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
         * unlocked, classifier type is not specified, classifier is not
         * unlocked.
         */
        if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
-           !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+           !tcf_proto_is_unlocked(name)) {
                rtnl_held = true;
                rtnl_lock();
        }
index 82bd14e..3177dcb 100644 (file)
@@ -446,7 +446,7 @@ META_COLLECTOR(int_sk_wmem_queued)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_wmem_queued;
+       dst->value = READ_ONCE(sk->sk_wmem_queued);
 }
 
 META_COLLECTOR(int_sk_fwd_alloc)
@@ -554,7 +554,7 @@ META_COLLECTOR(int_sk_rcvlowat)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_rcvlowat;
+       dst->value = READ_ONCE(sk->sk_rcvlowat);
 }
 
 META_COLLECTOR(int_sk_rcvtimeo)
index 81d58b2..1047825 100644 (file)
@@ -1390,8 +1390,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
 }
 
 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
-       [TCA_KIND]              = { .type = NLA_NUL_STRING,
-                                   .len = IFNAMSIZ - 1 },
+       [TCA_KIND]              = { .type = NLA_STRING },
        [TCA_RATE]              = { .type = NLA_BINARY,
                                    .len = sizeof(struct tc_estimator) },
        [TCA_STAB]              = { .type = NLA_NESTED },
index 06c7a2d..39b427d 100644 (file)
@@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
 };
 
+static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
+                        struct nlattr *opt,
+                        struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
+                                         cbq_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_CBQ_WRROPT]) {
+               const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
+
+               if (wrr->priority > TC_CBQ_MAXPRIO) {
+                       NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
+                       err = -EINVAL;
+               }
+       }
+       return err;
+}
+
 static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
 {
@@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;
 
-       if (!opt) {
-               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
-               return -EINVAL;
-       }
-
-       err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
-                                         extack);
+       err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
                return err;
 
@@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;
 
-       if (!opt) {
-               NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
-               return -EINVAL;
-       }
-
-       err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
-                                         extack);
+       err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
                return err;
 
index 1bef152..b2905b0 100644 (file)
@@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
        if (err < 0)
                goto skip;
 
-       if (ecmd.base.speed != SPEED_UNKNOWN)
+       if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;
 
 skip:
index bad1cbe..05605b3 100644 (file)
@@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
                goto errout;
 
        err = -EINVAL;
+       if (!tb[TCA_DSMARK_INDICES])
+               goto errout;
        indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
 
        if (hweight32(indices) != 1)
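
The dsmark hunk above first checks that TCA_DSMARK_INDICES is actually present, then keeps the existing requirement that the value have exactly one bit set, i.e. be a power of two. A small standalone illustration of the hweight32(x) == 1 idiom (hweight32 is reimplemented here just for the example):

#include <stdio.h>

/* Population count; hweight32(x) == 1 is the kernel idiom for "x is a
 * power of two". */
static int hweight32(unsigned int x)
{
	int n = 0;

	for (; x; x &= x - 1)	/* clear the lowest set bit each round */
		n++;
	return n;
}

int main(void)
{
	unsigned int v[] = { 0, 1, 6, 64, 65535 };

	for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("%u -> %s\n", v[i],
		       hweight32(v[i]) == 1 ? "power of two" : "rejected");
	return 0;
}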
index cebfb65..b1da558 100644 (file)
@@ -177,7 +177,7 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
 
                parent = *p;
                skb = rb_to_skb(parent);
-               if (ktime_after(txtime, skb->tstamp)) {
+               if (ktime_compare(txtime, skb->tstamp) >= 0) {
                        p = &parent->rb_right;
                        leftmost = false;
                } else {
index 0e44039..42e557d 100644 (file)
@@ -509,6 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(skb)) {
                        qdisc_drop(skb, sch, to_free);
+                       skb = NULL;
                        goto finish_segs;
                }
 
@@ -593,9 +594,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 finish_segs:
        if (segs) {
                unsigned int len, last_len;
-               int nb = 0;
+               int nb;
 
-               len = skb->len;
+               len = skb ? skb->len : 0;
+               nb = skb ? 1 : 0;
 
                while (segs) {
                        skb2 = segs->next;
@@ -612,7 +614,10 @@ finish_segs:
                        }
                        segs = skb2;
                }
-               qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
+               /* Parent qdiscs accounted for 1 skb of size @prev_len */
+               qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
+       } else if (!skb) {
+               return NET_XMIT_DROP;
        }
        return NET_XMIT_SUCCESS;
 }
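
A worked example of the backlog correction above, using made-up numbers: the parent qdiscs had already accounted for one skb of prev_len bytes, while nb segments totalling len bytes were actually enqueued, so the tree is adjusted by the difference between the two:

#include <stdio.h>

int main(void)
{
	int prev_len = 3000;	/* original GSO skb size counted by parents */
	int nb = 2;		/* segments actually enqueued */
	int len = 2900;		/* bytes actually enqueued (one segment dropped) */

	int pkt_delta  = nb - 1;		/* extra packets parents must add */
	int byte_delta = len - prev_len;	/* extra bytes (negative = removed) */

	/* the hunk above passes these as
	 * qdisc_tree_reduce_backlog(sch, -pkt_delta, -byte_delta) */
	printf("packets %+d, bytes %+d\n", pkt_delta, byte_delta);
	return 0;
}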
index 2f7b342..6719a65 100644 (file)
@@ -1044,12 +1044,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
        if (err < 0)
                goto skip;
 
-       if (ecmd.base.speed != SPEED_UNKNOWN)
+       if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;
 
 skip:
-       picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
-                                  speed * 1000 * 1000);
+       picos_per_byte = (USEC_PER_SEC * 8) / speed;
 
        atomic64_set(&q->picos_per_byte, picos_per_byte);
        netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1342,6 +1341,10 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
                NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
                goto out;
        }
+
+       /* Everything went ok, return success. */
+       err = 0;
+
 out:
        return err;
 }
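
For the taprio picos_per_byte hunk above, assuming speed is expressed in Mbit/s as in the surrounding code, the old and new expressions are algebraically identical: both reduce to 8,000,000 / speed picoseconds per byte. A quick standalone check:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL
#define USEC_PER_SEC 1000000LL

int main(void)
{
	long long speed = 1000;	/* link speed in Mbit/s (1 Gbit/s) */

	long long old_val = (NSEC_PER_SEC * 1000LL * 8) / (speed * 1000 * 1000);
	long long new_val = (USEC_PER_SEC * 8) / speed;

	/* both print 8000 ps per byte for a 1 Gbit/s link */
	printf("old=%lld new=%lld\n", old_val, new_val);
	return 0;
}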
index fc9a4c6..0851166 100644 (file)
@@ -175,7 +175,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
                mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
                mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
                mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
-               mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+               mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
                mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
 
                if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
index 1008cdc..2277981 100644 (file)
@@ -201,7 +201,7 @@ int sctp_rcv(struct sk_buff *skb)
 
        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
                goto discard_release;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (sk_filter(sk, skb))
                goto discard_release;
@@ -243,7 +243,7 @@ int sctp_rcv(struct sk_buff *skb)
                bh_lock_sock(sk);
        }
 
-       if (sock_owned_by_user(sk)) {
+       if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
                if (sctp_add_backlog(sk, skb)) {
                        bh_unlock_sock(sk);
                        sctp_chunk_free(chunk);
@@ -321,8 +321,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                local_bh_disable();
                bh_lock_sock(sk);
 
-               if (sock_owned_by_user(sk)) {
-                       if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+               if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+                       if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                                sctp_chunk_free(chunk);
                        else
                                backloged = 1;
@@ -336,7 +336,13 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                if (backloged)
                        return 0;
        } else {
-               sctp_inq_push(inqueue, chunk);
+               if (!sctp_newsk_ready(sk)) {
+                       if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+                               return 0;
+                       sctp_chunk_free(chunk);
+               } else {
+                       sctp_inq_push(inqueue, chunk);
+               }
        }
 
 done:
@@ -358,7 +364,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
        struct sctp_ep_common *rcvr = chunk->rcvr;
        int ret;
 
-       ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+       ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
        if (!ret) {
                /* Hold the assoc/ep while hanging on the backlog queue.
                 * This way, we know structures we need will not disappear
index e41ed2e..48d6395 100644 (file)
@@ -2155,7 +2155,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
        case SCTP_PARAM_SET_PRIMARY:
                if (ep->asconf_enable)
                        break;
-               goto fallthrough;
+               goto unhandled;
 
        case SCTP_PARAM_HOST_NAME_ADDRESS:
                /* Tell the peer, we won't support this param.  */
@@ -2166,11 +2166,11 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
        case SCTP_PARAM_FWD_TSN_SUPPORT:
                if (ep->prsctp_enable)
                        break;
-               goto fallthrough;
+               goto unhandled;
 
        case SCTP_PARAM_RANDOM:
                if (!ep->auth_enable)
-                       goto fallthrough;
+                       goto unhandled;
 
                /* SCTP-AUTH: Secion 6.1
                 * If the random number is not 32 byte long the association
@@ -2187,7 +2187,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
 
        case SCTP_PARAM_CHUNKS:
                if (!ep->auth_enable)
-                       goto fallthrough;
+                       goto unhandled;
 
                /* SCTP-AUTH: Section 3.2
                 * The CHUNKS parameter MUST be included once in the INIT or
@@ -2203,7 +2203,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
 
        case SCTP_PARAM_HMAC_ALGO:
                if (!ep->auth_enable)
-                       goto fallthrough;
+                       goto unhandled;
 
                hmacs = (struct sctp_hmac_algo_param *)param.p;
                n_elt = (ntohs(param.p->length) -
@@ -2226,7 +2226,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
                        retval = SCTP_IERROR_ABORT;
                }
                break;
-fallthrough:
+unhandled:
        default:
                pr_debug("%s: unrecognized param:%d for chunk:%d\n",
                         __func__, ntohs(param.p->type), cid);
index 939b8d2..5ca0ec0 100644 (file)
@@ -9500,7 +9500,7 @@ struct proto sctp_prot = {
        .backlog_rcv =  sctp_backlog_rcv,
        .hash        =  sctp_hash,
        .unhash      =  sctp_unhash,
-       .get_port    =  sctp_get_port,
+       .no_autobind =  true,
        .obj_size    =  sizeof(struct sctp_sock),
        .useroffset  =  offsetof(struct sctp_sock, subscribe),
        .usersize    =  offsetof(struct sctp_sock, initmsg) -
@@ -9542,7 +9542,7 @@ struct proto sctpv6_prot = {
        .backlog_rcv    = sctp_backlog_rcv,
        .hash           = sctp_hash,
        .unhash         = sctp_unhash,
-       .get_port       = sctp_get_port,
+       .no_autobind    = true,
        .obj_size       = sizeof(struct sctp6_sock),
        .useroffset     = offsetof(struct sctp6_sock, sctp.subscribe),
        .usersize       = offsetof(struct sctp6_sock, sctp.initmsg) -
index 4ca50dd..88556f0 100644 (file)
@@ -213,7 +213,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = SMC_CLC_DECL_MEM;
-               goto out;
+               goto ism_put_vlan;
        }
        lgr->is_smcd = ini->is_smcd;
        lgr->sync_err = 0;
@@ -289,6 +289,9 @@ clear_llc_lnk:
        smc_llc_link_clear(lnk);
 free_lgr:
        kfree(lgr);
+ism_put_vlan:
+       if (ini->is_smcd && ini->vlan_id)
+               smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
 out:
        if (rc < 0) {
                if (rc == -ENOMEM)
index 413a6ab..97e8369 100644 (file)
@@ -211,8 +211,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
        rc = sk_wait_event(sk, timeo,
                           sk->sk_err ||
                           sk->sk_shutdown & RCV_SHUTDOWN ||
-                          fcrit(conn) ||
-                          smc_cdc_rxed_any_close_or_senddone(conn),
+                          fcrit(conn),
                           &wait);
        remove_wait_queue(sk_sleep(sk), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -262,6 +261,18 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
        return -EAGAIN;
 }
 
+static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+{
+       struct smc_connection *conn = &smc->conn;
+
+       if (smc_rx_data_available(conn))
+               return true;
+       else if (conn->urg_state == SMC_URG_VALID)
+               /* we received a single urgent Byte - skip */
+               smc_rx_update_cons(smc, 0);
+       return false;
+}
+
 /* smc_rx_recvmsg - receive data from RMBE
  * @msg:       copy data to receive buffer
  * @pipe:      copy data to pipe if set - indicates splice() call
@@ -303,16 +314,18 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
                if (read_done >= target || (pipe && read_done))
                        break;
 
-               if (atomic_read(&conn->bytes_to_rcv))
+               if (smc_rx_recvmsg_data_available(smc))
                        goto copy;
-               else if (conn->urg_state == SMC_URG_VALID)
-                       /* we received a single urgent Byte - skip */
-                       smc_rx_update_cons(smc, 0);
 
                if (sk->sk_shutdown & RCV_SHUTDOWN ||
-                   smc_cdc_rxed_any_close_or_senddone(conn) ||
-                   conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
+                   conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
+                       /* smc_cdc_msg_recv_action() could have run after
+                        * above smc_rx_recvmsg_data_available()
+                        */
+                       if (smc_rx_recvmsg_data_available(smc))
+                               goto copy;
                        break;
+               }
 
                if (read_done) {
                        if (sk->sk_err ||
index 9ac8872..70e52f5 100644 (file)
@@ -1249,19 +1249,21 @@ static void xs_error_report(struct sock *sk)
 {
        struct sock_xprt *transport;
        struct rpc_xprt *xprt;
-       int err;
 
        read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
 
        transport = container_of(xprt, struct sock_xprt, xprt);
-       err = -sk->sk_err;
-       if (err == 0)
+       transport->xprt_err = -sk->sk_err;
+       if (transport->xprt_err == 0)
                goto out;
        dprintk("RPC:       xs_error_report client %p, error=%d...\n",
-                       xprt, -err);
-       trace_rpc_socket_error(xprt, sk->sk_socket, err);
+                       xprt, -transport->xprt_err);
+       trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
+
+       /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
+       smp_mb__before_atomic();
        xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
  out:
        read_unlock_bh(&sk->sk_callback_lock);
@@ -2476,7 +2478,6 @@ static void xs_wake_write(struct sock_xprt *transport)
 static void xs_wake_error(struct sock_xprt *transport)
 {
        int sockerr;
-       int sockerr_len = sizeof(sockerr);
 
        if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
                return;
@@ -2485,9 +2486,7 @@ static void xs_wake_error(struct sock_xprt *transport)
                goto out;
        if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
                goto out;
-       if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR,
-                               (char *)&sockerr, &sockerr_len) != 0)
-               goto out;
+       sockerr = xchg(&transport->xprt_err, 0);
        if (sockerr < 0)
                xprt_wake_pending_tasks(&transport->xprt, sockerr);
 out:
index 6cc75ff..999eab5 100644 (file)
@@ -160,6 +160,7 @@ struct tipc_link {
        struct {
                u16 len;
                u16 limit;
+               struct sk_buff *target_bskb;
        } backlog[5];
        u16 snd_nxt;
        u16 window;
@@ -880,6 +881,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
 void tipc_link_reset(struct tipc_link *l)
 {
        struct sk_buff_head list;
+       u32 imp;
 
        __skb_queue_head_init(&list);
 
@@ -901,11 +903,10 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        __skb_queue_purge(&l->failover_deferdq);
-       l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
-       l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
-       l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
-       l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
-       l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+       for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
+               l->backlog[imp].len = 0;
+               l->backlog[imp].target_bskb = NULL;
+       }
        kfree_skb(l->reasm_buf);
        kfree_skb(l->reasm_tnlmsg);
        kfree_skb(l->failover_reasm_skb);
@@ -947,7 +948,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
-       struct sk_buff *skb, *_skb, *bskb;
+       struct sk_buff *skb, *_skb, **tskb;
        int pkt_cnt = skb_queue_len(list);
        int rc = 0;
 
@@ -999,19 +1000,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        seqno++;
                        continue;
                }
-               if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+               tskb = &l->backlog[imp].target_bskb;
+               if (tipc_msg_bundle(*tskb, hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
-               if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+               if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
-                       __skb_queue_tail(backlogq, bskb);
-                       l->backlog[msg_importance(buf_msg(bskb))].len++;
+                       __skb_queue_tail(backlogq, *tskb);
+                       l->backlog[imp].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
+               l->backlog[imp].target_bskb = NULL;
                l->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
@@ -1027,6 +1030,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+       u32 imp;
 
        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
@@ -1037,7 +1041,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
-               l->backlog[msg_importance(hdr)].len--;
+               imp = msg_importance(hdr);
+               l->backlog[imp].len--;
+               if (unlikely(skb == l->backlog[imp].target_bskb))
+                       l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
                /* next retransmit attempt */
                if (link_is_bc_sndlink(l))
index e6d49cd..922d262 100644 (file)
@@ -543,10 +543,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
        bmsg = buf_msg(_skb);
        tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
                      INT_H_SIZE, dnode);
-       if (msg_isdata(msg))
-               msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
-       else
-               msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
+       msg_set_importance(bmsg, msg_importance(msg));
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
index 3b9f8cc..f8bbc4a 100644 (file)
@@ -2119,13 +2119,13 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
        struct tipc_msg *hdr = buf_msg(skb);
 
        if (unlikely(msg_in_group(hdr)))
-               return sk->sk_rcvbuf;
+               return READ_ONCE(sk->sk_rcvbuf);
 
        if (unlikely(!msg_connected(hdr)))
-               return sk->sk_rcvbuf << msg_importance(hdr);
+               return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
 
        if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
-               return sk->sk_rcvbuf;
+               return READ_ONCE(sk->sk_rcvbuf);
 
        return FLOWCTL_MSG_LIM;
 }
@@ -3790,7 +3790,7 @@ int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
        i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
        i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
        i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
-       i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
+       i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
 
        if (dqueues & TIPC_DUMP_SK_SNDQ) {
                i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
index ab47bf3..2ab43b2 100644 (file)
@@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net,
 }
 EXPORT_SYMBOL_GPL(__vsock_create);
 
-static void __vsock_release(struct sock *sk)
+static void __vsock_release(struct sock *sk, int level)
 {
        if (sk) {
                struct sk_buff *skb;
@@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk)
                vsk = vsock_sk(sk);
                pending = NULL; /* Compiler warning. */
 
+               /* The release call is supposed to use lock_sock_nested()
+                * rather than lock_sock(), if a sock lock should be acquired.
+                */
                transport->release(vsk);
 
-               lock_sock(sk);
+               /* When "level" is SINGLE_DEPTH_NESTING, use the nested
+                * version to avoid the warning "possible recursive locking
+                * detected". When "level" is 0, lock_sock_nested(sk, level)
+                * is the same as lock_sock(sk).
+                */
+               lock_sock_nested(sk, level);
                sock_orphan(sk);
                sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk)
 
                /* Clean up any sockets that never were accepted. */
                while ((pending = vsock_dequeue_accept(sk)) != NULL) {
-                       __vsock_release(pending);
+                       __vsock_release(pending, SINGLE_DEPTH_NESTING);
                        sock_put(pending);
                }
 
@@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
 
 static int vsock_release(struct socket *sock)
 {
-       __vsock_release(sock->sk);
+       __vsock_release(sock->sk, 0);
        sock->sk = NULL;
        sock->state = SS_FREE;
 
index 261521d..c443db7 100644 (file)
@@ -559,7 +559,7 @@ static void hvs_release(struct vsock_sock *vsk)
        struct sock *sk = sk_vsock(vsk);
        bool remove_sock;
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        remove_sock = hvs_close_lock_held(vsk);
        release_sock(sk);
        if (remove_sock)
index 5bb70c6..481f7f8 100644 (file)
@@ -204,10 +204,14 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
        return virtio_transport_get_ops()->send_pkt(pkt);
 }
 
-static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
                                        struct virtio_vsock_pkt *pkt)
 {
+       if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
+               return false;
+
        vvs->rx_bytes += pkt->len;
+       return true;
 }
 
 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
@@ -458,6 +462,9 @@ void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
                vvs->buf_size_max = val;
        vvs->buf_size = val;
        vvs->buf_alloc = val;
+
+       virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
+                                           NULL);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
 
@@ -820,7 +827,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
        struct sock *sk = &vsk->sk;
        bool remove_sock = true;
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        if (sk->sk_type == SOCK_STREAM)
                remove_sock = virtio_transport_close(vsk);
 
@@ -876,14 +883,18 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
                              struct virtio_vsock_pkt *pkt)
 {
        struct virtio_vsock_sock *vvs = vsk->trans;
-       bool free_pkt = false;
+       bool can_enqueue, free_pkt = false;
 
        pkt->len = le32_to_cpu(pkt->hdr.len);
        pkt->off = 0;
 
        spin_lock_bh(&vvs->rx_lock);
 
-       virtio_transport_inc_rx_pkt(vvs, pkt);
+       can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
+       if (!can_enqueue) {
+               free_pkt = true;
+               goto out;
+       }
 
        /* Try to copy small packets into the buffer of last packet queued,
         * to avoid wasting memory queueing the entire buffer with a small
index d21b158..4453dd3 100644 (file)
@@ -201,6 +201,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
        return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
+static int validate_beacon_head(const struct nlattr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       const u8 *data = nla_data(attr);
+       unsigned int len = nla_len(attr);
+       const struct element *elem;
+       const struct ieee80211_mgmt *mgmt = (void *)data;
+       unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+                                        u.beacon.variable);
+
+       if (len < fixedlen)
+               goto err;
+
+       if (ieee80211_hdrlen(mgmt->frame_control) !=
+           offsetof(struct ieee80211_mgmt, u.beacon))
+               goto err;
+
+       data += fixedlen;
+       len -= fixedlen;
+
+       for_each_element(elem, data, len) {
+               /* nothing */
+       }
+
+       if (for_each_element_completed(elem, data, len))
+               return 0;
+
+err:
+       NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
+       return -EINVAL;
+}
+
 static int validate_ie_attr(const struct nlattr *attr,
                            struct netlink_ext_ack *extack)
 {
@@ -338,8 +370,9 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 
        [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
        [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
-       [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
-                                      .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_BEACON_HEAD] =
+               NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
+                                      IEEE80211_MAX_DATA_LEN),
        [NL80211_ATTR_BEACON_TAIL] =
                NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
                                       IEEE80211_MAX_DATA_LEN),
@@ -2636,6 +2669,8 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 
        control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
 
+       memset(chandef, 0, sizeof(*chandef));
+
        chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
        chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
        chandef->center_freq1 = control_freq;
@@ -3176,7 +3211,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
        if (rdev->ops->get_channel) {
                int ret;
-               struct cfg80211_chan_def chandef;
+               struct cfg80211_chan_def chandef = {};
 
                ret = rdev_get_channel(rdev, wdev, &chandef);
                if (ret == 0) {
@@ -6270,6 +6305,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->del_mpath)
                return -EOPNOTSUPP;
 
+       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+               return -EOPNOTSUPP;
+
        return rdev_del_mpath(rdev, dev, dst);
 }
 
@@ -13644,7 +13682,7 @@ static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_FTM_RESPONDER_STATS);
        if (!hdr)
-               return -ENOBUFS;
+               goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
                goto nla_put_failure;
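
Several of the nl80211 hunks above tighten validation of element (TLV) buffers supplied from userspace, accepting a beacon head only if walking its elements lands exactly on the end of the data. A self-contained sketch of that walk (struct element and elements_ok here are a simplified illustration, not the cfg80211 types):

#include <stdio.h>
#include <stddef.h>

/* One information element: 1-byte id, 1-byte length, then len bytes. */
struct element {
	unsigned char id;
	unsigned char len;
	unsigned char data[];
};

/* Return 1 if the buffer is a well-formed chain of elements ending exactly
 * at buf + size, 0 otherwise (truncated or overlong element). */
static int elements_ok(const unsigned char *buf, size_t size)
{
	size_t pos = 0;

	while (size - pos >= 2) {
		const struct element *e = (const void *)(buf + pos);

		if (pos + 2 + e->len > size)
			return 0;	/* element runs past the buffer */
		pos += 2 + e->len;
	}
	return pos == size;		/* no trailing garbage */
}

int main(void)
{
	unsigned char good[] = { 0x00, 0x03, 'a', 'b', 'c', 0xdd, 0x00 };
	unsigned char bad[]  = { 0x00, 0x10, 'a' };	/* claims 16 bytes, has 1 */

	printf("%d %d\n", elements_ok(good, sizeof(good)),
	       elements_ok(bad, sizeof(bad)));	/* prints: 1 0 */
	return 0;
}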
index 5311d0a..446c76d 100644 (file)
@@ -2108,7 +2108,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        enum nl80211_iftype iftype;
 
@@ -3883,6 +3883,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
 
        return pre_cac_allowed;
 }
+EXPORT_SYMBOL(regulatory_pre_cac_allowed);
 
 void regulatory_propagate_dfs_state(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef,
index 504133d..dc8f689 100644 (file)
@@ -156,14 +156,6 @@ bool regulatory_indoor_allowed(void);
 #define REG_PRE_CAC_EXPIRY_GRACE_MS 2000
 
 /**
- * regulatory_pre_cac_allowed - if pre-CAC allowed in the current dfs domain
- * @wiphy: wiphy for which pre-CAC capability is checked.
-
- * Pre-CAC is allowed only in ETSI domain.
- */
-bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
-
-/**
  * regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys
  * @wiphy - wiphy on which radar is detected and the event will be propagated
  *     to other available wiphys having the same DFS domain
index d313c9b..aef240f 100644 (file)
@@ -1703,8 +1703,7 @@ cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
 static void
 cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
                                   struct cfg80211_bss *nontrans_bss,
-                                  struct ieee80211_mgmt *mgmt, size_t len,
-                                  gfp_t gfp)
+                                  struct ieee80211_mgmt *mgmt, size_t len)
 {
        u8 *ie, *new_ie, *pos;
        const u8 *nontrans_ssid, *trans_ssid, *mbssid;
@@ -1715,6 +1714,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
        const struct cfg80211_bss_ies *old;
        u8 cpy_len;
 
+       lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+
        ie = mgmt->u.probe_resp.variable;
 
        new_ie_len = ielen;
@@ -1723,26 +1724,30 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
                return;
        new_ie_len -= trans_ssid[1];
        mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
-       if (!mbssid)
+       /*
+        * It's not valid to have the MBSSID element before SSID
+        * ignore if that happens - the code below assumes it is
+        * after (while copying things inbetween).
+        */
+       if (!mbssid || mbssid < trans_ssid)
                return;
        new_ie_len -= mbssid[1];
-       rcu_read_lock();
+
        nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
-       if (!nontrans_ssid) {
-               rcu_read_unlock();
+       if (!nontrans_ssid)
                return;
-       }
+
        new_ie_len += nontrans_ssid[1];
-       rcu_read_unlock();
 
        /* generate new ie for nontrans BSS
         * 1. replace SSID with nontrans BSS' SSID
         * 2. skip MBSSID IE
         */
-       new_ie = kzalloc(new_ie_len, gfp);
+       new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
        if (!new_ie)
                return;
-       new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+
+       new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
        if (!new_ies)
                goto out_free;
 
@@ -1896,6 +1901,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
        cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
                                         &non_tx_data, gfp);
 
+       spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+
        /* check if the res has other nontransmitting bss which is not
         * in MBSSID IE
         */
@@ -1910,8 +1917,9 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
                ies2 = rcu_access_pointer(tmp_bss->ies);
                if (ies2->tsf < ies1->tsf)
                        cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
-                                                          mgmt, len, gfp);
+                                                          mgmt, len);
        }
+       spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
 
        return res;
 }
index 7b6529d..cac9e28 100644 (file)
@@ -798,7 +798,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        int ret;
 
        switch (wdev->iftype) {
index c67d7a8..73fd0ea 100644 (file)
@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                               struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
+       int ret = 0;
 
        /* call only for station! */
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                if (ie) {
                        data->flags = 1;
                        data->length = ie[1];
-                       memcpy(ssid, ie + 2, data->length);
+                       if (data->length > IW_ESSID_MAX_SIZE)
+                               ret = -EINVAL;
+                       else
+                               memcpy(ssid, ie + 2, data->length);
                }
                rcu_read_unlock();
        } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
        }
        wdev_unlock(wdev);
 
-       return 0;
+       return ret;
 }
 
 int cfg80211_mgd_wext_siwap(struct net_device *dev,
index 5c111bc..00e7823 100644 (file)
@@ -55,7 +55,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
                if (!sock_owned_by_user(sk)) {
                        queued = x25_process_rx_frame(sk, skb);
                } else {
-                       queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+                       queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
                }
                bh_unlock_sock(sk);
                sock_put(sk);
index fa8fbb8..9044073 100644 (file)
@@ -305,9 +305,8 @@ out:
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx);
 
-static int xsk_zc_xmit(struct sock *sk)
+static int xsk_zc_xmit(struct xdp_sock *xs)
 {
-       struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev = xs->dev;
 
        return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
@@ -327,11 +326,10 @@ static void xsk_destruct_skb(struct sk_buff *skb)
        sock_wfree(skb);
 }
 
-static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
-                           size_t total_len)
+static int xsk_generic_xmit(struct sock *sk)
 {
-       u32 max_batch = TX_BATCH_SIZE;
        struct xdp_sock *xs = xdp_sk(sk);
+       u32 max_batch = TX_BATCH_SIZE;
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
@@ -394,6 +392,18 @@ out:
        return err;
 }
 
+static int __xsk_sendmsg(struct sock *sk)
+{
+       struct xdp_sock *xs = xdp_sk(sk);
+
+       if (unlikely(!(xs->dev->flags & IFF_UP)))
+               return -ENETDOWN;
+       if (unlikely(!xs->tx))
+               return -ENOBUFS;
+
+       return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
+}
+
 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 {
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
@@ -402,21 +412,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 
        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
-       if (unlikely(!(xs->dev->flags & IFF_UP)))
-               return -ENETDOWN;
-       if (unlikely(!xs->tx))
-               return -ENOBUFS;
-       if (need_wait)
+       if (unlikely(need_wait))
                return -EOPNOTSUPP;
 
-       return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
+       return __xsk_sendmsg(sk);
 }
 
 static unsigned int xsk_poll(struct file *file, struct socket *sock,
                             struct poll_table_struct *wait)
 {
        unsigned int mask = datagram_poll(file, sock, wait);
-       struct xdp_sock *xs = xdp_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        struct xdp_umem *umem;
 
@@ -426,9 +433,14 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock,
        dev = xs->dev;
        umem = xs->umem;
 
-       if (umem->need_wakeup)
-               dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
-                                               umem->need_wakeup);
+       if (umem->need_wakeup) {
+               if (dev->netdev_ops->ndo_xsk_wakeup)
+                       dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
+                                                       umem->need_wakeup);
+               else
+                       /* Poll needs to drive Tx also in copy mode */
+                       __xsk_sendmsg(sk);
+       }
 
        if (xs->rx && !xskq_empty_desc(xs->rx))
                mask |= POLLIN | POLLRDNORM;
index 6088bc2..9b599ed 100644 (file)
@@ -706,7 +706,7 @@ resume:
        if (err)
                goto drop;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (decaps) {
                sp = skb_sec_path(skb);
index 2ab4859..0f5131b 100644 (file)
@@ -185,7 +185,7 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->skb_iif = 0;
        skb->ignore_df = 0;
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_reset_trace(skb);
 
        if (!xnet)
index 9499b35..b1db55b 100644 (file)
@@ -502,7 +502,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
-               nf_reset(skb);
+               nf_reset_ct(skb);
 
                err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
                if (unlikely(err != 1))
index 21e9392..f2d1e57 100644 (file)
@@ -2808,7 +2808,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
                        continue;
                }
 
-               nf_reset(skb);
+               nf_reset_ct(skb);
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
 
index 7409722..7048bb3 100644 (file)
@@ -3,7 +3,8 @@
 #ifndef __ASM_GOTO_WORKAROUND_H
 #define __ASM_GOTO_WORKAROUND_H
 
-/* this will bring in asm_volatile_goto macro definition
+/*
+ * This will bring in asm_volatile_goto and asm_inline macro definitions
  * if enabled by compiler and config options.
  */
 #include <linux/types.h>
 #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
 #endif
 
+/*
+ * asm_inline is defined as asm __inline in "include/linux/compiler_types.h"
+ * if supported by the kernel's CC (i.e CONFIG_CC_HAS_ASM_INLINE) which is not
+ * supported by CLANG.
+ */
+#ifdef asm_inline
+#undef asm_inline
+#define asm_inline asm
+#endif
+
 #define volatile(x...) volatile("")
 #endif
index e399380..4c31b30 100644 (file)
@@ -13,6 +13,7 @@
 #include <sys/resource.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <linux/perf_event.h>
 
 #include "libbpf.h"
 #include "bpf_load.h"
index 4b0432e..10ba926 100644 (file)
@@ -143,11 +143,6 @@ cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || e
 # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
 ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
 
-# ar-option
-# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
-# Important: no spaces around options
-ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
-
 # ld-version
 # Note this is mainly for HJ Lu's 3 number binutil versions
 ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
index f72aba6..a9e4795 100644 (file)
@@ -389,7 +389,7 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
 ifdef builtin-target
 
 quiet_cmd_ar_builtin = AR      $@
-      cmd_ar_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(real-prereqs)
+      cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs)
 
 $(builtin-target): $(real-obj-y) FORCE
        $(call if_changed,ar_builtin)
index 4a0cdd6..179d55a 100644 (file)
@@ -232,7 +232,7 @@ quiet_cmd_ld = LD      $@
 # ---------------------------------------------------------------------------
 
 quiet_cmd_ar = AR      $@
-      cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(real-prereqs)
+      cmd_ar = rm -f $@; $(AR) cDPrsT $@ $(real-prereqs)
 
 # Objcopy
 # ---------------------------------------------------------------------------
diff --git a/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci b/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci
deleted file mode 100644 (file)
index 56a2e26..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/// Use devm_platform_ioremap_resource helper which wraps
-/// platform_get_resource() and devm_ioremap_resource() together.
-///
-// Confidence: High
-// Copyright: (C) 2019 Himanshu Jha GPLv2.
-// Copyright: (C) 2019 Julia Lawall, Inria/LIP6. GPLv2.
-// Keywords: platform_get_resource, devm_ioremap_resource,
-// Keywords: devm_platform_ioremap_resource
-
-virtual patch
-virtual report
-
-@r depends on patch && !report@
-expression e1, e2, arg1, arg2, arg3;
-identifier id;
-@@
-
-(
-- id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
-- struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
-  ... when != id
-- e1 = devm_ioremap_resource(arg3, id);
-+ e1 = devm_platform_ioremap_resource(arg1, arg2);
-  ... when != id
-? id = e2
-
-@r1 depends on patch && !report@
-identifier r.id;
-type T;
-@@
-
-- T *id;
-  ...when != id
-
-@r2 depends on report && !patch@
-identifier id;
-expression e1, e2, arg1, arg2, arg3;
-position j0;
-@@
-
-(
-  id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
-  struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
-  ... when != id
-  e1@j0 = devm_ioremap_resource(arg3, id);
-  ... when != id
-? id = e2
-
-@script:python depends on report && !patch@
-e1 << r2.e1;
-j0 << r2.j0;
-@@
-
-msg = "WARNING: Use devm_platform_ioremap_resource for %s" % (e1)
-coccilib.report.print_report(j0[0], msg)
index c832bb6..99e93a6 100644 (file)
@@ -6,6 +6,8 @@
 /// add a missing namespace tag to a module source file.
 ///
 
+virtual report
+
 @has_ns_import@
 declarer name MODULE_IMPORT_NS;
 identifier virtual.ns;
index 6d2e09a..2fa7bb8 100644 (file)
@@ -16,6 +16,8 @@ import sys
 
 from linux import utils
 
+printk_log_type = utils.CachedType("struct printk_log")
+
 
 class LxDmesg(gdb.Command):
     """Print Linux kernel log buffer."""
@@ -42,9 +44,14 @@ class LxDmesg(gdb.Command):
             b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
             log_buf = a.tobytes() + b.tobytes()
 
+        length_offset = printk_log_type.get_type()['len'].bitpos // 8
+        text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
+        time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
+        text_offset = printk_log_type.get_type().sizeof
+
         pos = 0
         while pos < log_buf.__len__():
-            length = utils.read_u16(log_buf[pos + 8:pos + 10])
+            length = utils.read_u16(log_buf, pos + length_offset)
             if length == 0:
                 if log_buf_2nd_half == -1:
                     gdb.write("Corrupted log buffer!\n")
@@ -52,10 +59,11 @@ class LxDmesg(gdb.Command):
                 pos = log_buf_2nd_half
                 continue
 
-            text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
-            text = log_buf[pos + 16:pos + 16 + text_len].decode(
+            text_len = utils.read_u16(log_buf, pos + text_len_offset)
+            text_start = pos + text_offset
+            text = log_buf[text_start:text_start + text_len].decode(
                 encoding='utf8', errors='replace')
-            time_stamp = utils.read_u64(log_buf[pos:pos + 8])
+            time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
 
             for line in text.splitlines():
                 msg = u"[{time:12.6f}] {line}\n".format(
index 34e40e9..7b7c2fa 100644 (file)
@@ -15,7 +15,7 @@ import gdb
 import os
 import re
 
-from linux import modules
+from linux import modules, utils
 
 
 if hasattr(gdb, 'Breakpoint'):
@@ -116,6 +116,12 @@ lx-symbols command."""
             module_file = self._get_module_file(module_name)
 
         if module_file:
+            if utils.is_target_arch('s390'):
+                # Module text is preceded by PLT stubs on s390.
+                module_arch = module['arch']
+                plt_offset = int(module_arch['plt_offset'])
+                plt_size = int(module_arch['plt_size'])
+                module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
             gdb.write("loading @{addr}: {filename}\n".format(
                 addr=module_addr, filename=module_file))
             cmdline = "add-symbol-file {filename} {addr}{sections}".format(
index bc67126..ea94221 100644 (file)
@@ -92,15 +92,16 @@ def read_memoryview(inf, start, length):
     return memoryview(inf.read_memory(start, length))
 
 
-def read_u16(buffer):
+def read_u16(buffer, offset):
+    buffer_val = buffer[offset:offset + 2]
     value = [0, 0]
 
-    if type(buffer[0]) is str:
-        value[0] = ord(buffer[0])
-        value[1] = ord(buffer[1])
+    if type(buffer_val[0]) is str:
+        value[0] = ord(buffer_val[0])
+        value[1] = ord(buffer_val[1])
     else:
-        value[0] = buffer[0]
-        value[1] = buffer[1]
+        value[0] = buffer_val[0]
+        value[1] = buffer_val[1]
 
     if get_target_endianness() == LITTLE_ENDIAN:
         return value[0] + (value[1] << 8)
@@ -108,18 +109,18 @@ def read_u16(buffer):
         return value[1] + (value[0] << 8)
 
 
-def read_u32(buffer):
+def read_u32(buffer, offset):
     if get_target_endianness() == LITTLE_ENDIAN:
-        return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
+        return read_u16(buffer, offset) + (read_u16(buffer, offset + 2) << 16)
     else:
-        return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
+        return read_u16(buffer, offset + 2) + (read_u16(buffer, offset) << 16)
 
 
-def read_u64(buffer):
+def read_u64(buffer, offset):
     if get_target_endianness() == LITTLE_ENDIAN:
-        return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
+        return read_u32(buffer, offset) + (read_u32(buffer, offset + 4) << 32)
     else:
-        return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
+        return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)
 
 
 target_arch = None
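
The gdb-script hunks above move the byte-assembly helpers to a (buffer, offset) convention while keeping explicit little/big-endian handling. The same idea written as plain C, purely for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint16_t read_u16(const uint8_t *buf, size_t off, int little_endian)
{
	return little_endian ? (uint16_t)(buf[off] | buf[off + 1] << 8)
			     : (uint16_t)(buf[off + 1] | buf[off] << 8);
}

static uint32_t read_u32(const uint8_t *buf, size_t off, int little_endian)
{
	return little_endian
		? read_u16(buf, off, 1) | (uint32_t)read_u16(buf, off + 2, 1) << 16
		: read_u16(buf, off + 2, 0) | (uint32_t)read_u16(buf, off, 0) << 16;
}

int main(void)
{
	const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };

	/* prints: 0x12345678 0x78563412 */
	printf("0x%08x 0x%08x\n", read_u32(buf, 0, 1), read_u32(buf, 0, 0));
	return 0;
}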
index 3961941..936d3ad 100644 (file)
@@ -166,7 +166,7 @@ struct symbol {
        struct module *module;
        unsigned int crc;
        int crc_valid;
-       const char *namespace;
+       char *namespace;
        unsigned int weak:1;
        unsigned int vmlinux:1;    /* 1 if symbol is defined in vmlinux */
        unsigned int kernel:1;     /* 1 if symbol is from kernel
@@ -348,20 +348,18 @@ static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
                return export_unknown;
 }
 
-static const char *sym_extract_namespace(const char **symname)
+static char *sym_extract_namespace(const char **symname)
 {
-       size_t n;
-       char *dupsymname;
+       char *namespace = NULL;
+       char *ns_separator;
 
-       n = strcspn(*symname, ".");
-       if (n < strlen(*symname) - 1) {
-               dupsymname = NOFAIL(strdup(*symname));
-               dupsymname[n] = '\0';
-               *symname = dupsymname;
-               return dupsymname + n + 1;
+       ns_separator = strchr(*symname, '.');
+       if (ns_separator) {
+               namespace = NOFAIL(strndup(*symname, ns_separator - *symname));
+               *symname = ns_separator + 1;
        }
 
-       return NULL;
+       return namespace;
 }
 
 /**
@@ -375,7 +373,6 @@ static struct symbol *sym_add_exported(const char *name, const char *namespace,
 
        if (!s) {
                s = new_symbol(name, mod, export);
-               s->namespace = namespace;
        } else {
                if (!s->preloaded) {
                        warn("%s: '%s' exported twice. Previous export was in %s%s\n",
@@ -386,6 +383,8 @@ static struct symbol *sym_add_exported(const char *name, const char *namespace,
                        s->module = mod;
                }
        }
+       free(s->namespace);
+       s->namespace = namespace ? strdup(namespace) : NULL;
        s->preloaded = 0;
        s->vmlinux   = is_vmlinux(mod->name);
        s->kernel    = 0;
@@ -672,7 +671,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        unsigned int crc;
        enum export export;
        bool is_crc = false;
-       const char *name, *namespace;
+       const char *name;
+       char *namespace;
 
        if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
            strstarts(symname, "__ksymtab"))
@@ -747,6 +747,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                        name = symname + strlen("__ksymtab_");
                        namespace = sym_extract_namespace(&name);
                        sym_add_exported(name, namespace, mod, export);
+                       free(namespace);
                }
                if (strcmp(symname, "init_module") == 0)
                        mod->has_init = 1;
@@ -2195,7 +2196,7 @@ static int check_exports(struct module *mod)
                else
                        basename = mod->name;
 
-               if (exp->namespace) {
+               if (exp->namespace && exp->namespace[0]) {
                        add_namespace(&mod->required_namespaces,
                                      exp->namespace);
 
@@ -2652,15 +2653,20 @@ int main(int argc, char **argv)
                fatal("modpost: Section mismatches detected.\n"
                      "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
        for (n = 0; n < SYMBOL_HASH_SIZE; n++) {
-               struct symbol *s = symbolhash[n];
+               struct symbol *s;
+
+               for (s = symbolhash[n]; s; s = s->next) {
+                       /*
+                        * Do not check "vmlinux". This avoids the same warnings
+                        * shown twice, and false-positives for ARCH=um.
+                        */
+                       if (is_vmlinux(s->module->name) && !s->module->is_dot_o)
+                               continue;
 
-               while (s) {
                        if (s->is_static)
                                warn("\"%s\" [%s] is a static %s\n",
                                     s->name, s->module->name,
                                     export_str(s->export));
-
-                       s = s->next;
                }
        }
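
The modpost hunks above rework namespace extraction to split the exported symbol string at the first '.' with strchr()/strndup(), treating the prefix as the namespace, advancing the name past the separator, and freeing the duplicated string afterwards. A minimal userspace sketch of that split (the sample string and names below are invented for illustration):

#define _POSIX_C_SOURCE 200809L	/* for strndup() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Duplicate the part before the first '.' and advance the caller's pointer
 * past the separator, the same shape as the reworked helper above. */
static char *extract_prefix(const char **symname)
{
	const char *sep = strchr(*symname, '.');
	char *prefix;

	if (!sep)
		return NULL;
	prefix = strndup(*symname, (size_t)(sep - *symname));
	*symname = sep + 1;
	return prefix;
}

int main(void)
{
	const char *sym = "SOME_NS.some_symbol";
	char *ns = extract_prefix(&sym);

	printf("prefix=%s rest=%s\n", ns ? ns : "(none)", sym);
	free(ns);
	return 0;
}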
 
index 6135574..1da7bca 100755 (executable)
 use warnings;
 use strict;
 use File::Find;
+use File::Spec;
 
 my $nm = ($ENV{'NM'} || "nm") . " -p";
 my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
-my $srctree = "";
-my $objtree = "";
-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
+my $srctree = File::Spec->curdir();
+my $objtree = File::Spec->curdir();
+$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
+$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
 
 if ($#ARGV != -1) {
        print STDERR "usage: $0 takes no parameters\n";
@@ -231,9 +232,9 @@ sub do_nm
        }
        ($source = $basename) =~ s/\.o$//;
        if (-e "$source.c" || -e "$source.S") {
-               $source = "$objtree$File::Find::dir/$source";
+               $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
        } else {
-               $source = "$srctree$File::Find::dir/$source";
+               $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
        }
        if (! -e "$source.c" && ! -e "$source.S") {
                # No obvious source, exclude the object if it is conglomerate
index ac2b603..3754dac 100644 (file)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # Linux kernel symbol namespace import generator
 #
@@ -41,7 +41,7 @@ generate_deps() {
                for source_file in $mod_source_files; do
                        sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp
                        offset=$(wc -l ${source_file}.tmp | awk '{print $1;}')
-                       cat $source_file | grep MODULE_IMPORT_NS | sort -u >> ${source_file}.tmp
+                       cat $source_file | grep MODULE_IMPORT_NS | LANG=C sort -u >> ${source_file}.tmp
                        tail -n +$((offset +1)) ${source_file} | grep -v MODULE_IMPORT_NS >> ${source_file}.tmp
                        if ! diff -q ${source_file} ${source_file}.tmp; then
                                mv ${source_file}.tmp ${source_file}
index 8f0a278..74eab03 100644 (file)
@@ -389,11 +389,8 @@ static int nop_mcount(Elf_Shdr const *const relhdr,
                        mcountsym = get_mcountsym(sym0, relp, str0);
 
                if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
-                       if (make_nop) {
+                       if (make_nop)
                                ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
-                               if (ret < 0)
-                                       return -1;
-                       }
                        if (warn_on_notrace_sect && !once) {
                                printf("Section %s has mcount callers being ignored\n",
                                       txtname);
index 365b3c2..a2998b1 100755 (executable)
@@ -93,7 +93,7 @@ scm_version()
        # Check for mercurial and a mercurial repo.
        if test -d .hg && hgid=`hg id 2>/dev/null`; then
                # Do we have an tagged version?  If so, latesttagdistance == 1
-               if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+               if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
                        id=`hg log -r . --template '{latesttag}'`
                        printf '%s%s' -hg "$id"
                else
@@ -126,7 +126,7 @@ scm_version()
 
 collect_files()
 {
-       local file res
+       local file res=
 
        for file; do
                case "$file" in
index 19faace..35e6ca7 100644 (file)
@@ -13,9 +13,6 @@ integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyrin
 integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
                                        platform_certs/load_uefi.o
 integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
-$(obj)/load_uefi.o: KBUILD_CFLAGS += -fshort-wchar
 
-subdir-$(CONFIG_IMA)                   += ima
 obj-$(CONFIG_IMA)                      += ima/
-subdir-$(CONFIG_EVM)                   += evm
 obj-$(CONFIG_EVM)                      += evm/
index 3a29e7c..a5813c7 100644 (file)
@@ -1946,7 +1946,14 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
                rc = string_to_context_struct(args->newp, NULL, s,
                                              newc, SECSID_NULL);
                if (rc == -EINVAL) {
-                       /* Retain string representation for later mapping. */
+                       /*
+                        * Retain string representation for later mapping.
+                        *
+                        * IMPORTANT: We need to copy the contents of oldc->str
+                        * back into s again because string_to_context_struct()
+                        * may have garbled it.
+                        */
+                       memcpy(s, oldc->str, oldc->len);
                        context_init(newc);
                        newc->str = s;
                        newc->len = oldc->len;
index 211ca85..cfab60d 100644 (file)
@@ -271,6 +271,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
                ret = snd_hdac_ext_bus_link_power_up(link);
 
                /*
+                * clear the register to invalidate all the output streams
+                */
+               snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV,
+                                ML_LOSIDV_STREAM_MASK, 0);
+               /*
                 *  wait for 521usec for codec to report status
                 *  HDA spec section 4.3 - Codec Discovery
                 */
index bca5de7..795cbda 100644 (file)
@@ -3474,6 +3474,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
 
+       codec->link_down_at_suspend = 1;
+
        generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin);
 
        return 0;
index b000b36..ce4f116 100644 (file)
@@ -5358,6 +5358,17 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
        }
 }
 
+static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
+                                                     const struct hda_fixup *fix,
+                                                     int action)
+{
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
+       snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
+}
+
 static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
                                             const struct hda_fixup *fix,
                                             int action)
@@ -5822,6 +5833,7 @@ enum {
        ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
        ALC275_FIXUP_DELL_XPS,
        ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+       ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
        ALC293_FIXUP_LENOVO_SPK_NOISE,
        ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
        ALC255_FIXUP_DELL_SPK_NOISE,
@@ -5869,6 +5881,7 @@ enum {
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
        ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_HEADSET_MIC,
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
        ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
@@ -6558,6 +6571,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
+               .chained = true,
+               .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
+       },
        [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_disable_aamix,
@@ -6912,6 +6931,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
        [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -7001,17 +7029,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
-       SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
        SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
-       SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
        SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
        SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
        SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
        SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
@@ -7108,6 +7136,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
index 33cd267..ff5ab24 100644 (file)
@@ -348,6 +348,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ep = 0x84;
                ifnum = 0;
                goto add_sync_ep_from_ifnum;
+       case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+               /* BOSS Katana amplifiers do not need quirks */
+               return 0;
        }
 
        if (attr == USB_ENDPOINT_SYNC_ASYNC &&
index a4217c1..2769360 100644 (file)
@@ -266,8 +266,10 @@ struct kvm_vcpu_events {
 #define   KVM_DEV_ARM_ITS_CTRL_RESET           4
 
 /* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT                28
+#define KVM_ARM_IRQ_VCPU2_MASK         0xf
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
-#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_TYPE_MASK          0xf
 #define KVM_ARM_IRQ_VCPU_SHIFT         16
 #define KVM_ARM_IRQ_VCPU_MASK          0xff
 #define KVM_ARM_IRQ_NUM_SHIFT          0
index 9a50771..67c21f9 100644 (file)
@@ -325,8 +325,10 @@ struct kvm_vcpu_events {
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER                1
 
 /* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT                28
+#define KVM_ARM_IRQ_VCPU2_MASK         0xf
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
-#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_TYPE_MASK          0xf
 #define KVM_ARM_IRQ_VCPU_SHIFT         16
 #define KVM_ARM_IRQ_VCPU_MASK          0xff
 #define KVM_ARM_IRQ_NUM_SHIFT          0
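The two hunks above sync the arm and arm64 uapi headers that carve the new VCPU2 bits out of the KVM_IRQ_LINE irq field. As a hedged illustration (not part of these patches; it assumes an arm/arm64 toolchain whose uapi kvm.h carries these definitions, and the helper name is invented), the irq word can be packed from the shifts and masks shown above, with vcpu indices above 255 spilling into the VCPU2 field:

    /* Sketch only: pack a KVM_IRQ_LINE 'irq' word using the layout above. */
    #include <stdint.h>
    #include <linux/kvm.h>    /* assumption: arm/arm64 uapi headers with these macros */

    static uint32_t pack_kvm_arm_irq(uint32_t irq_type, uint32_t vcpu_idx,
                                     uint32_t irq_num)
    {
            uint32_t vcpu_lo = vcpu_idx & KVM_ARM_IRQ_VCPU_MASK;         /* low 8 bits */
            uint32_t vcpu_hi = (vcpu_idx >> 8) & KVM_ARM_IRQ_VCPU2_MASK; /* new high bits */

            return ((irq_type & KVM_ARM_IRQ_TYPE_MASK) << KVM_ARM_IRQ_TYPE_SHIFT) |
                   (vcpu_hi << KVM_ARM_IRQ_VCPU2_SHIFT) |
                   (vcpu_lo << KVM_ARM_IRQ_VCPU_SHIFT) |
                   (irq_num << KVM_ARM_IRQ_NUM_SHIFT);
    }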
index 47104e5..436ec76 100644 (file)
@@ -231,6 +231,12 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
 #define KVM_SYNC_ETOKEN (1UL << 11)
+
+#define KVM_SYNC_S390_VALID_FIELDS \
+       (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
+        KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
+        KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
+
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
index f0b0c90..f01950a 100644 (file)
@@ -31,6 +31,7 @@
 #define EXIT_REASON_EXCEPTION_NMI       0
 #define EXIT_REASON_EXTERNAL_INTERRUPT  1
 #define EXIT_REASON_TRIPLE_FAULT        2
+#define EXIT_REASON_INIT_SIGNAL                        3
 
 #define EXIT_REASON_PENDING_INTERRUPT   7
 #define EXIT_REASON_NMI_WINDOW          8
@@ -90,6 +91,7 @@
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_EXTERNAL_INTERRUPT,    "EXTERNAL_INTERRUPT" }, \
        { EXIT_REASON_TRIPLE_FAULT,          "TRIPLE_FAULT" }, \
+       { EXIT_REASON_INIT_SIGNAL,           "INIT_SIGNAL" }, \
        { EXIT_REASON_PENDING_INTERRUPT,     "PENDING_INTERRUPT" }, \
        { EXIT_REASON_NMI_WINDOW,            "NMI_WINDOW" }, \
        { EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
index fbf5e4a..5d1995f 100644 (file)
@@ -12,7 +12,11 @@ INSTALL ?= install
 CFLAGS += -Wall -O2
 CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
 
-ifeq ($(srctree),)
+# This will work when bpf is built in the tools environment, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
index 63b1f50..c160a53 100644 (file)
@@ -67,6 +67,9 @@
 #define MADV_WIPEONFORK 18             /* Zero memory on fork, child only */
 #define MADV_KEEPONFORK 19             /* Undo MADV_WIPEONFORK */
 
+#define MADV_COLD      20              /* deactivate these pages */
+#define MADV_PAGEOUT   21              /* reclaim these pages */
+
 /* compatibility flags */
 #define MAP_FILE       0
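The MADV_COLD and MADV_PAGEOUT values added above are plain madvise(2) hints. A minimal, hedged usage sketch (not from this patch set; the fallback defines only cover building against older headers, and both hints are best-effort):

    #include <stddef.h>
    #include <sys/mman.h>

    #ifndef MADV_COLD
    #define MADV_COLD    20    /* deactivate these pages */
    #endif
    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT 21    /* reclaim these pages */
    #endif

    static void hint_cold_then_reclaim(void *addr, size_t len)
    {
            /* Ignore failures: kernels without these values return EINVAL. */
            (void)madvise(addr, len, MADV_COLD);
            (void)madvise(addr, len, MADV_PAGEOUT);
    }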
 
index 328d05e..469dc51 100644 (file)
@@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_PRIORITY  (1ul << 1)
 #define   I915_SCHEDULER_CAP_PREEMPTION        (1ul << 2)
 #define   I915_SCHEDULER_CAP_SEMAPHORES        (1ul << 3)
+#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
 
 #define I915_PARAM_HUC_STATUS           42
 
index 2a616aa..379a612 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/limits.h>
 #include <linux/ioctl.h>
 #include <linux/types.h>
+#ifndef __KERNEL__
+#include <linux/fscrypt.h>
+#endif
 
 /* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
 #if !defined(__KERNEL__)
@@ -213,57 +216,6 @@ struct fsxattr {
 #define FS_IOC_SETFSLABEL              _IOW(0x94, 50, char[FSLABEL_MAX])
 
 /*
- * File system encryption support
- */
-/* Policy provided via an ioctl on the topmost directory */
-#define FS_KEY_DESCRIPTOR_SIZE 8
-
-#define FS_POLICY_FLAGS_PAD_4          0x00
-#define FS_POLICY_FLAGS_PAD_8          0x01
-#define FS_POLICY_FLAGS_PAD_16         0x02
-#define FS_POLICY_FLAGS_PAD_32         0x03
-#define FS_POLICY_FLAGS_PAD_MASK       0x03
-#define FS_POLICY_FLAG_DIRECT_KEY      0x04    /* use master key directly */
-#define FS_POLICY_FLAGS_VALID          0x07
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID             0
-#define FS_ENCRYPTION_MODE_AES_256_XTS         1
-#define FS_ENCRYPTION_MODE_AES_256_GCM         2
-#define FS_ENCRYPTION_MODE_AES_256_CBC         3
-#define FS_ENCRYPTION_MODE_AES_256_CTS         4
-#define FS_ENCRYPTION_MODE_AES_128_CBC         5
-#define FS_ENCRYPTION_MODE_AES_128_CTS         6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS    7 /* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS    8 /* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_ADIANTUM            9
-
-struct fscrypt_policy {
-       __u8 version;
-       __u8 contents_encryption_mode;
-       __u8 filenames_encryption_mode;
-       __u8 flags;
-       __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-};
-
-#define FS_IOC_SET_ENCRYPTION_POLICY   _IOR('f', 19, struct fscrypt_policy)
-#define FS_IOC_GET_ENCRYPTION_PWSALT   _IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY   _IOW('f', 21, struct fscrypt_policy)
-
-/* Parameters for passing an encryption key into the kernel keyring */
-#define FS_KEY_DESC_PREFIX             "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE                8
-
-/* Structure that userspace passes to the kernel keyring */
-#define FS_MAX_KEY_SIZE                        64
-
-struct fscrypt_key {
-       __u32 mode;
-       __u8 raw[FS_MAX_KEY_SIZE];
-       __u32 size;
-};
-
-/*
  * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
  *
  * Note: for historical reasons, these flags were originally used and
@@ -306,6 +258,7 @@ struct fscrypt_key {
 #define FS_TOPDIR_FL                   0x00020000 /* Top of directory hierarchies*/
 #define FS_HUGE_FILE_FL                        0x00040000 /* Reserved for ext4 */
 #define FS_EXTENT_FL                   0x00080000 /* Extents */
+#define FS_VERITY_FL                   0x00100000 /* Verity protected inode */
 #define FS_EA_INODE_FL                 0x00200000 /* Inode used for large EA */
 #define FS_EOFBLOCKS_FL                        0x00400000 /* Reserved for ext4 */
 #define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
new file mode 100644 (file)
index 0000000..39ccfe9
--- /dev/null
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * fscrypt user API
+ *
+ * These ioctls can be used on filesystems that support fscrypt.  See the
+ * "User API" section of Documentation/filesystems/fscrypt.rst.
+ */
+#ifndef _UAPI_LINUX_FSCRYPT_H
+#define _UAPI_LINUX_FSCRYPT_H
+
+#include <linux/types.h>
+
+/* Encryption policy flags */
+#define FSCRYPT_POLICY_FLAGS_PAD_4             0x00
+#define FSCRYPT_POLICY_FLAGS_PAD_8             0x01
+#define FSCRYPT_POLICY_FLAGS_PAD_16            0x02
+#define FSCRYPT_POLICY_FLAGS_PAD_32            0x03
+#define FSCRYPT_POLICY_FLAGS_PAD_MASK          0x03
+#define FSCRYPT_POLICY_FLAG_DIRECT_KEY         0x04
+#define FSCRYPT_POLICY_FLAGS_VALID             0x07
+
+/* Encryption algorithms */
+#define FSCRYPT_MODE_AES_256_XTS               1
+#define FSCRYPT_MODE_AES_256_CTS               4
+#define FSCRYPT_MODE_AES_128_CBC               5
+#define FSCRYPT_MODE_AES_128_CTS               6
+#define FSCRYPT_MODE_ADIANTUM                  9
+#define __FSCRYPT_MODE_MAX                     9
+
+/*
+ * Legacy policy version; ad-hoc KDF and no key verification.
+ * For new encrypted directories, use fscrypt_policy_v2 instead.
+ *
+ * Careful: the .version field for this is actually 0, not 1.
+ */
+#define FSCRYPT_POLICY_V1              0
+#define FSCRYPT_KEY_DESCRIPTOR_SIZE    8
+struct fscrypt_policy_v1 {
+       __u8 version;
+       __u8 contents_encryption_mode;
+       __u8 filenames_encryption_mode;
+       __u8 flags;
+       __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+};
+#define fscrypt_policy fscrypt_policy_v1
+
+/*
+ * Process-subscribed "logon" key description prefix and payload format.
+ * Deprecated; prefer FS_IOC_ADD_ENCRYPTION_KEY instead.
+ */
+#define FSCRYPT_KEY_DESC_PREFIX                "fscrypt:"
+#define FSCRYPT_KEY_DESC_PREFIX_SIZE   8
+#define FSCRYPT_MAX_KEY_SIZE           64
+struct fscrypt_key {
+       __u32 mode;
+       __u8 raw[FSCRYPT_MAX_KEY_SIZE];
+       __u32 size;
+};
+
+/*
+ * New policy version with HKDF and key verification (recommended).
+ */
+#define FSCRYPT_POLICY_V2              2
+#define FSCRYPT_KEY_IDENTIFIER_SIZE    16
+struct fscrypt_policy_v2 {
+       __u8 version;
+       __u8 contents_encryption_mode;
+       __u8 filenames_encryption_mode;
+       __u8 flags;
+       __u8 __reserved[4];
+       __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+};
+
+/* Struct passed to FS_IOC_GET_ENCRYPTION_POLICY_EX */
+struct fscrypt_get_policy_ex_arg {
+       __u64 policy_size; /* input/output */
+       union {
+               __u8 version;
+               struct fscrypt_policy_v1 v1;
+               struct fscrypt_policy_v2 v2;
+       } policy; /* output */
+};
+
+/*
+ * v1 policy keys are specified by an arbitrary 8-byte key "descriptor",
+ * matching fscrypt_policy_v1::master_key_descriptor.
+ */
+#define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR       1
+
+/*
+ * v2 policy keys are specified by a 16-byte key "identifier" which the kernel
+ * calculates as a cryptographic hash of the key itself,
+ * matching fscrypt_policy_v2::master_key_identifier.
+ */
+#define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER       2
+
+/*
+ * Specifies a key, either for v1 or v2 policies.  This doesn't contain the
+ * actual key itself; this is just the "name" of the key.
+ */
+struct fscrypt_key_specifier {
+       __u32 type;     /* one of FSCRYPT_KEY_SPEC_TYPE_* */
+       __u32 __reserved;
+       union {
+               __u8 __reserved[32]; /* reserve some extra space */
+               __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+               __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+       } u;
+};
+
+/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */
+struct fscrypt_add_key_arg {
+       struct fscrypt_key_specifier key_spec;
+       __u32 raw_size;
+       __u32 __reserved[9];
+       __u8 raw[];
+};
+
+/* Struct passed to FS_IOC_REMOVE_ENCRYPTION_KEY */
+struct fscrypt_remove_key_arg {
+       struct fscrypt_key_specifier key_spec;
+#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY     0x00000001
+#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS    0x00000002
+       __u32 removal_status_flags;     /* output */
+       __u32 __reserved[5];
+};
+
+/* Struct passed to FS_IOC_GET_ENCRYPTION_KEY_STATUS */
+struct fscrypt_get_key_status_arg {
+       /* input */
+       struct fscrypt_key_specifier key_spec;
+       __u32 __reserved[6];
+
+       /* output */
+#define FSCRYPT_KEY_STATUS_ABSENT              1
+#define FSCRYPT_KEY_STATUS_PRESENT             2
+#define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED        3
+       __u32 status;
+#define FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF   0x00000001
+       __u32 status_flags;
+       __u32 user_count;
+       __u32 __out_reserved[13];
+};
+
+#define FS_IOC_SET_ENCRYPTION_POLICY           _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT           _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY           _IOW('f', 21, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_POLICY_EX                _IOWR('f', 22, __u8[9]) /* size + version */
+#define FS_IOC_ADD_ENCRYPTION_KEY              _IOWR('f', 23, struct fscrypt_add_key_arg)
+#define FS_IOC_REMOVE_ENCRYPTION_KEY           _IOWR('f', 24, struct fscrypt_remove_key_arg)
+#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
+#define FS_IOC_GET_ENCRYPTION_KEY_STATUS       _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+
+/**********************************************************************/
+
+/* old names; don't add anything new here! */
+#ifndef __KERNEL__
+#define FS_KEY_DESCRIPTOR_SIZE         FSCRYPT_KEY_DESCRIPTOR_SIZE
+#define FS_POLICY_FLAGS_PAD_4          FSCRYPT_POLICY_FLAGS_PAD_4
+#define FS_POLICY_FLAGS_PAD_8          FSCRYPT_POLICY_FLAGS_PAD_8
+#define FS_POLICY_FLAGS_PAD_16         FSCRYPT_POLICY_FLAGS_PAD_16
+#define FS_POLICY_FLAGS_PAD_32         FSCRYPT_POLICY_FLAGS_PAD_32
+#define FS_POLICY_FLAGS_PAD_MASK       FSCRYPT_POLICY_FLAGS_PAD_MASK
+#define FS_POLICY_FLAG_DIRECT_KEY      FSCRYPT_POLICY_FLAG_DIRECT_KEY
+#define FS_POLICY_FLAGS_VALID          FSCRYPT_POLICY_FLAGS_VALID
+#define FS_ENCRYPTION_MODE_INVALID     0       /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2       /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3       /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS
+#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC
+#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS    7       /* removed */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS    8       /* removed */
+#define FS_ENCRYPTION_MODE_ADIANTUM    FSCRYPT_MODE_ADIANTUM
+#define FS_KEY_DESC_PREFIX             FSCRYPT_KEY_DESC_PREFIX
+#define FS_KEY_DESC_PREFIX_SIZE                FSCRYPT_KEY_DESC_PREFIX_SIZE
+#define FS_MAX_KEY_SIZE                        FSCRYPT_MAX_KEY_SIZE
+#endif /* !__KERNEL__ */
+
+#endif /* _UAPI_LINUX_FSCRYPT_H */
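The header synced above declares the v1/v2 policy structures and the new key-management ioctls. As a hedged sketch of how userspace might query a directory's policy through FS_IOC_GET_ENCRYPTION_POLICY_EX (the path is a placeholder, error handling is minimal, and it assumes uapi headers that ship linux/fscrypt.h):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fscrypt.h>

    int main(void)
    {
            struct fscrypt_get_policy_ex_arg arg;
            int fd = open("/mnt/crypt/dir", O_RDONLY);   /* placeholder path */

            if (fd < 0)
                    return 1;

            memset(&arg, 0, sizeof(arg));
            arg.policy_size = sizeof(arg.policy);   /* in: buffer size; out: actual size */

            if (ioctl(fd, FS_IOC_GET_ENCRYPTION_POLICY_EX, &arg) == 0)
                    printf("policy version %d\n", arg.policy.version);

            close(fd);
            return 0;
    }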
index 5e3f12d..233efbb 100644 (file)
@@ -243,6 +243,8 @@ struct kvm_hyperv_exit {
 #define KVM_INTERNAL_ERROR_SIMUL_EX    2
 /* Encounter unexpected vm-exit due to delivery event. */
 #define KVM_INTERNAL_ERROR_DELIVERY_EV 3
+/* Encounter unexpected vm-exit reason */
+#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON      4
 
 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
 struct kvm_run {
@@ -996,6 +998,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172
 #define KVM_CAP_PMU_EVENT_FILTER 173
+#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 78efe87..cf525cd 100644 (file)
@@ -158,6 +158,7 @@ struct usbdevfs_hub_portinfo {
 #define USBDEVFS_CAP_MMAP                      0x20
 #define USBDEVFS_CAP_DROP_PRIVILEGES           0x40
 #define USBDEVFS_CAP_CONNINFO_EX               0x80
+#define USBDEVFS_CAP_SUSPEND                   0x100
 
 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */
 
@@ -223,5 +224,8 @@ struct usbdevfs_streams {
  * extending size of the data returned.
  */
 #define USBDEVFS_CONNINFO_EX(len)  _IOC(_IOC_READ, 'U', 32, len)
+#define USBDEVFS_FORBID_SUSPEND    _IO('U', 33)
+#define USBDEVFS_ALLOW_SUSPEND     _IO('U', 34)
+#define USBDEVFS_WAIT_FOR_RESUME   _IO('U', 35)
 
 #endif /* _UAPI_LINUX_USBDEVICE_FS_H */
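The three suspend-control ioctls added above take no argument. A hedged sketch of how a usbfs client might use them (not from this patch set; the device path and helper name are placeholders, and the semantics are only summarized in the comments):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/usbdevice_fs.h>

    static int exercise_suspend_ioctls(const char *devpath)
    {
            int fd = open(devpath, O_RDWR);   /* e.g. "/dev/bus/usb/001/002" (placeholder) */

            if (fd < 0)
                    return -1;

            ioctl(fd, USBDEVFS_FORBID_SUSPEND);    /* keep the device out of runtime suspend */
            ioctl(fd, USBDEVFS_ALLOW_SUSPEND);     /* allow it to suspend again */
            ioctl(fd, USBDEVFS_WAIT_FOR_RESUME);   /* block until the device resumes */

            close(fd);
            return 0;
    }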
index c6f94cf..56ce629 100644 (file)
@@ -8,7 +8,11 @@ LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
 
 MAKEFLAGS += --no-print-directory
 
-ifeq ($(srctree),)
+# This will work when bpf is built in the tools environment, where srctree
+# isn't set, and when invoked from the selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in-srctree
+# builds.
+ifndef building_out_of_srctree
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
@@ -110,6 +114,9 @@ override CFLAGS += $(INCLUDES)
 override CFLAGS += -fvisibility=hidden
 override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 
+# flags specific for shared library
+SHLIB_FLAGS := -DSHARED
+
 ifeq ($(VERBOSE),1)
   Q =
 else
@@ -126,14 +133,17 @@ all:
 export srctree OUTPUT CC LD CFLAGS V
 include $(srctree)/tools/build/Makefile.include
 
-BPF_IN         := $(OUTPUT)libbpf-in.o
+SHARED_OBJDIR  := $(OUTPUT)sharedobjs/
+STATIC_OBJDIR  := $(OUTPUT)staticobjs/
+BPF_IN_SHARED  := $(SHARED_OBJDIR)libbpf-in.o
+BPF_IN_STATIC  := $(STATIC_OBJDIR)libbpf-in.o
 VERSION_SCRIPT := libbpf.map
 
 LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
 LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
 PC_FILE                := $(addprefix $(OUTPUT),$(PC_FILE))
 
-GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
                           cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
                           awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
                           sort -u | wc -l)
@@ -155,7 +165,7 @@ all: fixdep
 
 all_cmd: $(CMD_TARGETS) check
 
-$(BPF_IN): force elfdep bpfdep
+$(BPF_IN_SHARED): force elfdep bpfdep
        @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
        (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -171,17 +181,20 @@ $(BPF_IN): force elfdep bpfdep
        @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
        (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
-       $(Q)$(MAKE) $(build)=libbpf
+       $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
+
+$(BPF_IN_STATIC): force elfdep bpfdep
+       $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
 
 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
        $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
                                    -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
        @ln -sf $(@F) $(OUTPUT)libbpf.so
        @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
 
-$(OUTPUT)libbpf.a: $(BPF_IN)
+$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
        $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
 
 $(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
@@ -197,7 +210,7 @@ check: check_abi
 
 check_abi: $(OUTPUT)libbpf.so
        @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then  \
-               echo "Warning: Num of global symbols in $(BPF_IN)"       \
+               echo "Warning: Num of global symbols in $(BPF_IN_SHARED)"        \
                     "($(GLOBAL_SYM_COUNT)) does NOT match with num of"  \
                     "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
                     "Please make sure all LIBBPF_API symbols are"       \
@@ -255,9 +268,9 @@ config-clean:
        $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
 
 clean:
-       $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
+       $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \
                *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
-               *.pc LIBBPF-CFLAGS
+               *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR)
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
 
index 2e83a34..98216a6 100644 (file)
        (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
 #endif
 
+/* Symbol versioning is different between static and shared library.
+ * Properly versioned symbols are needed for shared library, but
+ * only the symbol of the new version is needed for static library.
+ */
+#ifdef SHARED
+# define COMPAT_VERSION(internal_name, api_name, version) \
+       asm(".symver " #internal_name "," #api_name "@" #version);
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+       asm(".symver " #internal_name "," #api_name "@@" #version);
+#else
+# define COMPAT_VERSION(internal_name, api_name, version)
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+       extern typeof(internal_name) api_name \
+       __attribute__((alias(#internal_name)));
+#endif
+
 extern void libbpf_print(enum libbpf_print_level level,
                         const char *format, ...)
        __attribute__((format(printf, 2, 3)));
index 24fa313..a902838 100644 (file)
@@ -261,8 +261,8 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
        return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
                                        &config);
 }
-asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
-asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
+COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
+DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
 
 static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 {
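The COMPAT_VERSION/DEFAULT_VERSION macros introduced two hunks above replace raw .symver asm like the lines removed here. A hedged, self-contained illustration (library, symbol, and version names are invented; the shared build additionally needs a version script that defines the version nodes):

    /* Sketch: two ABI revisions of one entry point, versioned the same way.
     * Build with -DSHARED (plus a version script) for the .so case, or
     * without it for the static-archive case. */
    #ifdef SHARED
    # define COMPAT_VERSION(internal_name, api_name, version) \
            asm(".symver " #internal_name "," #api_name "@" #version);
    # define DEFAULT_VERSION(internal_name, api_name, version) \
            asm(".symver " #internal_name "," #api_name "@@" #version);
    #else
    # define COMPAT_VERSION(internal_name, api_name, version)
    # define DEFAULT_VERSION(internal_name, api_name, version) \
            extern typeof(internal_name) api_name \
            __attribute__((alias(#internal_name)));
    #endif

    int foo_create_v0_0_1(int flags) { return flags; }                  /* old ABI */
    int foo_create_v0_0_2(int flags, int opts) { return flags + opts; } /* new ABI */

    COMPAT_VERSION(foo_create_v0_0_1, foo_create, LIBFOO_0.0.1)
    DEFAULT_VERSION(foo_create_v0_0_2, foo_create, LIBFOO_0.0.2)

With -DSHARED the two .symver directives bind both revisions to the exported name; without it, only the new revision is aliased to the public symbol, which is all a static archive needs.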
index ed61fb3..5b2cd5e 100644 (file)
@@ -20,7 +20,13 @@ MAKEFLAGS += --no-print-directory
 LIBFILE = $(OUTPUT)libsubcmd.a
 
 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
+
+ifeq ($(DEBUG),0)
+  ifeq ($(feature-fortify-source), 1)
+    CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
+  endif
+endif
 
 ifeq ($(CC_NO_CLANG), 0)
   CFLAGS += -O3
index 356b23a..2b62ba1 100644 (file)
@@ -71,6 +71,9 @@ ifdef::backend-docbook[]
 [header]
 template::[header-declarations]
 <refentry>
+ifdef::perf_date[]
+<refentryinfo><date>{perf_date}</date></refentryinfo>
+endif::perf_date[]
 <refmeta>
 <refentrytitle>{mantitle}</refentrytitle>
 <manvolnum>{manvolnum}</manvolnum>
index 4c62b07..52152d1 100644 (file)
@@ -36,8 +36,8 @@ III/ Jitdump file header format
 Each jitdump file starts with a fixed size header containing the following fields in order:
 
 
-* uint32_t magic     : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is 0x4A695444 or 0x4454694a depending on the endianness. The field can be used to detect the endianness of the file
-* uint32_t version   : a 4-byte value representing the format version. It is currently set to 2
+* uint32_t magic     : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is written as 0x4A695444. The reader will detect an endian mismatch when it reads 0x4454694a.
+* uint32_t version   : a 4-byte value representing the format version. It is currently set to 1
 * uint32_t total_size: size in bytes of file header
 * uint32_t elf_mach  : ELF architecture encoding (ELF e_machine value as specified in /usr/include/elf.h)
 * uint32_t pad1      : padding. Reserved for future use
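For readers following along, the fields quoted above map onto a fixed-size C header roughly like the sketch below. This is a hedged reading of the spec text, limited to the fields shown in this hunk; the full header in tools/perf/util/jitdump.h continues with further fields that the excerpt does not list.

    #include <stdint.h>

    /* Sketch of the leading fields of the jitdump file header described above. */
    struct jitdump_header_prefix {
            uint32_t magic;       /* "JiTD": written as 0x4A695444 */
            uint32_t version;     /* currently 1 */
            uint32_t total_size;  /* size in bytes of the file header */
            uint32_t elf_mach;    /* ELF e_machine value */
            uint32_t pad1;        /* padding, reserved for future use */
            /* ... remaining header fields omitted in this excerpt ... */
    };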
index e1d4b48..2ff6ced 100644 (file)
@@ -37,7 +37,7 @@ static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
 
        arm = zalloc(sizeof(*arm));
        if (!arm)
-               return -1;
+               return ENOMEM;
 
 #define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"
        err = regcomp(&arm->call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED);
@@ -59,5 +59,5 @@ out_free_call:
        regfree(&arm->call_insn);
 out_free_arm:
        free(arm);
-       return -1;
+       return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
 }
index 43aa93e..037e292 100644 (file)
@@ -95,7 +95,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
 
        arm = zalloc(sizeof(*arm));
        if (!arm)
-               return -1;
+               return ENOMEM;
 
        /* bl, blr */
        err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED);
@@ -118,5 +118,5 @@ out_free_call:
        regfree(&arm->call_insn);
 out_free_arm:
        free(arm);
-       return -1;
+       return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
 }
index b6b7bc7..3b4cdfc 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <sys/types.h>
+#include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -30,7 +31,7 @@ get_cpuid(char *buffer, size_t sz)
                buffer[nb-1] = '\0';
                return 0;
        }
-       return -1;
+       return ENOBUFS;
 }
 
 char *
index 89bb8f2..a50e70b 100644 (file)
@@ -164,8 +164,10 @@ static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
        if (!arch->initialized) {
                arch->initialized = true;
                arch->associate_instruction_ops = s390__associate_ins_ops;
-               if (cpuid)
-                       err = s390__cpuid_parse(arch, cpuid);
+               if (cpuid) {
+                       if (s390__cpuid_parse(arch, cpuid))
+                               err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
+               }
        }
 
        return err;
index 8b0b018..7933f68 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <sys/types.h>
+#include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <string.h>
@@ -54,7 +55,7 @@ int get_cpuid(char *buffer, size_t sz)
 
        sysinfo = fopen(SYSINFO, "r");
        if (sysinfo == NULL)
-               return -1;
+               return errno;
 
        while ((read = getline(&line, &line_sz, sysinfo)) != -1) {
                if (!strncmp(line, SYSINFO_MANU, strlen(SYSINFO_MANU))) {
@@ -89,7 +90,7 @@ int get_cpuid(char *buffer, size_t sz)
 
        /* Missing manufacturer, type or model information should not happen */
        if (!manufacturer[0] || !type[0] || !model[0])
-               return -1;
+               return EINVAL;
 
        /*
         * Scan /proc/service_levels and return the CPU-MF counter facility
@@ -133,14 +134,14 @@ skip_sysinfo:
        else
                nbytes = snprintf(buffer, sz, "%s,%s,%s", manufacturer, type,
                                  model);
-       return (nbytes >= sz) ? -1 : 0;
+       return (nbytes >= sz) ? ENOBUFS : 0;
 }
 
 char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
 {
        char *buf = malloc(128);
 
-       if (buf && get_cpuid(buf, 128) < 0)
+       if (buf && get_cpuid(buf, 128))
                zfree(&buf);
        return buf;
 }
index 44f5aba..7eb5621 100644 (file)
@@ -196,8 +196,10 @@ static int x86__annotate_init(struct arch *arch, char *cpuid)
        if (arch->initialized)
                return 0;
 
-       if (cpuid)
-               err = x86__cpuid_parse(arch, cpuid);
+       if (cpuid) {
+               if (x86__cpuid_parse(arch, cpuid))
+                       err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
+       }
 
        arch->initialized = true;
        return err;
index 662ecf8..aa6deb4 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <sys/types.h>
+#include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -58,7 +59,7 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt)
                buffer[nb-1] = '\0';
                return 0;
        }
-       return -1;
+       return ENOBUFS;
 }
 
 int
index 2227e2f..58a9e09 100644 (file)
@@ -705,14 +705,15 @@ static int process_sample_event(struct perf_tool *tool,
 
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
 {
-       char buf[64], *cpuid;
+       char buf[128], *cpuid;
        int err;
 
        if (kvm->live) {
                err = get_cpuid(buf, sizeof(buf));
                if (err != 0) {
-                       pr_err("Failed to look up CPU type\n");
-                       return err;
+                       pr_err("Failed to look up CPU type: %s\n",
+                              str_error_r(err, buf, sizeof(buf)));
+                       return -err;
                }
                cpuid = buf;
        } else
index 286fc70..67be8d3 100644 (file)
@@ -1063,7 +1063,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                        continue;
 
                insn = 0;
-               for (off = 0;; off += ilen) {
+               for (off = 0; off < (unsigned)len; off += ilen) {
                        uint64_t ip = start + off;
 
                        printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
@@ -1074,6 +1074,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                                        printed += print_srccode(thread, x.cpumode, ip);
                                break;
                        } else {
+                               ilen = 0;
                                printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
                                                   dump_insn(&x, ip, buffer + off, len - off, &ilen));
                                if (ilen == 0)
@@ -1083,6 +1084,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                                insn++;
                        }
                }
+               if (off != (unsigned)len)
+                       printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
        }
 
        /*
@@ -1123,6 +1126,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                goto out;
        }
        for (off = 0; off <= end - start; off += ilen) {
+               ilen = 0;
                printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
                                   dump_insn(&x, start + off, buffer + off, len - off, &ilen));
                if (ilen == 0)
index e2e0f06..cea13cb 100755 (executable)
@@ -8,6 +8,7 @@ include/uapi/drm/i915_drm.h
 include/uapi/linux/fadvise.h
 include/uapi/linux/fcntl.h
 include/uapi/linux/fs.h
+include/uapi/linux/fscrypt.h
 include/uapi/linux/kcmp.h
 include/uapi/linux/kvm.h
 include/uapi/linux/in.h
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
new file mode 100644 (file)
index 0000000..1a0034f
--- /dev/null
@@ -0,0 +1,7 @@
+[
+  {
+    "BriefDescription": "Transaction count",
+    "MetricName": "transaction",
+    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+  }
+]
index bd3fc57..61641a3 100644 (file)
@@ -4,4 +4,4 @@ Family-model,Version,Filename,EventType
 ^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
 ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
 ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
-^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_m8561,core
+^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
index 9e37287..e283726 100644 (file)
@@ -450,12 +450,12 @@ static struct fixed {
        const char *name;
        const char *event;
 } fixed[] = {
-       { "inst_retired.any", "event=0xc0" },
-       { "inst_retired.any_p", "event=0xc0" },
-       { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
-       { "cpu_clk_unhalted.thread", "event=0x3c" },
-       { "cpu_clk_unhalted.core", "event=0x3c" },
-       { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+       { "inst_retired.any", "event=0xc0,period=2000003" },
+       { "inst_retired.any_p", "event=0xc0,period=2000003" },
+       { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
+       { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
+       { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
+       { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
        { NULL, NULL},
 };
 
index dbc2719..dd865e0 100644 (file)
@@ -19,12 +19,11 @@ static void sigsegv_handler(int sig __maybe_unused)
 static void the_hook(void *_hook_flags)
 {
        int *hook_flags = _hook_flags;
-       int *p = NULL;
 
        *hook_flags = 1234;
 
        /* Generate a segfault, test perf_hooks__recover */
-       *p = 0;
+       raise(SIGSEGV);
 }
 
 int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused)
index e830ead..4036c7f 100644 (file)
@@ -1631,6 +1631,19 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
        case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
                scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
                break;
+       case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
+               scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
+               break;
+       case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
+               scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
+               break;
+       case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
+               scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
+               break;
+       case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
+               scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
+                         dso->long_name);
+               break;
        default:
                scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
                break;
@@ -1662,7 +1675,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
 
        build_id_path = strdup(filename);
        if (!build_id_path)
-               return -1;
+               return ENOMEM;
 
        /*
         * old style build-id cache has name of XX/XXXXXXX.. while
@@ -1713,13 +1726,13 @@ static int symbol__disassemble_bpf(struct symbol *sym,
        char tpath[PATH_MAX];
        size_t buf_size;
        int nr_skip = 0;
-       int ret = -1;
        char *buf;
        bfd *bfdf;
+       int ret;
        FILE *s;
 
        if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
-               return -1;
+               return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
 
        pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
                  sym->name, sym->start, sym->end - sym->start);
@@ -1732,8 +1745,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
        assert(bfd_check_format(bfdf, bfd_object));
 
        s = open_memstream(&buf, &buf_size);
-       if (!s)
+       if (!s) {
+               ret = errno;
                goto out;
+       }
        init_disassemble_info(&info, s,
                              (fprintf_ftype) fprintf);
 
@@ -1742,8 +1757,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
 
        info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
                                                 dso->bpf_prog.id);
-       if (!info_node)
+       if (!info_node) {
+               ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
                goto out;
+       }
        info_linear = info_node->info_linear;
        sub_id = dso->bpf_prog.sub_id;
 
@@ -2071,11 +2088,11 @@ int symbol__annotate(struct symbol *sym, struct map *map,
        int err;
 
        if (!arch_name)
-               return -1;
+               return errno;
 
        args.arch = arch = arch__find(arch_name);
        if (arch == NULL)
-               return -ENOTSUP;
+               return ENOTSUP;
 
        if (parch)
                *parch = arch;
@@ -2971,7 +2988,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
 
        notes->offsets = zalloc(size * sizeof(struct annotation_line *));
        if (notes->offsets == NULL)
-               return -1;
+               return ENOMEM;
 
        if (perf_evsel__is_group_event(evsel))
                nr_pcnt = evsel->core.nr_members;
@@ -2997,7 +3014,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
 
 out_free_offsets:
        zfree(&notes->offsets);
-       return -1;
+       return err;
 }
 
 #define ANNOTATION__CFG(n) \
index d94be91..d76fd0e 100644 (file)
@@ -370,6 +370,10 @@ enum symbol_disassemble_errno {
 
        SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX       = __SYMBOL_ANNOTATE_ERRNO__START,
        SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
+       SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING,
+       SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP,
+       SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE,
+       SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF,
 
        __SYMBOL_ANNOTATE_ERRNO__END,
 };
index 5591af8..abc7fda 100644 (file)
@@ -30,6 +30,7 @@
 #include "counts.h"
 #include "event.h"
 #include "evsel.h"
+#include "util/env.h"
 #include "util/evsel_config.h"
 #include "util/evsel_fprintf.h"
 #include "evlist.h"
@@ -2512,7 +2513,7 @@ struct perf_env *perf_evsel__env(struct evsel *evsel)
 {
        if (evsel && evsel->evlist)
                return evsel->evlist->env;
-       return NULL;
+       return &perf_env;
 }
 
 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
index 1bdf4c6..e3ccb0c 100644 (file)
@@ -395,7 +395,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
        size_t size;
        u16 idr_size;
        const char *sym;
-       uint32_t count;
+       uint64_t count;
        int ret, csize, usize;
        pid_t pid, tid;
        struct {
@@ -418,7 +418,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
                return -1;
 
        filename = event->mmap2.filename;
-       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
                        jd->dir,
                        pid,
                        count);
@@ -529,7 +529,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
                return -1;
 
        filename = event->mmap2.filename;
-       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
                 jd->dir,
                 pid,
                 jr->move.code_index);
index 8d04e3d..8b14e4a 100644 (file)
@@ -233,14 +233,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
        const char *prefix_dir = "";
        const char *suffix_dir = "";
 
+       /* _UTSNAME_LENGTH is 65 */
+       char release[128];
+
        char *autoconf_path;
 
        int err;
 
        if (!test_dir) {
-               /* _UTSNAME_LENGTH is 65 */
-               char release[128];
-
                err = fetch_kernel_version(NULL, release,
                                           sizeof(release));
                if (err)
index 5b83ed1..eec9b28 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "symbol.h"
+#include <assert.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <limits.h>
@@ -850,6 +851,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
                        }
 
                        after->start = map->end;
+                       after->pgoff += map->end - pos->start;
+                       assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
                        __map_groups__insert(pos->groups, after);
                        if (verbose >= 2 && !use_browser)
                                map__fprintf(after, fp);
index 53f3105..0246036 100644 (file)
@@ -14,6 +14,7 @@
 #include "thread_map.h"
 #include "trace-event.h"
 #include "mmap.h"
+#include "util/env.h"
 #include <internal/lib.h>
 #include "../perf-sys.h"
 
@@ -54,6 +55,11 @@ int parse_callchain_record(const char *arg __maybe_unused,
 }
 
 /*
+ * Add this one here not to drag util/env.c
+ */
+struct perf_env perf_env;
+
+/*
  * Support debug printing even though util/debug.c is not linked.  That means
  * implementing 'verbose' and 'eprintf'.
  */
index c3feccb..4cdbae6 100644 (file)
@@ -63,6 +63,13 @@ TARGETS += zram
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
+# User can optionally provide a TARGETS skiplist.
+SKIP_TARGETS ?=
+ifneq ($(SKIP_TARGETS),)
+       TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
+       override TARGETS := $(TMP)
+endif
+
 # Clear LDFLAGS and MAKEFLAGS if called from main
 # Makefile to avoid test build failures when test
 # Makefile doesn't have explicit build rules.
@@ -171,9 +178,12 @@ run_pstore_crash:
 # 1. output_dir=kernel_src
 # 2. a separate output directory is specified using O= KBUILD_OUTPUT
 # 3. a separate output directory is specified using KBUILD_OUTPUT
+# Avoid conflict with INSTALL_PATH set by the main Makefile
 #
-INSTALL_PATH ?= $(BUILD)/install
-INSTALL_PATH := $(abspath $(INSTALL_PATH))
+KSFT_INSTALL_PATH ?= $(BUILD)/kselftest_install
+KSFT_INSTALL_PATH := $(abspath $(KSFT_INSTALL_PATH))
+# Avoid changing the rest of the logic here and lib.mk.
+INSTALL_PATH := $(KSFT_INSTALL_PATH)
 ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
 
 install: all
@@ -198,11 +208,16 @@ ifdef INSTALL_PATH
        echo "  cat /dev/null > \$$logfile" >> $(ALL_SCRIPT)
        echo "fi" >> $(ALL_SCRIPT)
 
+       @# While building run_kselftest.sh skip also non-existent TARGET dirs:
+       @# they could be the result of a build failure and should NOT be
+       @# included in the generated runlist.
        for TARGET in $(TARGETS); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
+               [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
                echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
                echo "cd $$TARGET" >> $(ALL_SCRIPT); \
                echo -n "run_many" >> $(ALL_SCRIPT); \
+               echo -n "Emit Tests for $$TARGET\n"; \
                $(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
                echo "" >> $(ALL_SCRIPT);           \
                echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
index 6cbeea7..8547ecb 100644 (file)
@@ -195,7 +195,7 @@ static void run_test(int cgroup_fd)
 
        if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
                                      (void *)&server_fd)))
-               goto close_bpf_object;
+               goto close_server_fd;
 
        pthread_mutex_lock(&server_started_mtx);
        pthread_cond_wait(&server_started, &server_started_mtx);
index a82da55..f4cd60d 100644 (file)
@@ -260,13 +260,14 @@ void test_tcp_rtt(void)
 
        if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
                                      (void *)&server_fd)))
-               goto close_cgroup_fd;
+               goto close_server_fd;
 
        pthread_mutex_lock(&server_started_mtx);
        pthread_cond_wait(&server_started, &server_started_mtx);
        pthread_mutex_unlock(&server_started_mtx);
 
        CHECK_FAIL(run_test(cgroup_fd, server_fd));
+close_server_fd:
        close(server_fd);
 close_cgroup_fd:
        close(cgroup_fd);
index d23d4da..e2d0619 100755 (executable)
@@ -63,6 +63,9 @@ fi
 
 # Setup
 tc qdisc add dev lo ingress
+echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter
 
 echo "Testing IPv4..."
 # Drops all IP/UDP packets coming from port 9
index acf7a74..59ea569 100755 (executable)
@@ -314,15 +314,15 @@ test_gso()
        command -v nc >/dev/null 2>&1 || \
                { echo >&2 "nc is not available: skipping TSO tests"; return; }
 
-       # listen on IPv*_DST, capture TCP into $TMPFILE
+       # listen on port 9000, capture TCP into $TMPFILE
        if [ "${PROTO}" == "IPv4" ] ; then
                IP_DST=${IPv4_DST}
                ip netns exec ${NS3} bash -c \
-                       "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+                       "nc -4 -l -p 9000 > ${TMPFILE} &"
        elif [ "${PROTO}" == "IPv6" ] ; then
                IP_DST=${IPv6_DST}
                ip netns exec ${NS3} bash -c \
-                       "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+                       "nc -6 -l -p 9000 > ${TMPFILE} &"
                RET=$?
        else
                echo "    test_gso: unknown PROTO: ${PROTO}"
index 00c9020..84de7bc 100644 (file)
@@ -3,9 +3,14 @@
 #
 # Runs a set of tests in a given subdirectory.
 export skip_rc=4
+export timeout_rc=124
 export logfile=/dev/stdout
 export per_test_logging=
 
+# Defaults for "settings" file fields:
+# "timeout" how many seconds to let each test run before failing.
+export kselftest_default_timeout=45
+
 # There isn't a shell-agnostic way to find the path of a sourced file,
 # so we must rely on BASE_DIR being set to find other tools.
 if [ -z "$BASE_DIR" ]; then
@@ -24,6 +29,16 @@ tap_prefix()
        fi
 }
 
+tap_timeout()
+{
+       # Make sure tests will time out if utility is available.
+       if [ -x /usr/bin/timeout ] ; then
+               /usr/bin/timeout "$kselftest_timeout" "$1"
+       else
+               "$1"
+       fi
+}
+
 run_one()
 {
        DIR="$1"
@@ -32,6 +47,18 @@ run_one()
 
        BASENAME_TEST=$(basename $TEST)
 
+       # Reset any "settings"-file variables.
+       export kselftest_timeout="$kselftest_default_timeout"
+       # Load per-test-directory kselftest "settings" file.
+       settings="$BASE_DIR/$DIR/settings"
+       if [ -r "$settings" ] ; then
+               while read line ; do
+                       field=$(echo "$line" | cut -d= -f1)
+                       value=$(echo "$line" | cut -d= -f2-)
+                       eval "kselftest_$field"="$value"
+               done < "$settings"
+       fi
+
        TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
        echo "# $TEST_HDR_MSG"
        if [ ! -x "$TEST" ]; then
@@ -44,14 +71,17 @@ run_one()
                echo "not ok $test_num $TEST_HDR_MSG"
        else
                cd `dirname $TEST` > /dev/null
-               (((((./$BASENAME_TEST 2>&1; echo $? >&3) |
+               ((((( tap_timeout ./$BASENAME_TEST 2>&1; echo $? >&3) |
                        tap_prefix >&4) 3>&1) |
                        (read xs; exit $xs)) 4>>"$logfile" &&
                echo "ok $test_num $TEST_HDR_MSG") ||
-               (if [ $? -eq $skip_rc ]; then   \
+               (rc=$?; \
+               if [ $rc -eq $skip_rc ]; then   \
                        echo "not ok $test_num $TEST_HDR_MSG # SKIP"
+               elif [ $rc -eq $timeout_rc ]; then \
+                       echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT"
                else
-                       echo "not ok $test_num $TEST_HDR_MSG"
+                       echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
                fi)
                cd - >/dev/null
        fi
index ec30446..e2e1911 100755 (executable)
@@ -24,12 +24,12 @@ main()
                echo "$0: Installing in specified location - $install_loc ..."
        fi
 
-       install_dir=$install_loc/kselftest
+       install_dir=$install_loc/kselftest_install
 
 # Create install directory
        mkdir -p $install_dir
 # Build tests
-       INSTALL_PATH=$install_dir make install
+       KSFT_INSTALL_PATH=$install_dir make install
 }
 
 main "$@"
index 62c591f..c5ec868 100644 (file)
@@ -22,6 +22,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
@@ -48,7 +49,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
        -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
 
 no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
-        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+        $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
 
 # On s390, build the testcases KVM-enabled
 pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
index 0c17f2e..ff23401 100644 (file)
@@ -1083,6 +1083,9 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 #define VMX_BASIC_MEM_TYPE_WB  6LLU
 #define VMX_BASIC_INOUT                0x0040000000000000LLU
 
+/* VMX_EPT_VPID_CAP bits */
+#define VMX_EPT_VPID_CAP_AD_BITS       (1ULL << 21)
+
 /* MSR_IA32_VMX_MISC bits */
 #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
 #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
index 69b1705..6ae5a47 100644 (file)
@@ -569,6 +569,10 @@ struct vmx_pages {
        void *enlightened_vmcs_hva;
        uint64_t enlightened_vmcs_gpa;
        void *enlightened_vmcs;
+
+       void *eptp_hva;
+       uint64_t eptp_gpa;
+       void *eptp;
 };
 
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
@@ -576,4 +580,14 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                  uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+                uint32_t eptp_memslot);
+void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+                       uint32_t memslot, uint32_t eptp_memslot);
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
+                 uint32_t eptp_memslot);
+
 #endif /* SELFTEST_KVM_VMX_H */
index 80a338b..41cf454 100644 (file)
@@ -705,7 +705,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
  *   on error (e.g. currently no memory region using memslot as a KVM
  *   memory slot ID).
  */
-static struct userspace_mem_region *
+struct userspace_mem_region *
 memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
        struct userspace_mem_region *region;
index f36262e..ac50c42 100644 (file)
@@ -68,4 +68,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
 void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
 void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
 
+struct userspace_mem_region *
+memslot2region(struct kvm_vm *vm, uint32_t memslot);
+
 #endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
index c53dbc6..6698cb7 100644 (file)
@@ -1085,7 +1085,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
        for (i = 0; i < nmsrs; i++)
                state->msrs.entries[i].index = list->indices[i];
        r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
-        TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)",
+        TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
                 r, r == nmsrs ? -1 : list->indices[r]);
 
        r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
index 9cef045..fab8f6b 100644 (file)
@@ -7,11 +7,39 @@
 
 #include "test_util.h"
 #include "kvm_util.h"
+#include "../kvm_util_internal.h"
 #include "processor.h"
 #include "vmx.h"
 
+#define PAGE_SHIFT_4K  12
+
+#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
+
 bool enable_evmcs;
 
+struct eptPageTableEntry {
+       uint64_t readable:1;
+       uint64_t writable:1;
+       uint64_t executable:1;
+       uint64_t memory_type:3;
+       uint64_t ignore_pat:1;
+       uint64_t page_size:1;
+       uint64_t accessed:1;
+       uint64_t dirty:1;
+       uint64_t ignored_11_10:2;
+       uint64_t address:40;
+       uint64_t ignored_62_52:11;
+       uint64_t suppress_ve:1;
+};
+
+struct eptPageTablePointer {
+       uint64_t memory_type:3;
+       uint64_t page_walk_length:3;
+       uint64_t ad_enabled:1;
+       uint64_t reserved_11_07:5;
+       uint64_t address:40;
+       uint64_t reserved_63_52:12;
+};
+
 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
 {
        uint16_t evmcs_ver;
@@ -174,15 +202,35 @@ bool load_vmcs(struct vmx_pages *vmx)
  */
 static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
 {
+       uint32_t sec_exec_ctl = 0;
+
        vmwrite(VIRTUAL_PROCESSOR_ID, 0);
        vmwrite(POSTED_INTR_NV, 0);
 
        vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
-       if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0))
+
+       if (vmx->eptp_gpa) {
+               uint64_t ept_paddr;
+               struct eptPageTablePointer eptp = {
+                       .memory_type = VMX_BASIC_MEM_TYPE_WB,
+                       .page_walk_length = 3, /* 4-level walk, encoded as levels - 1 */
+                       .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
+                       .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
+               };
+
+               memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
+               vmwrite(EPT_POINTER, ept_paddr);
+               sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
+       }
+
+       if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
                vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                        rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
-       else
+       else {
                vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
+               GUEST_ASSERT(!sec_exec_ctl);
+       }
+
        vmwrite(EXCEPTION_BITMAP, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
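
For reference, the eptPageTablePointer bitfields above follow the
architectural EPTP layout: memory type in bits 2:0, page-walk length minus
one in bits 5:3, the accessed/dirty enable in bit 6, and the PML4 physical
address in bits 51:12. A manual encoding equivalent to the memcpy() above,
shown purely as a sketch:

	uint64_t eptp_val = VMX_BASIC_MEM_TYPE_WB             /* bits 2:0   */
			  | (3ULL << 3)                       /* bits 5:3   */
			  | ((uint64_t)eptp.ad_enabled << 6)  /* bit 6      */
			  | (vmx->eptp_gpa & ~0xfffULL);      /* bits 51:12 */
	vmwrite(EPT_POINTER, eptp_val);
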
@@ -327,3 +375,152 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_host_state();
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                  uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
+{
+       uint16_t index[4];
+       struct eptPageTableEntry *pml4e;
+
+       TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
+                   "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+
+       TEST_ASSERT((nested_paddr % vm->page_size) == 0,
+                   "Nested physical address not on page boundary,\n"
+                   "  nested_paddr: 0x%lx vm->page_size: 0x%x",
+                   nested_paddr, vm->page_size);
+       TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
+                   "Nested physical address beyond maximum supported,\n"
+                   "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+                   nested_paddr, vm->max_gfn, vm->page_size);
+       TEST_ASSERT((paddr % vm->page_size) == 0,
+                   "Physical address not on page boundary,\n"
+                   "  paddr: 0x%lx vm->page_size: 0x%x",
+                   paddr, vm->page_size);
+       TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+                   "Physical address beyond maximum supported,\n"
+                   "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+                   paddr, vm->max_gfn, vm->page_size);
+
+       index[0] = (nested_paddr >> 12) & 0x1ffu;
+       index[1] = (nested_paddr >> 21) & 0x1ffu;
+       index[2] = (nested_paddr >> 30) & 0x1ffu;
+       index[3] = (nested_paddr >> 39) & 0x1ffu;
+
+       /* Allocate page directory pointer table if not present. */
+       pml4e = vmx->eptp_hva;
+       if (!pml4e[index[3]].readable) {
+               pml4e[index[3]].address = vm_phy_page_alloc(vm,
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                       >> vm->page_shift;
+               pml4e[index[3]].writable = true;
+               pml4e[index[3]].readable = true;
+               pml4e[index[3]].executable = true;
+       }
+
+       /* Allocate page directory table if not present. */
+       struct eptPageTableEntry *pdpe;
+       pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
+       if (!pdpe[index[2]].readable) {
+               pdpe[index[2]].address = vm_phy_page_alloc(vm,
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                       >> vm->page_shift;
+               pdpe[index[2]].writable = true;
+               pdpe[index[2]].readable = true;
+               pdpe[index[2]].executable = true;
+       }
+
+       /* Allocate page table if not present. */
+       struct eptPageTableEntry *pde;
+       pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
+       if (!pde[index[1]].readable) {
+               pde[index[1]].address = vm_phy_page_alloc(vm,
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                       >> vm->page_shift;
+               pde[index[1]].writable = true;
+               pde[index[1]].readable = true;
+               pde[index[1]].executable = true;
+       }
+
+       /* Fill in page table entry. */
+       struct eptPageTableEntry *pte;
+       pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
+       pte[index[0]].address = paddr >> vm->page_shift;
+       pte[index[0]].writable = true;
+       pte[index[0]].readable = true;
+       pte[index[0]].executable = true;
+
+       /*
+        * For now mark these as accessed and dirty because the only
+        * testcase we have needs that.  Can be reconsidered later.
+        */
+       pte[index[0]].accessed = true;
+       pte[index[0]].dirty = true;
+}
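
As a worked example of the decomposition above: for nested_paddr 0xc0001000
(NESTED_TEST_MEM1 in the new test), the four 9-bit indexes select PML4 entry
0, PDPT entry 3, page-directory entry 0 and page-table entry 1:

	index[0] = (0xc0001000 >> 12) & 0x1ff;   /* = 0x001, page-table slot     */
	index[1] = (0xc0001000 >> 21) & 0x1ff;   /* = 0x000, page-directory slot */
	index[2] = (0xc0001000 >> 30) & 0x1ff;   /* = 0x003, PDPT slot           */
	index[3] = (0xc0001000 >> 39) & 0x1ff;   /* = 0x000, PML4 slot           */
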
+
+/*
+ * Map a range of EPT guest physical addresses to the VM's physical addresses
+ *
+ * Input Args:
+ *   vmx - VMX page tracking structure (provides the EPT root)
+ *   vm - Virtual Machine
+ *   nested_paddr - Nested guest physical address to map
+ *   paddr - VM Physical Address
+ *   size - The size of the range to map
+ *   eptp_memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within the VM given by vm, creates a nested guest translation from the
+ * page range starting at nested_paddr to the page range starting at paddr.
+ */
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+               uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+               uint32_t eptp_memslot)
+{
+       size_t page_size = vm->page_size;
+       size_t npages = size / page_size;
+
+       TEST_ASSERT(nested_paddr + size > nested_paddr, "Nested paddr overflow");
+       TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+
+       while (npages--) {
+               nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
+               nested_paddr += page_size;
+               paddr += page_size;
+       }
+}
+
+/*
+ * Prepare an identity extended page table that maps all of the
+ * VM's physical pages.
+ */
+void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+                       uint32_t memslot, uint32_t eptp_memslot)
+{
+       sparsebit_idx_t i, last;
+       struct userspace_mem_region *region =
+               memslot2region(vm, memslot);
+
+       i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
+       last = i + (region->region.memory_size >> vm->page_shift);
+       for (;;) {
+               i = sparsebit_next_clear(region->unused_phy_pages, i);
+               if (i > last)
+                       break;
+
+               nested_map(vmx, vm,
+                          (uint64_t)i << vm->page_shift,
+                          (uint64_t)i << vm->page_shift,
+                          1 << vm->page_shift,
+                          eptp_memslot);
+       }
+}
+
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
+                 uint32_t eptp_memslot)
+{
+       vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+       vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
+       vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
+}
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
new file mode 100644 (file)
index 0000000..0bca1cf
--- /dev/null
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty page logging test
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#define VCPU_ID                                1
+
+/* The memory slot index to track dirty pages */
+#define TEST_MEM_SLOT_INDEX            1
+#define TEST_MEM_SIZE                  3       /* in pages */
+
+/* L1 guest test virtual memory offset */
+#define GUEST_TEST_MEM                 0xc0000000
+
+/* L2 guest test virtual memory offset */
+#define NESTED_TEST_MEM1               0xc0001000
+#define NESTED_TEST_MEM2               0xc0002000
+
+static void l2_guest_code(void)
+{
+       *(volatile uint64_t *)NESTED_TEST_MEM1;
+       *(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
+       GUEST_SYNC(true);
+       GUEST_SYNC(false);
+
+       *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+       GUEST_SYNC(true);
+       *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+       GUEST_SYNC(true);
+       GUEST_SYNC(false);
+
+       /* Exit to L1 and never come back.  */
+       vmcall();
+}
+
+void l1_guest_code(struct vmx_pages *vmx)
+{
+#define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+       GUEST_ASSERT(vmx->vmcs_gpa);
+       GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+       GUEST_ASSERT(load_vmcs(vmx));
+
+       prepare_vmcs(vmx, l2_guest_code,
+                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       GUEST_SYNC(false);
+       GUEST_ASSERT(!vmlaunch());
+       GUEST_SYNC(false);
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       vm_vaddr_t vmx_pages_gva = 0;
+       struct vmx_pages *vmx;
+       unsigned long *bmap;
+       uint64_t *host_test_mem;
+
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct ucall uc;
+       bool done = false;
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+       vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+       vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+       run = vcpu_state(vm, VCPU_ID);
+
+       /* Add an extra memory slot for testing dirty logging */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   GUEST_TEST_MEM,
+                                   TEST_MEM_SLOT_INDEX,
+                                   TEST_MEM_SIZE,
+                                   KVM_MEM_LOG_DIRTY_PAGES);
+
+       /*
+        * Add an identity map for GVA range [0xc0000000, 0xc0002000).  This
+        * Add an identity map for GVA range [0xc0000000, 0xc0003000).  This
+        */
+       virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
+                TEST_MEM_SIZE * 4096, 0);
+
+       /*
+        * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
+        * 0xc0000000.
+        *
+        * Note that prepare_eptp should be called only after L1's GPA map is
+        * done, meaning after the last call to virt_map.
+        */
+       prepare_eptp(vmx, vm, 0);
+       nested_map_memslot(vmx, vm, 0, 0);
+       nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
+       nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
+
+       bmap = bitmap_alloc(TEST_MEM_SIZE);
+       host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
+
+       while (!done) {
+               memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vm, VCPU_ID, &uc)) {
+               case UCALL_ABORT:
+                       TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
+                                   __FILE__, uc.args[1]);
+                       /* NOT REACHED */
+               case UCALL_SYNC:
+                       /*
+                        * The nested guest wrote at offset 0x1000 in the memslot, but the
+                        * dirty bitmap must be filled in according to L1 GPA, not L2.
+                        */
+                       kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+                       if (uc.args[1]) {
+                               TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
+                               TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
+                       } else {
+                               TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
+                               TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
+                       }
+
+                       TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
+                       TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
+                       TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
+                       TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
+                       break;
+               case UCALL_DONE:
+                       done = true;
+                       break;
+               default:
+                       TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+               }
+       }
+}
index c7cced7..8aefd81 100644 (file)
@@ -21,3 +21,4 @@ ipv6_flowlabel
 ipv6_flowlabel_mgr
 so_txtime
 tcp_fastopen_backup_key
+nettest
index b8265ee..614b31a 100644 (file)
@@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
                .tfail = true,
        },
        {
-               /* send a single MSS: will fail with GSO, because the segment
-                * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-                */
+               /* send a single MSS: will fall back to no GSO */
                .tlen = CONST_MSS_V4,
                .gso_len = CONST_MSS_V4,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
                .tfail = true,
        },
        {
-               /* send a single 1B MSS: will fail, see single MSS above */
+               /* send a single 1B MSS: will fall back to no GSO */
                .tlen = 1,
                .gso_len = 1,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
                .tfail = true,
        },
        {
-               /* send a single MSS: will fail with GSO, because the segment
-                * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-                */
+               /* send a single MSS: will fall back to no GSO */
                .tlen = CONST_MSS_V6,
                .gso_len = CONST_MSS_V6,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
                .tfail = true,
        },
        {
-               /* send a single 1B MSS: will fail, see single MSS above */
+               /* send a single 1B MSS: will fall back to no GSO */
                .tlen = 1,
                .gso_len = 1,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
index 464c9b7..7550f08 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -g -I../../../../usr/include/ -lpthread
+CFLAGS += -g -I../../../../usr/include/ -pthread
 
 TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait
 
index 9868a5d..f85a093 100644 (file)
@@ -636,7 +636,7 @@ int main(int argc, char *argv[])
                        nrthreads = strtoul(optarg, NULL, 10);
                        break;
                case 'l':
-                       strncpy(logdir, optarg, LOGDIR_NAME_SIZE);
+                       strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1);
                        break;
                case 't':
                        run_time = strtoul(optarg, NULL, 10);
diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings
new file mode 100644 (file)
index 0000000..ba4d85f
--- /dev/null
@@ -0,0 +1 @@
+timeout=90
index c0534e2..cb3fc09 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char **argv)
        char *file = "/dev/zero";
        char *p;
 
-       while ((opt = getopt(argc, argv, "m:r:n:f:tTLUSH")) != -1) {
+       while ((opt = getopt(argc, argv, "m:r:n:f:tTLUwSH")) != -1) {
                switch (opt) {
                case 'm':
                        size = atoi(optarg) * MB;
index afff120..f45e510 100644 (file)
@@ -19,7 +19,7 @@
 
 int fd;
 const char v = 'V';
-static const char sopts[] = "bdehp:t:Tn:NLf:";
+static const char sopts[] = "bdehp:t:Tn:NLf:i";
 static const struct option lopts[] = {
        {"bootstatus",          no_argument, NULL, 'b'},
        {"disable",             no_argument, NULL, 'd'},
@@ -32,6 +32,7 @@ static const struct option lopts[] = {
        {"getpretimeout",       no_argument, NULL, 'N'},
        {"gettimeleft",         no_argument, NULL, 'L'},
        {"file",          required_argument, NULL, 'f'},
+       {"info",                no_argument, NULL, 'i'},
        {NULL,                  no_argument, NULL, 0x0}
 };
 
@@ -72,6 +73,7 @@ static void usage(char *progname)
        printf("Usage: %s [options]\n", progname);
        printf(" -f, --file\t\tOpen watchdog device file\n");
        printf("\t\t\tDefault is /dev/watchdog\n");
+       printf(" -i, --info\t\tShow watchdog_info\n");
        printf(" -b, --bootstatus\tGet last boot status (Watchdog/POR)\n");
        printf(" -d, --disable\t\tTurn off the watchdog timer\n");
        printf(" -e, --enable\t\tTurn on the watchdog timer\n");
@@ -97,6 +99,7 @@ int main(int argc, char *argv[])
        int c;
        int oneshot = 0;
        char *file = "/dev/watchdog";
+       struct watchdog_info info;
 
        setbuf(stdout, NULL);
 
@@ -118,6 +121,16 @@ int main(int argc, char *argv[])
                exit(-1);
        }
 
+       /*
+        * Validate that `file` is a watchdog device
+        */
+       ret = ioctl(fd, WDIOC_GETSUPPORT, &info);
+       if (ret) {
+               printf("WDIOC_GETSUPPORT error '%s'\n", strerror(errno));
+               close(fd);
+               exit(ret);
+       }
+
        optind = 0;
 
        while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
@@ -205,6 +218,18 @@ int main(int argc, char *argv[])
                case 'f':
                        /* Handled above */
                        break;
+               case 'i':
+                       /*
+                        * The watchdog_info was already fetched while validating
+                        * the device file above, so just print it here.
+                        */
+                       oneshot = 1;
+                       printf("watchdog_info:\n");
+                       printf(" identity:\t\t%s\n", info.identity);
+                       printf(" firmware_version:\t%u\n",
+                              info.firmware_version);
+                       printf(" options:\t\t%08x\n", info.options);
+                       break;
 
                default:
                        usage(argv[0]);
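
With the watchdog_info fetched up front, the new -i option simply prints the
identity, firmware version and option flags. As an illustrative extension
(not part of this patch), individual capability bits from <linux/watchdog.h>
could be decoded the same way:

	if (info.options & WDIOF_SETTIMEOUT)
		printf(" timeout is settable (WDIOF_SETTIMEOUT)\n");
	if (info.options & WDIOF_KEEPALIVEPING)
		printf(" keep-alive ping supported (WDIOF_KEEPALIVEPING)\n");
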
index f91aeb5..8f41cd6 100644 (file)
@@ -29,4 +29,6 @@ enum dma_data_direction {
 #define dma_unmap_single(...) do { } while (0)
 #define dma_unmap_page(...) do { } while (0)
 
+#define dma_max_mapping_size(...) SIZE_MAX
+
 #endif
diff --git a/tools/virtio/xen/xen.h b/tools/virtio/xen/xen.h
new file mode 100644 (file)
index 0000000..f569387
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef XEN_XEN_STUB_H
+#define XEN_XEN_STUB_H
+
+#define xen_domain() 0
+
+#endif
index c9449aa..57b20f7 100644 (file)
@@ -29,13 +29,11 @@ header-test- += linux/android/binderfs.h
 header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h
 header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h
 header-test- += linux/coda.h
-header-test- += linux/coda_psdev.h
 header-test- += linux/elfcore.h
 header-test- += linux/errqueue.h
 header-test- += linux/fsmap.h
 header-test- += linux/hdlc/ioctl.h
 header-test- += linux/ivtv.h
-header-test- += linux/jffs2.h
 header-test- += linux/kexec.h
 header-test- += linux/matroxfb.h
 header-test- += linux/netfilter_ipv4/ipt_LOG.h
@@ -55,20 +53,12 @@ header-test- += linux/v4l2-mediabus.h
 header-test- += linux/v4l2-subdev.h
 header-test- += linux/videodev2.h
 header-test- += linux/vm_sockets.h
-header-test- += scsi/scsi_bsg_fc.h
-header-test- += scsi/scsi_netlink.h
-header-test- += scsi/scsi_netlink_fc.h
 header-test- += sound/asequencer.h
 header-test- += sound/asoc.h
 header-test- += sound/asound.h
 header-test- += sound/compress_offload.h
 header-test- += sound/emu10k1.h
 header-test- += sound/sfnt_info.h
-header-test- += sound/sof/eq.h
-header-test- += sound/sof/fw.h
-header-test- += sound/sof/header.h
-header-test- += sound/sof/manifest.h
-header-test- += sound/sof/trace.h
 header-test- += xen/evtchn.h
 header-test- += xen/gntdev.h
 header-test- += xen/privcmd.h
index 55fed77..4fd4f6d 100644 (file)
@@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending,
 #endif /* _TRACE_VGIC_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic
+#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
 
index e6de315..fd68fbe 100644 (file)
@@ -617,8 +617,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 
                stat_data->kvm = kvm;
                stat_data->offset = p->offset;
+               stat_data->mode = p->mode ? p->mode : 0644;
                kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-               debugfs_create_file(p->name, 0644, kvm->debugfs_dentry,
+               debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
                                    stat_data, stat_fops_per_vm[p->kind]);
        }
        return 0;
@@ -3929,7 +3930,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
        if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
                return -ENOENT;
 
-       if (simple_attr_open(inode, file, get, set, fmt)) {
+       if (simple_attr_open(inode, file, get,
+                            stat_data->mode & S_IWUGO ? set : NULL,
+                            fmt)) {
                kvm_put_kvm(stat_data->kvm);
                return -ENOMEM;
        }
@@ -4177,7 +4180,8 @@ static void kvm_init_debug(void)
 
        kvm_debugfs_num_entries = 0;
        for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-               debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
+               int mode = p->mode ? p->mode : 0644;
+               debugfs_create_file(p->name, mode, kvm_debugfs_dir,
                                    (void *)(long)p->offset,
                                    stat_fops[p->kind]);
        }