Merge tag 'pci-v4.18-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 19 Jul 2018 18:54:04 +0000 (11:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 19 Jul 2018 18:54:04 +0000 (11:54 -0700)
Pull PCI fixes from Bjorn Helgaas:

 - Fix crashes that happen when PHY drivers are left disabled in the V3
   Semiconductor, MediaTek, Faraday, Aardvark, DesignWare, Versatile,
   and X-Gene host controller drivers (Sergei Shtylyov)

 - Fix a NULL pointer dereference in the endpoint library configfs
   support (Kishon Vijay Abraham I)

 - Fix a race condition in Hyper-V IRQ handling (Dexuan Cui)

* tag 'pci-v4.18-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci:
  PCI: v3-semi: Fix I/O space page leak
  PCI: mediatek: Fix I/O space page leak
  PCI: faraday: Fix I/O space page leak
  PCI: aardvark: Fix I/O space page leak
  PCI: designware: Fix I/O space page leak
  PCI: versatile: Fix I/O space page leak
  PCI: xgene: Fix I/O space page leak
  PCI: OF: Fix I/O space page leak
  PCI: endpoint: Fix NULL pointer dereference error when CONFIGFS is disabled
  PCI: hv: Disable/enable IRQs rather than BH in hv_compose_msi_msg()

1261 files changed:
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/intel_pstate.rst
Documentation/core-api/kernel-api.rst
Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt [new file with mode: 0644]
Documentation/devicetree/bindings/input/touchscreen/hideep.txt
Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
Documentation/devicetree/bindings/mips/brcm/soc.txt
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/devicetree/bindings/power/power_domain.txt
Documentation/devicetree/bindings/regulator/tps65090.txt
Documentation/devicetree/bindings/reset/st,sti-softreset.txt
Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
Documentation/devicetree/bindings/sound/qcom,apq8096.txt
Documentation/devicetree/bindings/w1/w1-gpio.txt
Documentation/driver-api/infrastructure.rst
Documentation/filesystems/Locking
Documentation/filesystems/cifs/AUTHORS
Documentation/filesystems/cifs/CHANGES
Documentation/filesystems/cifs/TODO
Documentation/filesystems/vfs.txt
Documentation/kbuild/kbuild.txt
Documentation/kbuild/kconfig-language.txt
Documentation/kbuild/kconfig.txt
Documentation/networking/bonding.txt
Documentation/networking/e100.rst
Documentation/networking/e1000.rst
Documentation/networking/strparser.txt
Documentation/trace/histogram.txt
Documentation/usb/gadget_configfs.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/alpha/Kconfig
arch/alpha/lib/Makefile
arch/alpha/lib/dec_and_lock.c [deleted file]
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am3517.dtsi
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/armada-385-synology-ds116.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm-hr2.dtsi
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm5301x.dtsi
arch/arm/boot/dts/da850.dtsi
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/imx51-zii-rdu1.dts
arch/arm/boot/dts/imx6q.dtsi
arch/arm/boot/dts/imx6sx.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/socfpga_arria10.dtsi
arch/arm/common/Makefile
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/crypto/speck-neon-core.S
arch/arm/firmware/Makefile
arch/arm/kernel/head-nommu.S
arch/arm/kernel/signal.c
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-pxa/irq.c
arch/arm/mm/init.c
arch/arm/net/bpf_jit_32.c
arch/arm/xen/enlighten.c
arch/arm64/Makefile
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
arch/arm64/boot/dts/marvell/armada-cp110.dtsi
arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-glue.c
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/simd.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/module.c
arch/arm64/kernel/smp.c
arch/arm64/kvm/fpsimd.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/proc.S
arch/m68k/include/asm/mcf_pgalloc.h
arch/microblaze/Kconfig.debug
arch/microblaze/include/asm/setup.h
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/Makefile
arch/microblaze/kernel/heartbeat.c [deleted file]
arch/microblaze/kernel/platform.c [deleted file]
arch/microblaze/kernel/reset.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/mips/Kconfig
arch/mips/ath79/mach-pb44.c
arch/mips/bcm47xx/setup.c
arch/mips/include/asm/io.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/uapi/asm/unistd.h
arch/mips/kernel/entry.S
arch/mips/kernel/mcount.S
arch/mips/kernel/process.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/signal.c
arch/mips/kernel/traps.c
arch/mips/mm/ioremap.c
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/head.S
arch/openrisc/kernel/traps.c
arch/parisc/Kconfig
arch/parisc/Makefile
arch/parisc/include/asm/signal.h
arch/parisc/include/uapi/asm/unistd.h
arch/parisc/kernel/drivers.c
arch/parisc/kernel/syscall_table.S
arch/parisc/kernel/unwind.c
arch/powerpc/Makefile
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable-4k.h
arch/powerpc/include/asm/book3s/64/pgtable-64k.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/nmi.h
arch/powerpc/include/asm/nohash/32/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/dt_cpu_ftrs.c
arch/powerpc/kernel/pci_32.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/signal.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/kernel/syscalls.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/subpage-prot.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/powermac/time.c
arch/riscv/Kconfig
arch/riscv/include/uapi/asm/elf.h
arch/riscv/kernel/irq.c
arch/riscv/kernel/module.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
arch/s390/Kconfig
arch/s390/include/asm/css_chars.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/entry.S
arch/s390/kernel/signal.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/mm/pgalloc.c
arch/s390/net/bpf_jit_comp.c
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/crypto/aegis128-aesni-asm.S
arch/x86/crypto/aegis128l-aesni-asm.S
arch/x86/crypto/aegis256-aesni-asm.S
arch/x86/crypto/morus1280-avx2-asm.S
arch/x86/crypto/morus1280-sse2-asm.S
arch/x86/crypto/morus640-sse2-asm.S
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/asm.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/Makefile
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/cacheinfo.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce-severity.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/kernel/e820.c
arch/x86/kernel/head64.c
arch/x86/kernel/irqflags.S [new file with mode: 0644]
arch/x86/kernel/kvmclock.c
arch/x86/kernel/quirks.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/traps.c
arch/x86/kernel/uprobes.c
arch/x86/kvm/Kconfig
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/fault.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi_64.c
arch/x86/purgatory/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/enlighten_pvh.c
arch/x86/xen/irq.c
arch/x86/xen/smp_pv.c
block/bio.c
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-softirq.c
block/blk-timeout.c
block/bsg.c
block/sed-opal.c
certs/blacklist.h
crypto/af_alg.c
crypto/algif_aead.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/x509_cert_parser.c
crypto/morus640.c
crypto/sha3_generic.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/uterror.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/nfit.h
drivers/acpi/osl.c
drivers/acpi/pptt.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci_mvebu.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/libata-scsi.c
drivers/ata/sata_fsl.c
drivers/ata/sata_nv.c
drivers/atm/iphase.c
drivers/atm/zatm.c
drivers/base/Makefile
drivers/base/core.c
drivers/base/dma-coherent.c [deleted file]
drivers/base/dma-contiguous.c [deleted file]
drivers/base/dma-mapping.c [deleted file]
drivers/base/power/domain.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_worker.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk.c
drivers/bluetooth/hci_nokia.c
drivers/bus/ti-sysc.c
drivers/char/agp/alpha-agp.c
drivers/char/agp/amd64-agp.c
drivers/char/hw_random/core.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/kcs_bmc.c
drivers/char/random.c
drivers/clk/Makefile
drivers/clk/davinci/da8xx-cfgchip.c
drivers/clk/davinci/psc.h
drivers/clk/sunxi-ng/Makefile
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/timer-stm32.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/dax/device.c
drivers/dax/super.c
drivers/dma/k3dma.c
drivers/dma/pl330.c
drivers/dma/ti/omap-dma.c
drivers/firmware/dmi-id.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/libstub/tpm.c
drivers/fpga/altera-cvp.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/armada/armada_hw.h
drivers/gpu/drm/armada/armada_overlay.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/bridge/sil-sii8620.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_property.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/regs-gsc.h
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dvo.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/sun4i/Makefile
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/udl/udl_transfer.c
drivers/gpu/host1x/dev.c
drivers/gpu/host1x/job.c
drivers/hid/hid-core.c
drivers/hid/hid-debug.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-steam.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hid/usbhid/hiddev.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/nct6775.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-cht-wc.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-stu300.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-smbus.c
drivers/iio/accel/mma8452.c
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/light/tsl2772.c
drivers/iio/pressure/bmp280-core.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/uc.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/hfi1/verbs_txreq.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/input/input-mt.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/goldfish_events.c
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/sc27xx-vibra.c [new file with mode: 0644]
drivers/input/mouse/elan_i2c.h
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/elan_i2c_smbus.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-base.c
drivers/input/rmi4/Kconfig
drivers/input/rmi4/rmi_2d_sensor.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_bus.h
drivers/input/rmi4/rmi_driver.c
drivers/input/rmi4/rmi_f01.c
drivers/input/rmi4/rmi_f03.c
drivers/input/rmi4/rmi_f11.c
drivers/input/rmi4/rmi_f12.c
drivers/input/rmi4/rmi_f30.c
drivers/input/rmi4/rmi_f34.c
drivers/input/rmi4/rmi_f54.c
drivers/input/touchscreen/silead.c
drivers/iommu/Kconfig
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-gic-v2m.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-ls-scfg-msi.c
drivers/isdn/mISDN/socket.c
drivers/lightnvm/Kconfig
drivers/md/dm-raid.c
drivers/md/dm-table.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/rc/bpf-lirc.c
drivers/misc/ibmasm/ibmasmfs.c
drivers/misc/mei/interrupt.c
drivers/misc/vmw_balloon.c
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/chips/cfi_cmdset_0002.c
drivers/mtd/devices/mtd_dataflash.c
drivers/mtd/nand/raw/denali_dt.c
drivers/mtd/nand/raw/mxc_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_macronix.c
drivers/mtd/nand/raw/nand_micron.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/apm/xgene-v2/Kconfig
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/arc/Kconfig
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/calxeda/Kconfig
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/cisco/enic/enic_clsf.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/hisilicon/Kconfig
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlxsw/Kconfig
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/geneve.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/fakelb.c
drivers/net/ieee802154/mcr20a.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/net_failover.c
drivers/net/phy/dp83tc811.c
drivers/net/phy/marvell.c
drivers/net/phy/phy_device.c
drivers/net/phy/sfp-bus.c
drivers/net/ppp/pppoe.c
drivers/net/tun.c
drivers/net/usb/asix_devices.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/rtl8150.c
drivers/net/usb/smsc75xx.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath10k/wmi.h
drivers/net/wireless/ath/wcn36xx/testmode.c
drivers/net/wireless/broadcom/brcm80211/Kconfig
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/marvell/mwifiex/usb.c
drivers/net/wireless/mediatek/mt7601u/phy.c
drivers/net/wireless/quantenna/qtnfmac/Kconfig
drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
drivers/net/wireless/realtek/rtlwifi/base.c
drivers/net/wireless/realtek/rtlwifi/base.h
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvdimm/claim.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/of_private.h
drivers/of/overlay.c
drivers/opp/core.c
drivers/pci/pci-acpi.c
drivers/perf/xgene_pmu.c
drivers/pinctrl/actions/pinctrl-owl.c
drivers/pinctrl/bcm/pinctrl-nsp-mux.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/mediatek/pinctrl-mt7622.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/sh-pfc/pfc-r8a77970.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_qoriq.c
drivers/rtc/interface.c
drivers/rtc/rtc-mrst.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_eer.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h
drivers/s390/cio/Makefile
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_trace.h [new file with mode: 0644]
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/aacraid/aachba.c
drivers/scsi/ipr.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sg.c
drivers/scsi/xen-scsifront.c
drivers/soc/imx/gpcv2.c
drivers/soc/qcom/Kconfig
drivers/soc/renesas/rcar-sysc.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/comedi/drivers/quatech_daqp_cs.c
drivers/staging/rtl8723bs/core/rtw_ap.c
drivers/staging/rtlwifi/rtl8822be/hw.c
drivers/staging/rtlwifi/wifi.h
drivers/staging/typec/Kconfig
drivers/target/target_core_pr.c
drivers/target/target_core_user.c
drivers/thunderbolt/domain.c
drivers/tty/n_tty.c
drivers/tty/serdev/core.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/vt/vt.c
drivers/uio/uio.c
drivers/usb/chipidea/host.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/hcd.h
drivers/usb/dwc2/hcd_intr.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/dwc3-qcom.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/udc/aspeed-vhub/Kconfig
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci-trace.h
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/yurex.c
drivers/usb/serial/ch341.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/keyspan_pda.c
drivers/usb/serial/mos7840.c
drivers/usb/typec/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi_acpi.c
drivers/vfio/pci/Kconfig
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/net.c
drivers/xen/Makefile
drivers/xen/events/events_base.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/privcmd-buf.c [new file with mode: 0644]
drivers/xen/privcmd.c
drivers/xen/privcmd.h
drivers/xen/xen-scsiback.c
fs/aio.c
fs/autofs/Makefile
fs/autofs/dev-ioctl.c
fs/autofs/init.c
fs/binfmt_elf.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/ceph/inode.c
fs/cifs/cifs_debug.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/smbdirect.c
fs/cifs/smbdirect.h
fs/cifs/trace.h
fs/cifs/transport.c
fs/eventfd.c
fs/eventpoll.c
fs/ext2/ext2.h
fs/ext2/super.c
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/ext4_extents.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/inode.c
fs/jbd2/transaction.c
fs/jfs/xattr.c
fs/nfs/delegation.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.h
fs/pipe.c
fs/proc/base.c
fs/proc/generic.c
fs/proc/task_mmu.c
fs/quota/dquot.c
fs/reiserfs/prints.c
fs/select.c
fs/timerfd.c
fs/udf/balloc.c
fs/udf/directory.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/udfdecl.h
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_trans.c
include/acpi/processor.h
include/asm-generic/qspinlock_types.h
include/asm-generic/tlb.h
include/crypto/if_alg.h
include/dt-bindings/clock/imx6ul-clock.h
include/linux/acpi.h
include/linux/atmdev.h
include/linux/backing-dev-defs.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/bpf_lirc.h
include/linux/compat.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/dax.h
include/linux/dma-contiguous.h
include/linux/filter.h
include/linux/fs.h
include/linux/fsl/guts.h
include/linux/ftrace.h
include/linux/hid.h
include/linux/if_bridge.h
include/linux/igmp.h
include/linux/iio/buffer-dma.h
include/linux/input/mt.h
include/linux/irq.h
include/linux/irqdesc.h
include/linux/kernel.h
include/linux/kthread.h
include/linux/libata.h
include/linux/marvell_phy.h
include/linux/memory.h
include/linux/mlx5/eswitch.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mod_devicetable.h
include/linux/net.h
include/linux/netdevice.h
include/linux/nfs_xdr.h
include/linux/pm_domain.h
include/linux/poll.h
include/linux/refcount.h
include/linux/rmi.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/slub_def.h
include/linux/spinlock.h
include/linux/syscalls.h
include/linux/uio_driver.h
include/net/bluetooth/bluetooth.h
include/net/ip6_fib.h
include/net/ip6_route.h
include/net/ipv6.h
include/net/iucv/af_iucv.h
include/net/net_namespace.h
include/net/netfilter/nf_tables_core.h
include/net/netfilter/nf_tproxy.h
include/net/netns/ipv6.h
include/net/pkt_cls.h
include/net/sctp/sctp.h
include/net/tc_act/tc_csum.h
include/net/tc_act/tc_tunnel_key.h
include/net/tcp.h
include/net/tls.h
include/net/udp.h
include/net/xdp_sock.h
include/rdma/ib_verbs.h
include/uapi/linux/aio_abi.h
include/uapi/linux/bpf.h
include/uapi/linux/ethtool.h
include/uapi/linux/nbd.h
include/uapi/linux/rseq.h
include/uapi/linux/target_core_user.h
include/uapi/linux/tcp.h
include/uapi/linux/types_32_64.h [deleted file]
include/xen/xen.h
init/Kconfig
kernel/Makefile
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/dma/Kconfig [new file with mode: 0644]
kernel/dma/Makefile [new file with mode: 0644]
kernel/dma/coherent.c [new file with mode: 0644]
kernel/dma/contiguous.c [new file with mode: 0644]
kernel/dma/debug.c [new file with mode: 0644]
kernel/dma/direct.c [new file with mode: 0644]
kernel/dma/mapping.c [new file with mode: 0644]
kernel/dma/noncoherent.c [new file with mode: 0644]
kernel/dma/swiotlb.c [new file with mode: 0644]
kernel/dma/virt.c [new file with mode: 0644]
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/irq/debugfs.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/rwsem.c
kernel/rseq.c
kernel/sched/core.c
kernel/sched/cpufreq_schedutil.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/softirq.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/tick-common.c
kernel/time/time.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_events_filter.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_output.c
lib/Kconfig
lib/Kconfig.kasan
lib/Makefile
lib/dec_and_lock.c
lib/dma-debug.c [deleted file]
lib/dma-direct.c [deleted file]
lib/dma-noncoherent.c [deleted file]
lib/dma-virt.c [deleted file]
lib/percpu_ida.c
lib/refcount.c
lib/rhashtable.c
lib/scatterlist.c
lib/swiotlb.c [deleted file]
lib/test_bpf.c
lib/test_printf.c
mm/backing-dev.c
mm/debug.c
mm/gup.c
mm/hugetlb.c
mm/kasan/kasan.c
mm/memblock.c
mm/mmap.c
mm/page_alloc.c
mm/rmap.c
mm/slab_common.c
mm/slub.c
mm/vmstat.c
net/8021q/vlan.c
net/9p/client.c
net/Makefile
net/appletalk/ddp.c
net/atm/br2684.c
net/atm/clip.c
net/atm/common.c
net/atm/common.h
net/atm/lec.c
net/atm/mpc.c
net/atm/pppoatm.c
net/atm/pvc.c
net/atm/raw.c
net/atm/svc.c
net/ax25/af_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/hard-interface.c
net/batman-adv/translation-table.c
net/bluetooth/af_bluetooth.c
net/bluetooth/hci_sock.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bpf/test_run.c
net/bpfilter/.gitignore [new file with mode: 0644]
net/bpfilter/Kconfig
net/bpfilter/Makefile
net/bpfilter/bpfilter_kern.c
net/bpfilter/bpfilter_umh_blob.S [new file with mode: 0644]
net/caif/caif_socket.c
net/can/bcm.c
net/can/raw.c
net/core/datagram.c
net/core/dev.c
net/core/dev_ioctl.c
net/core/fib_rules.c
net/core/filter.c
net/core/gen_stats.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ccids/ccid3.c
net/dccp/dccp.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/proto.c
net/decnet/af_decnet.c
net/dns_resolver/dns_key.c
net/ieee802154/6lowpan/core.c
net/ieee802154/socket.c
net/ipv4/af_inet.c
net/ipv4/fib_frontend.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/nf_tproxy_ipv4.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/calipso.c
net/ipv6/exthdrs.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_tproxy_ipv6.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/llc/af_llc.c
net/mac80211/tx.c
net/ncsi/ncsi-aen.c
net/ncsi/ncsi-manage.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_log.c
net/netfilter/nf_tables_set_core.c [new file with mode: 0644]
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_compat.c
net/netfilter/nft_set_bitmap.c
net/netfilter/nft_set_hash.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_TPROXY.c
net/netlink/af_netlink.c
net/netrom/af_netrom.c
net/nfc/llcp_commands.c
net/nfc/llcp_sock.c
net/nfc/rawsock.c
net/nsh/nsh.c
net/packet/af_packet.c
net/phonet/socket.c
net/qrtr/qrtr.c
net/rds/connection.c
net/rds/loop.c
net/rds/loop.h
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/sched/act_csum.c
net/sched/act_ife.c
net/sched/act_tunnel_key.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_blackhole.c
net/sched/sch_fq_codel.c
net/sched/sch_hfsc.c
net/sctp/chunk.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_tx.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/xprt.c
net/tipc/discover.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport.c
net/wireless/nl80211.c
net/x25/af_x25.c
net/xdp/xsk.c
net/xdp/xsk_queue.h
samples/bpf/.gitignore [new file with mode: 0644]
samples/bpf/parse_varlen.c
samples/bpf/test_overhead_user.c
samples/bpf/trace_event_user.c
samples/bpf/xdp2skb_meta.sh
samples/bpf/xdp_fwd_kern.c
samples/bpf/xdpsock_user.c
samples/vfio-mdev/mbochs.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.clean
scripts/Makefile.modbuiltin
scripts/Makefile.modinst
scripts/Makefile.modpost
scripts/Makefile.modsign
scripts/cc-can-link.sh
scripts/checkpatch.pl
scripts/extract-vmlinux
scripts/gcc-x86_64-has-stack-protector.sh
scripts/kconfig/expr.h
scripts/kconfig/preprocess.c
scripts/kconfig/zconf.y
scripts/tags.sh
security/keys/dh.c
security/selinux/selinuxfs.c
security/smack/smack_lsm.c
sound/core/rawmidi.c
sound/core/seq/seq_clientmgr.c
sound/core/timer.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/lx6464es/lx6464es.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/unistd.h
tools/arch/x86/include/asm/cpufeatures.h
tools/bpf/bpftool/perf.c
tools/bpf/bpftool/prog.c
tools/build/Build.include
tools/build/Makefile
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/objtool/check.c
tools/objtool/elf.c
tools/perf/Documentation/perf-stat.txt
tools/perf/Makefile.config
tools/perf/arch/powerpc/util/skip-callchain-idx.c
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/perf_regs.c
tools/perf/bench/numa.c
tools/perf/builtin-annotate.c
tools/perf/builtin-c2c.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/jvmti/jvmti_agent.c
tools/perf/pmu-events/Build
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
tools/perf/scripts/python/sched-migration.py
tools/perf/tests/builtin-test.c
tools/perf/tests/parse-events.c
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/tests/shell/trace+probe_vfs_getname.sh
tools/perf/tests/topology.c
tools/perf/ui/gtk/hists.c
tools/perf/util/c++/clang.cpp
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
tools/perf/util/llvm-utils.c
tools/perf/util/parse-events.y
tools/perf/util/pmu.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/sort.h
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/bpf/test_lirc_mode2.sh
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/bpf/test_tunnel.sh
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/config
tools/testing/selftests/net/fib_tests.sh [changed mode: 0644->0755]
tools/testing/selftests/net/udpgso_bench.sh
tools/testing/selftests/pstore/pstore_post_reboot_tests
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/rseq/rseq-arm.h
tools/testing/selftests/rseq/rseq-mips.h [new file with mode: 0644]
tools/testing/selftests/rseq/rseq.h
tools/testing/selftests/rseq/run_param_test.sh [changed mode: 0644->0755]
tools/testing/selftests/sparc64/Makefile
tools/testing/selftests/sparc64/drivers/Makefile
tools/testing/selftests/static_keys/test_static_keys.sh
tools/testing/selftests/sync/config [new file with mode: 0644]
tools/testing/selftests/sysctl/sysctl.sh
tools/testing/selftests/user/test_user_copy.sh
tools/testing/selftests/vm/compaction_test.c
tools/testing/selftests/vm/mlock2-tests.c
tools/testing/selftests/vm/run_vmtests
tools/testing/selftests/vm/userfaultfd.c
tools/testing/selftests/x86/sigreturn.c
tools/testing/selftests/zram/zram.sh
tools/testing/selftests/zram/zram_lib.sh
tools/virtio/linux/scatterlist.h
virt/kvm/Kconfig
virt/kvm/arm/mmu.c
virt/kvm/arm/vgic/vgic-v3.c
virt/kvm/eventfd.c
virt/kvm/kvm_main.c

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index efc7aa7a067099f6bacdb860cde13f2d876030a8..533ff5c68970aef7b71e976941e8b305f250f2d5 100644
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+
+       xhci-hcd.quirks         [USB,KNL]
+                       A hex value specifying bitmask with supplemental xhci
+                       host controller quirks. Meaning of each bit can be
+                       consulted in header drivers/usb/host/xhci.h.
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ab2fe0eda1d7c317faefab52363ce96755ac64d5..8f1d3de449b53fedcc78d1aee506e6882f2be90c 100644
@@ -324,8 +324,7 @@ Global Attributes
 
 ``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
 control its functionality at the system level.  They are located in the
-``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all
-CPUs.
+``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs.
 
 Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
 argument is passed to the kernel in the command line.
@@ -379,6 +378,17 @@ argument is passed to the kernel in the command line.
        but it affects the maximum possible value of per-policy P-state limits
        (see `Interpretation of Policy Attributes`_ below for details).
 
+``hwp_dynamic_boost``
+       This attribute is only present if ``intel_pstate`` works in the
+       `active mode with the HWP feature enabled <Active Mode With HWP_>`_ in
+       the processor.  If set (equal to 1), it causes the minimum P-state limit
+       to be increased dynamically for a short time whenever a task previously
+       waiting on I/O is selected to run on a given logical CPU (the purpose
+       of this mechanism is to improve performance).
+
+       This setting has no effect on logical CPUs whose minimum P-state limit
+       is directly set to the highest non-turbo P-state or above it.
+
 .. _status_attr:
 
 ``status``
@@ -410,7 +420,7 @@ argument is passed to the kernel in the command line.
        That only is supported in some configurations, though (for example, if
        the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
        the operation mode of the driver cannot be changed), and if it is not
-       supported in the current configuration, writes to this attribute with
+       supported in the current configuration, writes to this attribute will
        fail with an appropriate error.
 
 Interpretation of Policy Attributes
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8e44aea366c262068900cddaabd240d8615ac552..76fe2d0f5e7d7db307bfa4ead890ead2d8840bdd 100644
@@ -284,7 +284,7 @@ Resources Management
 MTRR Handling
 -------------
 
-.. kernel-doc:: arch/x86/kernel/cpu/mtrr/main.c
+.. kernel-doc:: arch/x86/kernel/cpu/mtrr/mtrr.c
    :export:
 
 Security Framework
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index bdadc3da9556d47e52372f0a68846779dccc1d95..6970f30a3770f8027a2509aab25f4fe75785667e 100644
@@ -66,7 +66,7 @@ Required root node properties:
        - "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale
                                    Octa board.
        - "insignal,origen"       - for Exynos4210-based Insignal Origen board.
-       - "insignal,origen4412    - for Exynos4412-based Insignal Origen board.
+       - "insignal,origen4412"   - for Exynos4412-based Insignal Origen board.
 
 
 Optional nodes:
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 6fddb4f4f71a45f0fc001b7f904a89f6e50948b3..3055d5c2c04e0ab796215803196c7590a69e02be 100644
@@ -36,7 +36,7 @@ Optional nodes:
 
  - port/ports: to describe a connection to an external encoder. The
    binding follows Documentation/devicetree/bindings/graph.txt and
-   suppors a single port with a single endpoint.
+   supports a single port with a single endpoint.
 
  - See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
    Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
diff --git a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
index 20fc72d9e61e5721e56e0aeb0479682f921fd154..45a61b46228712592029e75fe117262ba47d9112 100644
@@ -1,7 +1,7 @@
 Nintendo Wii (Hollywood) GPIO controller
 
 Required properties:
-- compatible: "nintendo,hollywood-gpio
+- compatible: "nintendo,hollywood-gpio"
 - reg: Physical base address and length of the controller's registers.
 - gpio-controller: Marks the device node as a GPIO controller.
 - #gpio-cells: Should be <2>. The first cell is the pin number and the
diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
new file mode 100644
index 0000000..f2ec0d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
@@ -0,0 +1,23 @@
+Spreadtrum SC27xx PMIC Vibrator
+
+Required properties:
+- compatible: should be "sprd,sc2731-vibrator".
+- reg: address of vibrator control register.
+
+Example :
+
+       sc2731_pmic: pmic@0 {
+               compatible = "sprd,sc2731";
+               reg = <0>;
+               spi-max-frequency = <26000000>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               vibrator@eb4 {
+                       compatible = "sprd,sc2731-vibrator";
+                       reg = <0xeb4>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
index 121d9b7c79a24cd05e6452bb8b52a14d3d20f46a..1063c30d53f7d0fd7b642d323d32799ba4fb51fe 100644
@@ -32,7 +32,7 @@ i2c@00000000 {
                reg = <0x6c>;
                interrupt-parent = <&gpx1>;
                interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
-               vdd-supply = <&ldo15_reg>";
+               vdd-supply = <&ldo15_reg>;
                vid-supply = <&ldo18_reg>;
                reset-gpios = <&gpx1 5 0>;
                touchscreen-size-x = <1080>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
index 1099fe0788fae19c27dd1153e6d9d9e4aba10c6f..f246ccbf8838c2c90496572af8aa4e4d17079be1 100644
@@ -15,7 +15,7 @@ Required properties:
   include "nvidia,tegra30-ictlr".      
 - reg : Specifies base physical address and size of the registers.
   Each controller must be described separately (Tegra20 has 4 of them,
-  whereas Tegra30 and later have 5"  
+  whereas Tegra30 and later have 5).
 - interrupt-controller : Identifies the node as an interrupt controller.
 - #interrupt-cells : Specifies the number of cells needed to encode an
   interrupt source. The value must be 3.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
index 136bd612bd8359488447b9ae12b335ffb083ba80..6a36bf66d932d42320cfc6b3c998b16488609d61 100644
@@ -12,7 +12,7 @@ Required properties:
   specifier, shall be 2
 - interrupts: interrupts references to primary interrupt controller
   (only needed for exti controller with multiple exti under
-  same parent interrupt: st,stm32-exti and st,stm32h7-exti")
+  same parent interrupt: st,stm32-exti and st,stm32h7-exti)
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
index 356c29789cf54862e1ece93dc40449e221481304..3a66d3c483e1aad12298fcf297767931db09051a 100644
@@ -152,7 +152,7 @@ Required properties:
 - compatible   : should contain one of:
                  "brcm,bcm7425-timers"
                  "brcm,bcm7429-timers"
-                 "brcm,bcm7435-timers and
+                 "brcm,bcm7435-timers" and
                  "brcm,brcmstb-timers"
 - reg          : the timers register range
 - interrupts   : the interrupt line for this timer block
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index df873d1f3b7c598b6c30721d3eec915a20ea8621..f8c33890bc2970e08bf44934835a9b8c464675f1 100644
@@ -238,7 +238,7 @@ PROPERTIES
                Must include one of the following:
                - "fsl,fman-dtsec" for dTSEC MAC
                - "fsl,fman-xgec" for XGEC MAC
-               - "fsl,fman-memac for mEMAC MAC
+               - "fsl,fman-memac" for mEMAC MAC
 
 - cell-index
                Usage: required
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 9b387f861aed166bda522f6e3d4ebb8856a49218..7dec508987c75c70ac194876df9d4f8019586aab 100644
@@ -133,7 +133,7 @@ located inside a PM domain with index 0 of a power controller represented by a
 node with the label "power".
 In the second example the consumer device are partitioned across two PM domains,
 the first with index 0 and the second with index 1, of a power controller that
-is represented by a node with the label "power.
+is represented by a node with the label "power".
 
 Optional properties:
 - required-opps: This contains phandle to an OPP node in another device's OPP
diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt
index ca69f5e3040cfa48299682dd6371f99c90b49ffa..ae326f26359740bce4fe7ac119288447649b6429 100644
@@ -16,7 +16,7 @@ Required properties:
 Optional properties:
 - ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3.
   If DCDCs are externally controlled then this property should be there.
-- "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
+- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
   If DCDCs are externally controlled and if it is from GPIO then GPIO
   number should be provided. If it is externally controlled and no GPIO
   entry then driver will just configure this rails as external control
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
index a21658f18fe6d7d593adece10e056e072fa2c5e4..3661e6153a92bf8df66cea43d5f41415cc497786 100644
@@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset
 controller binding usage.
 
 Required properties:
-- compatible: Should be st,stih407-softreset";
+- compatible: Should be "st,stih407-softreset";
 - #reset-cells: 1, see below
 
 example:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
index d330c73de9a2e0103aabc3cf365d02974faee73c..68b7d6207e3d75acd51400da27e5ca292c5026d0 100644
@@ -39,7 +39,7 @@ Required properties:
 
 Optional property:
 - clock-frequency:     Desired I2C bus clock frequency in Hz.
-                       When missing default to 400000Hz.
+                       When missing default to 100000Hz.
 
 Child nodes should conform to I2C bus binding as described in i2c.txt.
 
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index 6a4aadc4ce06b27ff059c64f6c438d0fef863b21..84b28dbe9f15452bbe341f3dbf5e6f5452b72a19 100644
@@ -30,7 +30,7 @@ Required properties:
 
                          Board connectors:
                          * Headset Mic
-                         * Secondary Mic",
+                         * Secondary Mic
                          * DMIC
                          * Ext Spk
 
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
index aa54e49fc8a26b397232f5ee340b7472f0b57a1c..c7600a93ab39e58bb62cc02e1f77a2d5132f1b08 100644
@@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
                        "Digital Mic3"
 
                Audio pins and MicBias on WCD9335 Codec:
-                       "MIC_BIAS1
+                       "MIC_BIAS1"
                        "MIC_BIAS2"
                        "MIC_BIAS3"
                        "MIC_BIAS4"
diff --git a/Documentation/devicetree/bindings/w1/w1-gpio.txt b/Documentation/devicetree/bindings/w1/w1-gpio.txt
index 6e09c35d9f1a281a0046ed2c07dfce1f1312f48f..37091902a0210328e76582426eaec0eaa3a7ae3d 100644
@@ -15,7 +15,7 @@ Optional properties:
 
 Examples:
 
-       onewire@0 {
+       onewire {
                compatible = "w1-gpio";
                gpios = <&gpio 126 0>, <&gpio 105 0>;
        };
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index bee1b9a1702f1cc6c89811aff6b8bdbc1eefb0b0..6172f3cc3d0b2109916cfccda2da1836065f8766 100644
@@ -49,10 +49,10 @@ Device Drivers Base
 Device Drivers DMA Management
 -----------------------------
 
-.. kernel-doc:: drivers/base/dma-coherent.c
+.. kernel-doc:: kernel/dma/coherent.c
    :export:
 
-.. kernel-doc:: drivers/base/dma-mapping.c
+.. kernel-doc:: kernel/dma/mapping.c
    :export:
 
 Device drivers PnP support
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 2c391338c6757f505eac6dfcbe98a169452ad305..37bf0a9de75cbe79794e653ff161e4a5eb37a97a 100644
@@ -441,8 +441,6 @@ prototypes:
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -473,7 +471,7 @@ prototypes:
 };
 
 locking rules:
-       All except for ->poll_mask may block.
+       All may block.
 
 ->llseek() locking has moved from llseek to the individual llseek
 implementations.  If your fs is not using generic_file_llseek, you
@@ -505,9 +503,6 @@ in sys_read() and friends.
 the lease within the individual filesystem to record the result of the
 operation
 
-->poll_mask can be called with or without the waitqueue lock for the waitqueue
-returned from ->get_poll_head.
-
 --------------------------- dquot_operations -------------------------------
 prototypes:
        int (*write_dquot) (struct dquot *);
diff --git a/Documentation/filesystems/cifs/AUTHORS b/Documentation/filesystems/cifs/AUTHORS
index 9f4f87e1624036349533adf9534bfd3c4b08535d..75865da2ce1475c27bea1050b3e80ff7160f6d6f 100644
@@ -42,9 +42,11 @@ Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
 Scott Lovenberg
 Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
 Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
-Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
 Shirish Pargaonkar (for many ACL patches over the years)
 Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+Paulo Alcantara
+Long Li (some great work on RDMA, SMB Direct)
 
 
 Test case and Bug Report contributors
@@ -58,5 +60,4 @@ mention to the Stanford Checker (SWAT) which pointed out many minor
 bugs in error paths.  Valuable suggestions also have come from Al Viro
 and Dave Miller.
 
-And thanks to the IBM LTC and Power test teams and SuSE testers for
-finding multiple bugs during excellent stress test runs.
+And thanks to the IBM LTC and Power test teams and SuSE and Citrix and RedHat testers for finding multiple bugs during excellent stress test runs.
diff --git a/Documentation/filesystems/cifs/CHANGES b/Documentation/filesystems/cifs/CHANGES
index bc0025cdd1c9c0d285c32e8d7656103868126ca8..455e1cc494a9f2e78ee1d45b89bfbe5ee55048eb 100644
@@ -1,3 +1,6 @@
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for
+more current information.
+
 Version 1.62
 ------------
 Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO
index c5adf149b57f7f8f6e2d0b104d5b74f6bc7f5f84..852499aed64b52bb321c0b9656b0b606a4710772 100644
@@ -9,14 +9,14 @@ is a partial list of the known problems and missing features:
 
 a) SMB3 (and SMB3.02) missing optional features:
    - multichannel (started), integration with RDMA
-   - directory leases (improved metadata caching)
-   - T10 copy offload (copy chunk, and "Duplicate Extents" ioctl
+   - directory leases (improved metadata caching), started (root dir only)
+   - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
      currently the only two server side copy mechanisms supported)
 
 b) improved sparse file support
 
 c) Directory entry caching relies on a 1 second timer, rather than
-using Directory Leases
+using Directory Leases, currently only the root file handle is cached longer
 
 d) quota support (needs minor kernel change since quota calls
 to make it to network filesystems or deviceless filesystems)
@@ -42,6 +42,8 @@ mount or a per server basis to client UIDs or nobody if no mapping
 exists. Also better integration with winbind for resolving SID owners
 
 k) Add tools to take advantage of more smb3 specific ioctls and features
+(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
+is in progress)
 
 l) encrypted file support
 
@@ -71,9 +73,8 @@ t) split cifs and smb3 support into separate modules so legacy (and less
 secure) CIFS dialect can be disabled in environments that don't need it
 and simplify the code.
 
-u) Finish up SMB3.1.1 dialect support
-
-v) POSIX Extensions for SMB3.1.1
+v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
+so far).
 
 KNOWN BUGS
 ====================================
@@ -92,8 +93,8 @@ Misc testing to do
 1) check out max path names and max path name components against various server
 types. Try nested symlinks (8 deep). Return max path name in stat -f information
 
-2) Improve xfstest's cifs enablement and adapt xfstests where needed to test
-cifs better
+2) Improve xfstest's cifs/smb3 enablement and adapt xfstests where needed to test
+cifs/smb3 better
 
 3) Additional performance testing and optimization using iozone and similar - 
 there are some easy changes that can be done to parallelize sequential writes,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 829a7b7857a46904cfb7f02646212504a3a7f259..f608180ad59d71ab2bcc2d2d818699bfaaee1470 100644
@@ -857,8 +857,6 @@ struct file_operations {
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
@@ -903,17 +901,6 @@ otherwise noted.
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
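
  As a hedged illustration (not text from this patch), a minimal ->poll
  implementation for a hypothetical character device could look like the
  sketch below; my_device, its waitq member, and my_data_ready() are
  assumed names:

	static __poll_t my_poll(struct file *file, struct poll_table_struct *wait)
	{
		struct my_device *dev = file->private_data;
		__poll_t mask = 0;

		/* register the device waitqueue so callers can sleep on it */
		poll_wait(file, &dev->waitq, wait);

		/* report readability once data is available (assumed helper) */
		if (my_data_ready(dev))
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}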
 
-  get_poll_head: Returns the struct wait_queue_head that callers can
-  wait on.  Callers need to check the returned events using ->poll_mask
-  once woken.  Can return NULL to indicate polling is not supported,
-  or any error code using the ERR_PTR convention to indicate that a
-  grave error occured and ->poll_mask shall not be called.
-
-  poll_mask: return the mask of EPOLL* values describing the file descriptor
-  state.  Called either before going to sleep on the waitqueue returned by
-  get_poll_head, or after it has been woken.  If ->get_poll_head and
-  ->poll_mask are implemented ->poll does not need to be implement.
-
   unlocked_ioctl: called by the ioctl(2) system call.
 
   compat_ioctl: called by the ioctl(2) system call when 32 bit system calls
index 6c9c69ec3986be379a86f745f30a9eab9b817d96..114c7ce7b58de2c15e5b1c917c96e4926c605191 100644 (file)
@@ -50,6 +50,11 @@ LDFLAGS_MODULE
 --------------------------------------------------
 Additional options used for $(LD) when linking modules.
 
+KBUILD_KCONFIG
+--------------------------------------------------
+Set the top-level Kconfig file to the value of this environment
+variable.  The default name is "Kconfig".
+
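For example (Kconfig.custom is an assumed file name):

	make KBUILD_KCONFIG=Kconfig.custom defconfig
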
 KBUILD_VERBOSE
 --------------------------------------------------
 Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the
 directory name found in the arch/ directory.
 But some architectures such as x86 and sparc have aliases.
 x86: i386 for 32 bit, x86_64 for 64 bit
-sparc: sparc for 32 bit, sparc64 for 64 bit
+sh: sh for 32 bit, sh64 for 64 bit
+sparc: sparc32 for 32 bit, sparc64 for 64 bit
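
For example, a hedged illustration that selects the sparc64 alias:

	make ARCH=sparc64 defconfig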
 
 CROSS_COMPILE
 --------------------------------------------------
@@ -148,15 +154,6 @@ stripped after they are installed.  If INSTALL_MOD_STRIP is '1', then
 the default option --strip-debug will be used.  Otherwise,
 INSTALL_MOD_STRIP value will be used as the options to the strip command.
 
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
-    $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
 INSTALL_HDR_PATH
 --------------------------------------------------
 INSTALL_HDR_PATH specifies where to install user space headers when
index 3534a84d206caf324423a9422eb985b48c97813b..64e0775a62d4475ec378d033332b5099987954ff 100644 (file)
@@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses
 to use it. It should be placed at the top of the configuration, before any
 other statement.
 
+'#' Kconfig source file comment:
+
+An unquoted '#' character anywhere in a source file line indicates
+the beginning of a source file comment.  The remainder of that line
+is a comment.
+
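An illustrative fragment (EXAMPLE is a made-up symbol):

	# this whole line is a comment
	config EXAMPLE		# from the '#' onward is also a comment
		bool "example option"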
 
 Kconfig hints
 -------------
index 7233118f3a05481247f4c550542b099e9f655245..68c82914c0f3a1e791cab09d7b6a2b7253541443 100644 (file)
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".
 
 Use "make help" to list all of the possible configuration targets.
 
-The xconfig ('qconf') and menuconfig ('mconf') programs also
-have embedded help text.  Be sure to check it for navigation,
-search, and other general help text.
+The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
+programs also have embedded help text.  Be sure to check that for
+navigation, search, and other general help text.
 
 ======================================================================
 General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
 for you, so you may find that you need to see what NEW kernel
 symbols have been introduced.
 
-To see a list of new config symbols when using "make oldconfig", use
+To see a list of new config symbols, use
 
        cp user/some/old.config .config
        make listnewconfig
 
 and the config program will list any new symbols, one per line.
 
+Alternatively, you can use the brute force method:
+
+       make oldconfig
        scripts/diffconfig .config.old .config | less
 
 ______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
                This lists all config symbols that contain "hotplug",
                e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.
 
-       For search help, enter / followed TAB-TAB-TAB (to highlight
+       For search help, enter / followed by TAB-TAB (to highlight
        <Help>) and Enter.  This will tell you that you can also use
        regular expressions (regexes) in the search string, so if you
        are not interested in MEMORY_HOTPLUG, you could try
@@ -202,6 +205,39 @@ Example:
        make MENUCONFIG_MODE=single_menu menuconfig
 
 
+======================================================================
+nconfig
+--------------------------------------------------
+
+nconfig is an alternate text-based configurator.  It lists function
+keys across the bottom of the terminal (window) that execute commands.
+You can also just use the corresponding numeric key to execute the
+commands unless you are in a data entry window.  E.g., instead of F6
+for Save, you can just press 6.
+
+Use F1 for Global help or F3 for the Short help menu.
+
+Searching in nconfig:
+
+       You can search either in the menu entry "prompt" strings
+       or in the configuration symbols.
+
+       Use / to begin a search through the menu entries.  This does
+       not support regular expressions.  Use <Down> or <Up> for
+       Next hit and Previous hit, respectively.  Use <Esc> to
+       terminate the search mode.
+
+       F8 (SymSearch) searches the configuration symbols for the
+       given string or regular expression (regex).
+
+NCONFIG_MODE
+--------------------------------------------------
+This mode shows all sub-menus in one large tree.
+
+Example:
+       make NCONFIG_MODE=single_menu nconfig
+
+
 ======================================================================
 xconfig
 --------------------------------------------------
@@ -230,8 +266,7 @@ gconfig
 
 Searching in gconfig:
 
-       None (gconfig isn't maintained as well as xconfig or menuconfig);
-       however, gconfig does have a few more viewing choices than
-       xconfig does.
+       There is no search command in gconfig.  However, gconfig does
+       have several different viewing choices, modes, and options.
 
 ###
index c13214d073a4866f49025033a86fed03275bca5f..d3e5dd26db12d75bc09d25cacbf0f775003cd527 100644 (file)
@@ -1490,7 +1490,7 @@ To remove an ARP target:
 
 To configure the interval between learning packet transmits:
 # echo 12 > /sys/class/net/bond0/bonding/lp_interval
-       NOTE: the lp_inteval is the number of seconds between instances where
+       NOTE: the lp_interval is the number of seconds between instances where
 the bonding driver sends learning packets to each slave's peer switch.  The
 default interval is 1 second.
 
index d4d8370279254472ed812488ea61d4e3bd64651b..f81111eba9c5dd157aecdd1f1370031d3d1b1f0f 100644 (file)
@@ -1,3 +1,4 @@
+==============================================================
 Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
 ==============================================================
 
@@ -46,123 +47,131 @@ Driver Configuration Parameters
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-Rx Descriptors: Number of receive descriptors. A receive descriptor is a data
+Rx Descriptors:
+   Number of receive descriptors. A receive descriptor is a data
    structure that describes a receive buffer and its attributes to the network
    controller. The data in the descriptor is used by the controller to write
    data from the controller to host memory. In the 3.x.x driver the valid range
    for this parameter is 64-256. The default value is 256. This parameter can be
    changed using the command::
 
-   ethtool -G eth? rx n
+     ethtool -G eth? rx n
 
    Where n is the number of desired Rx descriptors.
 
-Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data
+Tx Descriptors:
+   Number of transmit descriptors. A transmit descriptor is a data
    structure that describes a transmit buffer and its attributes to the network
    controller. The data in the descriptor is used by the controller to read
    data from the host memory to the controller. In the 3.x.x driver the valid
    range for this parameter is 64-256. The default value is 128. This parameter
    can be changed using the command::
 
-   ethtool -G eth? tx n
+     ethtool -G eth? tx n
 
    Where n is the number of desired Tx descriptors.
 
-Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by
+Speed/Duplex:
+   The driver auto-negotiates the link speed and duplex settings by
    default. The ethtool utility can be used as follows to force speed/duplex.::
 
-   ethtool -s eth?  autoneg off speed {10|100} duplex {full|half}
+     ethtool -s eth?  autoneg off speed {10|100} duplex {full|half}
 
    NOTE: setting the speed/duplex to incorrect values will cause the link to
    fail.
 
-Event Log Message Level:  The driver uses the message level flag to log events
+Event Log Message Level:
+   The driver uses the message level flag to log events
    to syslog. The message level can be set at driver load time. It can also be
    set using the command::
 
-   ethtool -s eth? msglvl n
+     ethtool -s eth? msglvl n
 
 
 Additional Configurations
 =========================
 
-  Configuring the Driver on Different Distributions
-  -------------------------------------------------
+Configuring the Driver on Different Distributions
+-------------------------------------------------
 
-  Configuring a network driver to load properly when the system is started is
-  distribution dependent. Typically, the configuration process involves adding
-  an alias line to /etc/modprobe.d/*.conf as well as editing other system
-  startup scripts and/or configuration files.  Many popular Linux
-  distributions ship with tools to make these changes for you. To learn the
-  proper way to configure a network device for your system, refer to your
-  distribution documentation.  If during this process you are asked for the
-  driver or module name, the name for the Linux Base Driver for the Intel
-  PRO/100 Family of Adapters is e100.
+Configuring a network driver to load properly when the system is started
+is distribution dependent.  Typically, the configuration process involves
+adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other
+system startup scripts and/or configuration files.  Many popular Linux
+distributions ship with tools to make these changes for you.  To learn
+the proper way to configure a network device for your system, refer to
+your distribution documentation.  If during this process you are asked
+for the driver or module name, the name for the Linux Base Driver for
+the Intel PRO/100 Family of Adapters is e100.
 
-  As an example, if you install the e100 driver for two PRO/100 adapters
-  (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
+As an example, if you install the e100 driver for two PRO/100 adapters
+(eth0 and eth1), add the following to a configuration file in
+/etc/modprobe.d/::
 
        alias eth0 e100
        alias eth1 e100
 
-  Viewing Link Messages
-  ---------------------
-  In order to see link messages and other Intel driver information on your
-  console, you must set the dmesg level up to six. This can be done by
-  entering the following on the command line before loading the e100 driver::
+Viewing Link Messages
+---------------------
+
+In order to see link messages and other Intel driver information on your
+console, you must set the dmesg level up to six.  This can be done by
+entering the following on the command line before loading the e100
+driver::
 
        dmesg -n 6
 
-  If you wish to see all messages issued by the driver, including debug
-  messages, set the dmesg level to eight.
+If you wish to see all messages issued by the driver, including debug
+messages, set the dmesg level to eight.
 
-  NOTE: This setting is not saved across reboots.
+NOTE: This setting is not saved across reboots.
 
+ethtool
+-------
 
-  ethtool
-  -------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information.  The ethtool
+version 1.6 or later is required for this functionality.
 
-  The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.  The ethtool
-  version 1.6 or later is required for this functionality.
+The latest release of ethtool can be found from
+https://www.kernel.org/pub/software/network/ethtool/
 
-  The latest release of ethtool can be found from
-  https://www.kernel.org/pub/software/network/ethtool/
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is provided through the ethtool* utility.  For instructions on
+enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
+enabled on the system during the next shut down or reboot.  For this
+driver version, in order to enable WoL, the e100 driver must be loaded
+when shutting down or rebooting the system.
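
As a hedged example (interface name assumed), magic-packet wake can be
enabled with::

	ethtool -s eth0 wol g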
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
-  WoL is provided through the ethtool* utility.  For instructions on enabling
-  WoL with ethtool, refer to the ethtool man page.
+NAPI
+----
 
-  WoL will be enabled on the system during the next shut down or reboot. For
-  this driver version, in order to enable WoL, the e100 driver must be
-  loaded when shutting down or rebooting the system.
+NAPI (Rx polling mode) is supported in the e100 driver.
 
-  NAPI
-  ----
+See https://wiki.linuxfoundation.org/networking/napi for more
+information on NAPI.
 
-  NAPI (Rx polling mode) is supported in the e100 driver.
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
 
-  See https://wiki.linuxfoundation.org/networking/napi for more information
-  on NAPI.
+Due to the default ARP behavior on Linux, it is not possible to have one
+system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected.  All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
 
-  Multiple Interfaces on Same Ethernet Broadcast Network
-  ------------------------------------------------------
+If you have multiple interfaces in a server, either turn on ARP
+filtering by
 
-  Due to the default ARP behavior on Linux, it is not possible to have
-  one system on two IP networks in the same Ethernet broadcast domain
-  (non-partitioned switch) behave as expected. All Ethernet interfaces
-  will respond to IP traffic for any IP address assigned to the system.
-  This results in unbalanced receive traffic.
+(1) entering::
 
-  If you have multiple interfaces in a server, either turn on ARP
-  filtering by
+       echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
 
-  (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
-      (this only works if your kernel's version is higher than 2.4.5), or
+    (this only works if your kernel's version is higher than 2.4.5), or
 
-  (2) installing the interfaces in separate broadcast domains (either
-      in different switches or in a switch partitioned to VLANs).
+(2) installing the interfaces in separate broadcast domains (either
+    in different switches or in a switch partitioned to VLANs).
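
As a hedged aside, the arp_filter setting from option (1) can be made
persistent across reboots via the standard sysctl mechanism (the
/etc/sysctl.conf path is assumed; adjust for your distribution)::

	echo "net.ipv4.conf.all.arp_filter = 1" >> /etc/sysctl.conf
	sysctl -p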
 
 
 Support
index 616848940e63f7303633e0be67febc86bee6ac6f..f10dd40869218cb11e1d29bc5e4b6431d30af946 100644 (file)
@@ -1,3 +1,4 @@
+===========================================================
 Linux* Base Driver for Intel(R) Ethernet Network Connection
 ===========================================================
 
@@ -33,7 +34,8 @@ Command Line Parameters
 The default value for each parameter is generally the recommended setting,
 unless otherwise noted.
 
-NOTES:  For more information about the AutoNeg, Duplex, and Speed
+NOTES:
+       For more information about the AutoNeg, Duplex, and Speed
         parameters, see the "Speed and Duplex Configuration" section in
         this document.
 
@@ -44,22 +46,27 @@ NOTES:  For more information about the AutoNeg, Duplex, and Speed
 
 AutoNeg
 -------
+
 (Supported only on adapters with copper connections)
-Valid Range:   0x01-0x0F, 0x20-0x2F
-Default Value: 0x2F
+
+:Valid Range:   0x01-0x0F, 0x20-0x2F
+:Default Value: 0x2F
 
 This parameter is a bit-mask that specifies the speed and duplex settings
 advertised by the adapter.  When this parameter is used, the Speed and
 Duplex parameters must not be specified.
 
-NOTE:  Refer to the Speed and Duplex section of this readme for more
+NOTE:
+       Refer to the Speed and Duplex section of this readme for more
        information on the AutoNeg parameter.
 
 Duplex
 ------
+
 (Supported only on adapters with copper connections)
-Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
-Default Value: 0
+
+:Valid Range:   0-2 (0=auto-negotiate, 1=half, 2=full)
+:Default Value: 0
 
 This defines the direction in which data is allowed to flow.  It can be
 either one- or two-directional.  If both Duplex and the link partner are
@@ -69,18 +76,22 @@ duplex.
 
 FlowControl
 -----------
-Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
-Default Value: Reads flow control settings from the EEPROM
+
+:Valid Range:   0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
+:Default Value: Reads flow control settings from the EEPROM
 
 This parameter controls the automatic generation (Tx) and response (Rx)
 to Ethernet PAUSE frames.
 
 InterruptThrottleRate
 ---------------------
+
 (not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range:   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
-                                 4=simplified balancing)
-Default Value: 3
+
+:Valid Range:
+   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+   4=simplified balancing)
+:Default Value: 3
 
 The driver can limit the amount of interrupts per second that the adapter
 will generate for incoming packets. It does this by writing a value to the
@@ -134,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation
 and may improve small packet latency, but is generally not suitable
 for bulk throughput traffic.
 
-NOTE:  InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+NOTE:
+       InterruptThrottleRate takes precedence over the TxAbsIntDelay and
        RxAbsIntDelay parameters.  In other words, minimizing the receive
        and/or transmit absolute delays does not force the controller to
        generate more interrupts than what the Interrupt Throttle Rate
        allows.
 
-CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
+CAUTION:
+          If you are using the Intel(R) PRO/1000 CT Network Connection
           (controller 82547), setting InterruptThrottleRate to a value
           greater than 75,000, may hang (stop transmitting) adapters
           under certain network conditions.  If this occurs a NETDEV
@@ -150,7 +163,8 @@ CAUTION:  If you are using the Intel(R) PRO/1000 CT Network Connection
           hang, ensure that InterruptThrottleRate is set no greater
           than 75,000 and is not set to 0.
 
-NOTE:  When e1000 is loaded with default settings and multiple adapters
+NOTE:
+       When e1000 is loaded with default settings and multiple adapters
        are in use simultaneously, the CPU utilization may increase non-
        linearly.  In order to limit the CPU utilization without impacting
        the overall throughput, we recommend that you load the driver as
@@ -167,9 +181,11 @@ NOTE:  When e1000 is loaded with default settings and multiple adapters
 
 RxDescriptors
 -------------
-Valid Range:   48-256 for 82542 and 82543-based adapters
-               48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
 
 This value specifies the number of receive buffer descriptors allocated
 by the driver.  Increasing this value allows the driver to buffer more
@@ -179,15 +195,17 @@ Each descriptor is 16 bytes.  A receive buffer is also allocated for each
 descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
 on the MTU setting. The maximum MTU size is 16110.
 
-NOTE:  MTU designates the frame size.  It only needs to be set for Jumbo
+NOTE:
+       MTU designates the frame size.  It only needs to be set for Jumbo
        Frames.  Depending on the available system resources, the request
        for a higher number of receive descriptors may be denied.  In this
        case, use a lower number.
 
 RxIntDelay
 ----------
-Valid Range:   0-65535 (0=off)
-Default Value: 0
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 0
 
 This value delays the generation of receive interrupts in units of 1.024
 microseconds.  Receive interrupt reduction can improve CPU efficiency if
@@ -197,7 +215,8 @@ of TCP traffic.  If the system is reporting dropped receives, this value
 may be set too high, causing the driver to run out of available receive
 descriptors.
 
-CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
+CAUTION:
+          When setting RxIntDelay to a value other than 0, adapters may
           hang (stop transmitting) under certain network conditions.  If
           this occurs a NETDEV WATCHDOG message is logged in the system
           event log.  In addition, the controller is automatically reset,
@@ -206,9 +225,11 @@ CAUTION:  When setting RxIntDelay to a value other than 0, adapters may
 
 RxAbsIntDelay
 -------------
+
 (This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range:   0-65535 (0=off)
-Default Value: 128
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 128
 
 This value, in units of 1.024 microseconds, limits the delay in which a
 receive interrupt is generated.  Useful only if RxIntDelay is non-zero,
@@ -219,9 +240,11 @@ conditions.
 
 Speed
 -----
+
 (This parameter is supported only on adapters with copper connections.)
-Valid Settings: 0, 10, 100, 1000
-Default Value:  0 (auto-negotiate at all supported speeds)
+
+:Valid Settings: 0, 10, 100, 1000
+:Default Value:  0 (auto-negotiate at all supported speeds)
 
 Speed forces the line speed to the specified value in megabits per second
 (Mbps).  If this parameter is not specified or is set to 0 and the link
@@ -230,22 +253,26 @@ speed.  Duplex should also be set when Speed is set to either 10 or 100.
 
 TxDescriptors
 -------------
-Valid Range:   48-256 for 82542 and 82543-based adapters
-               48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+  - 48-256 for 82542 and 82543-based adapters
+  - 48-4096 for all other supported adapters
+:Default Value: 256
 
 This value is the number of transmit descriptors allocated by the driver.
 Increasing this value allows the driver to queue more transmits.  Each
 descriptor is 16 bytes.
 
-NOTE:  Depending on the available system resources, the request for a
+NOTE:
+       Depending on the available system resources, the request for a
        higher number of transmit descriptors may be denied.  In this case,
        use a lower number.
 
 TxIntDelay
 ----------
-Valid Range:   0-65535 (0=off)
-Default Value: 8
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 8
 
 This value delays the generation of transmit interrupts in units of
 1.024 microseconds.  Transmit interrupt reduction can improve CPU
@@ -255,9 +282,11 @@ causing the driver to run out of available transmit descriptors.
 
 TxAbsIntDelay
 -------------
+
 (This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range:   0-65535 (0=off)
-Default Value: 32
+
+:Valid Range:   0-65535 (0=off)
+:Default Value: 32
 
 This value, in units of 1.024 microseconds, limits the delay in which a
 transmit interrupt is generated.  Useful only if TxIntDelay is non-zero,
@@ -268,18 +297,21 @@ network conditions.
 
 XsumRX
 ------
+
 (This parameter is NOT supported on the 82542-based adapter.)
-Valid Range:   0-1
-Default Value: 1
+
+:Valid Range:   0-1
+:Default Value: 1
 
 A value of '1' indicates that the driver should enable IP checksum
 offload for received packets (both UDP and TCP) to the adapter hardware.
 
 Copybreak
 ---------
-Valid Range:   0-xxxxxxx (0=off)
-Default Value: 256
-Usage: modprobe e1000.ko copybreak=128
+
+:Valid Range:   0-xxxxxxx (0=off)
+:Default Value: 256
+:Usage: modprobe e1000.ko copybreak=128
 
 The driver copies all packets of this size or smaller to a fresh RX
 buffer before handing them up the stack.
@@ -291,8 +323,9 @@ it is also available during runtime at
 
 SmartPowerDownEnable
 --------------------
-Valid Range: 0-1
-Default Value:  0 (disabled)
+
+:Valid Range: 0-1
+:Default Value:  0 (disabled)
 
 Allows PHY to turn off in lower power states. The user can turn off
 this parameter in supported chipsets.
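
A hedged usage example, following the same modprobe convention as the
other parameters in this document::

	modprobe e1000 SmartPowerDownEnable=1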
@@ -308,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex.
 
 For copper-based boards, the keywords interact as follows:
 
-  The default operation is auto-negotiate.  The board advertises all
+- The default operation is auto-negotiate.  The board advertises all
   supported speed and duplex combinations, and it links at the highest
   common speed and duplex mode IF the link partner is set to auto-negotiate.
 
-  If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
+- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
   is advertised (The 1000BaseT spec requires auto-negotiation.)
 
-  If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
+- If Speed = 10 or 100, then both Speed and Duplex should be set.  Auto-
   negotiation is disabled, and the AutoNeg parameter is ignored.  Partner
   SHOULD also be forced.
 
@@ -327,13 +360,15 @@ process.
 The parameter may be specified as either a decimal or hexadecimal value as
 determined by the bitmap below.
 
+============== ====== ====== ======= ======= ====== ====== ======= ======
 Bit position   7      6      5       4       3      2      1       0
 Decimal Value  128    64     32      16      8      4      2       1
 Hex value      80     40     20      10      8      4      2       1
 Speed (Mbps)   N/A    N/A    1000    N/A     100    100    10      10
 Duplex                       Full            Full   Half   Full    Half
+============== ====== ====== ======= ======= ====== ====== ======= ======
 
-Some examples of using AutoNeg:
+Some examples of using AutoNeg::
 
   modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half)
   modprobe e1000 AutoNeg=1 (Same as above)
@@ -354,8 +389,9 @@ previously mentioned to force the adapter to the same speed and duplex.
 Additional Configurations
 =========================
 
-  Jumbo Frames
-  ------------
+Jumbo Frames
+------------
+
   Jumbo Frames support is enabled by changing the MTU to a value larger than
   the default of 1500.  Use the ifconfig command to increase the MTU size.
   For example::
@@ -367,11 +403,11 @@ Additional Configurations
 
        MTU=9000
 
-   to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
-   applies to the Red Hat distributions; other distributions may store this
-   setting in a different location.
+  to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>.  This example
+  applies to the Red Hat distributions; other distributions may store this
+  setting in a different location.
 
-  Notes:
+Notes:
   Degradation in throughput performance may be observed in some Jumbo frames
   environments. If this is observed, increasing the application's socket buffer
   size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
@@ -385,12 +421,14 @@ Additional Configurations
     poor performance or loss of link.
 
   - Adapters based on the Intel(R) 82542 and 82573V/E controller do not
-    support Jumbo Frames. These correspond to the following product names:
+    support Jumbo Frames. These correspond to the following product names::
+
      Intel(R) PRO/1000 Gigabit Server Adapter
      Intel(R) PRO/1000 PM Network Connection
 
-  ethtool
-  -------
+ethtool
+-------
+
   The driver utilizes the ethtool interface for driver configuration and
   diagnostics, as well as displaying statistical information.  The ethtool
   version 1.6 or later is required for this functionality.
@@ -398,8 +436,9 @@ Additional Configurations
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool/
 
-  Enabling Wake on LAN* (WoL)
-  ---------------------------
+Enabling Wake on LAN* (WoL)
+---------------------------
+
   WoL is configured through the ethtool* utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
index 13081b3decefa834824b544182d0986e83bc50b4..a7d354ddda7baeb59760215cb41222e3b4698a8d 100644 (file)
@@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp)
      Temporarily pause a stream parser. Message parsing is suspended
      and no new messages are delivered to the upper layer.
 
-void strp_pause(struct strparser *strp)
+void strp_unpause(struct strparser *strp)
 
      Unpause a paused stream parser.
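
A hedged sketch of how a caller might bracket an upper-layer
reconfiguration with these two calls (my_reconfigure_state() is an
assumed helper):

	#include <net/strparser.h>

	static void my_reconfigure(struct strparser *strp)
	{
		strp_pause(strp);		/* suspend message delivery */
		my_reconfigure_state();		/* assumed upper-layer work */
		strp_unpause(strp);		/* resume message delivery */
	}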
 
index e73bcf9cb5f31cc756521702bbc15fd142e09c71..7ffea6aa22e3c89d4b6e6c7359d40a55c4241176 100644 (file)
@@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the
 associated event field will be saved in a variable but won't be summed
 as a value:
 
-  # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+  # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
 
 Multiple variables can be assigned at the same time.  The below would
 result in both ts0 and b being created as variables, with both
 common_timestamp and field1 additionally being summed as values:
 
-  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+  # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
        event/trigger
 
 Note that variable assignments can appear either preceding or
 following their use.  The command below behaves identically to the
 command above:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+  # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
        event/trigger
 
 Any number of variables not bound to a 'vals=' prefix can also be
 assigned by simply separating them with colons.  Below is the same
 thing but without the values being summed in the histogram:
 
-  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+  # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
 
 Variables set as above can be referenced and used in expressions on
 another event.
 
 For example, here's how a latency can be calculated:
 
-  # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
-  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+  # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+  # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
 
 In the first line above, the event's timestamp is saved into the
 variable ts0.  In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'.  The hist trigger below in turn
 makes use of the wakeup_lat variable to compute a combined latency
 using the same key and variable from yet another event:
 
-  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+  # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
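
Once triggers like these are in place, the accumulated histogram can be
read back from the event's 'hist' file, using the same shorthand paths
as above:

  # cat event3/hist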
 
 2.2.2 Synthetic Events
 ----------------------
@@ -1807,10 +1807,11 @@ the command that defined it with a '!':
 At this point, there isn't yet an actual 'wakeup_latency' event
 instantiated in the event subsystem - for this to happen, a 'hist
 trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.
 
-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:
 
   # echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
         /sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@ hist trigger specification.
     back to that pid, the timestamp difference is calculated.  If the
     resulting latency, stored in wakeup_lat, exceeds the current
     maximum latency, the values specified in the save() fields are
-    recoreded:
+    recorded:
 
     # echo 'hist:keys=pid:ts0=common_timestamp.usecs \
             if comm=="cyclictest"' >> \
index 635e57493709e16fbecc0235723dc742a608ae0e..b8cb38a98c1989eef926795b79b7399d65700135 100644 (file)
@@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function>
 where <config name>.<number> specify the configuration and <function> is
 a symlink to a function being removed from the configuration, e.g.:
 
-$ rm configfs/c.1/ncm.usb0
+$ rm configs/c.1/ncm.usb0
 
 ...
 ...
index 495b7742ab58086b5c81fff88eeb884769391b49..d10944e619d3d28c43bcca85bc3dd2761cee37f0 100644 (file)
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
 reset, migration and nested KVM for branch prediction blocking. The stfle
 facility 82 should not be provided to the guest without this capability.
 
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
 
 Architectures: x86
 
index 9d5eeff51b5fd32979f64d288375b6489ff25712..1505c8ea8e7b2e44126bd92f459c2f26fb4dd7c8 100644 (file)
@@ -581,7 +581,7 @@ W:  https://www.infradead.org/~dhowells/kafs/
 
 AGPGART DRIVER
 M:     David Airlie <airlied@linux.ie>
-T:     git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+T:     git git://anongit.freedesktop.org/drm/drm
 S:     Maintained
 F:     drivers/char/agp/
 F:     include/linux/agp*
@@ -2523,7 +2523,7 @@ S:        Supported
 F:     drivers/scsi/esas2r
 
 ATUSB IEEE 802.15.4 RADIO DRIVER
-M:     Stefan Schmidt <stefan@osg.samsung.com>
+M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ieee802154/atusb.c
@@ -2971,9 +2971,13 @@ N:       bcm585*
 N:     bcm586*
 N:     bcm88312
 N:     hr2
-F:     arch/arm64/boot/dts/broadcom/ns2*
+N:     stingray
+F:     arch/arm64/boot/dts/broadcom/northstar2/*
+F:     arch/arm64/boot/dts/broadcom/stingray/*
 F:     drivers/clk/bcm/clk-ns*
+F:     drivers/clk/bcm/clk-sr*
 F:     drivers/pinctrl/bcm/pinctrl-ns*
+F:     include/dt-bindings/clock/bcm-sr*
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
@@ -4360,12 +4364,7 @@ L:       iommu@lists.linux-foundation.org
 T:     git git://git.infradead.org/users/hch/dma-mapping.git
 W:     http://git.infradead.org/users/hch/dma-mapping.git
 S:     Supported
-F:     lib/dma-debug.c
-F:     lib/dma-direct.c
-F:     lib/dma-noncoherent.c
-F:     lib/dma-virt.c
-F:     drivers/base/dma-mapping.c
-F:     drivers/base/dma-coherent.c
+F:     kernel/dma/
 F:     include/asm-generic/dma-mapping.h
 F:     include/linux/dma-direct.h
 F:     include/linux/dma-mapping.h
@@ -4461,6 +4460,7 @@ F:        Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R:     "Rafael J. Wysocki" <rafael@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
 S:     Supported
 F:     Documentation/kobject.txt
@@ -4631,7 +4631,7 @@ F:        include/uapi/drm/vmwgfx_drm.h
 DRM DRIVERS
 M:     David Airlie <airlied@linux.ie>
 L:     dri-devel@lists.freedesktop.org
-T:     git git://people.freedesktop.org/~airlied/linux
+T:     git git://anongit.freedesktop.org/drm/drm
 B:     https://bugs.freedesktop.org/
 C:     irc://chat.freenode.net/dri-devel
 S:     Maintained
@@ -5674,7 +5674,7 @@ F:        drivers/crypto/caam/
 F:     Documentation/devicetree/bindings/crypto/fsl-sec4.txt
 
 FREESCALE DIU FRAMEBUFFER DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/fsl-diu-fb.*
@@ -5774,7 +5774,7 @@ S:        Maintained
 F:     drivers/net/wan/fsl_ucc_hdlc*
 
 FREESCALE QUICC ENGINE UCC UART DRIVER
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Maintained
 F:     drivers/tty/serial/ucc_uart.c
@@ -5790,7 +5790,6 @@ F:        include/linux/fsl/
 
 FREESCALE SOC FS_ENET DRIVER
 M:     Pantelis Antoniou <pantelis.antoniou@gmail.com>
-M:     Vitaly Bordug <vbordug@ru.mvista.com>
 L:     linuxppc-dev@lists.ozlabs.org
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -5798,7 +5797,7 @@ F:        drivers/net/ethernet/freescale/fs_enet/
 F:     include/linux/fs_enet_pd.h
 
 FREESCALE SOC SOUND DRIVERS
-M:     Timur Tabi <timur@tabi.org>
+M:     Timur Tabi <timur@kernel.org>
 M:     Nicolin Chen <nicoleotsuka@gmail.com>
 M:     Xiubo Li <Xiubo.Lee@gmail.com>
 R:     Fabio Estevam <fabio.estevam@nxp.com>
@@ -6909,7 +6908,7 @@ F:        drivers/clk/clk-versaclock5.c
 
 IEEE 802.15.4 SUBSYSTEM
 M:     Alexander Aring <alex.aring@gmail.com>
-M:     Stefan Schmidt <stefan@osg.samsung.com>
+M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 W:     http://wpan.cakelab.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@ -8629,7 +8628,7 @@ MARVELL MWIFIEX WIRELESS DRIVER
 M:     Amitkumar Karwar <amitkarwar@gmail.com>
 M:     Nishant Sarmukadam <nishants@marvell.com>
 M:     Ganapathi Bhat <gbhat@marvell.com>
-M:     Xinming Hu <huxm@marvell.com>
+M:     Xinming Hu <huxinming820@gmail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/net/wireless/marvell/mwifiex/
@@ -9756,6 +9755,11 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/NCR_D700.*
 
+NCSI LIBRARY:
+M:     Samuel Mendoza-Jonas <sam@mendozajonas.com>
+S:     Maintained
+F:     net/ncsi/
+
 NCT6775 HARDWARE MONITOR DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-hwmon@vger.kernel.org
@@ -9882,6 +9886,7 @@ M:        Andrew Lunn <andrew@lunn.ch>
 M:     Vivien Didelot <vivien.didelot@savoirfairelinux.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 S:     Maintained
+F:     Documentation/devicetree/bindings/net/dsa/
 F:     net/dsa/
 F:     include/net/dsa.h
 F:     include/linux/dsa/
@@ -10208,11 +10213,13 @@ F:    sound/soc/codecs/sgtl5000*
 
 NXP TDA998X DRM DRIVER
 M:     Russell King <linux@armlinux.org.uk>
-S:     Supported
+S:     Maintained
 T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
 T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
+F:     include/dt-bindings/display/tda998x.h
+K:     "nxp,tda998x"
 
 NXP TFA9879 DRIVER
 M:     Peter Rosin <peda@axentia.se>
@@ -11476,6 +11483,15 @@ W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
 F:     drivers/net/wireless/intersil/prism54/
 
+PROC FILESYSTEM
+R:     Alexey Dobriyan <adobriyan@gmail.com>
+L:     linux-kernel@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+S:     Maintained
+F:     fs/proc/
+F:     include/linux/proc_fs.h
+F:     tools/testing/selftests/proc/
+
 PROC SYSCTL
 M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
 M:     Kees Cook <keescook@chromium.org>
@@ -11808,9 +11824,9 @@ F:  Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
 F:  drivers/cpufreq/qcom-cpufreq-kryo.c
 
 QUALCOMM EMAC GIGABIT ETHERNET DRIVER
-M:     Timur Tabi <timur@codeaurora.org>
+M:     Timur Tabi <timur@kernel.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Maintained
 F:     drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM HEXAGON ARCHITECTURE
@@ -11821,7 +11837,7 @@ S:      Supported
 F:     arch/hexagon/
 
 QUALCOMM HIDMA DRIVER
-M:     Sinan Kaya <okaya@codeaurora.org>
+M:     Sinan Kaya <okaya@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org
 L:     linux-arm-msm@vger.kernel.org
 L:     dmaengine@vger.kernel.org
@@ -13648,7 +13664,7 @@ M:      Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
 S:     Supported
-F:     lib/swiotlb.c
+F:     kernel/dma/swiotlb.c
 F:     arch/*/kernel/pci-swiotlb.c
 F:     include/linux/swiotlb.h
 
@@ -15572,9 +15588,17 @@ M:     x86@kernel.org
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
+F:     Documentation/devicetree/bindings/x86/
 F:     Documentation/x86/
 F:     arch/x86/
 
+X86 ENTRY CODE
+M:     Andy Lutomirski <luto@kernel.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
+S:     Maintained
+F:     arch/x86/entry/
+
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
 M:     Borislav Petkov <bp@alien8.de>
@@ -15597,7 +15621,7 @@ F:      drivers/platform/x86/
 F:     drivers/platform/olpc/
 
 X86 VDSO
-M:     Andy Lutomirski <luto@amacapital.net>
+M:     Andy Lutomirski <luto@kernel.org>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:     Maintained
index ca2af1ab91ebadf6ac5c62150b4e72f2a1f1441d..a89d8a0d3ee16024149856a903213929aa2963d1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
          else if [ -x /bin/bash ]; then echo /bin/bash; \
          else echo sh; fi ; fi)
 
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 HOSTCC       = gcc
 HOSTCXX      = g++
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-  CC_CAN_LINK := y
-  export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1717,6 +1712,6 @@ endif     # skip-makefile
 PHONY += FORCE
 FORCE:
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
+# Declare the contents of the PHONY variable as phony.  We keep that
 # information in a variable so we can use it in if_changed and friends.
 .PHONY: $(PHONY)
index 0c4805a572c8739ff9d657c63961747e3ea08ff3..04a4a138ed131c7256aeb4108453400516b8965a 100644 (file)
@@ -555,11 +555,6 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config HAVE_DEC_LOCK
-       bool
-       depends on SMP
-       default y
-
 config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
index 04f9729de57c351c7e142b9aab9d9bca0878a2f3..854d5e79979e4ce929d7998bf1135a12880238f8 100644 (file)
@@ -35,8 +35,6 @@ lib-y =       __divqu.o __remqu.o __divlu.o __remlu.o \
        callback_srm.o srm_puts.o srm_printk.o \
        fls.o
 
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
 # The division routines are built from single source, with different defines.
 AFLAGS___divqu.o = -DDIV
 AFLAGS___remqu.o =       -DREM
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644 (file)
index a117707..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- * 
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
-  asm (".text                                  \n\
-       .global _atomic_dec_and_lock            \n\
-       .ent _atomic_dec_and_lock               \n\
-       .align  4                               \n\
-_atomic_dec_and_lock:                          \n\
-       .prologue 0                             \n\
-1:     ldl_l   $1, 0($16)                      \n\
-       subl    $1, 1, $1                       \n\
-       beq     $1, 2f                          \n\
-       stl_c   $1, 0($16)                      \n\
-       beq     $1, 4f                          \n\
-       mb                                      \n\
-       clr     $0                              \n\
-       ret                                     \n\
-2:     br      $29, 3f                         \n\
-3:     ldgp    $29, 0($29)                     \n\
-       br      $atomic_dec_and_lock_1..ng      \n\
-       .subsection 2                           \n\
-4:     br      1b                              \n\
-       .previous                               \n\
-       .end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
-       /* Slow path */
-       spin_lock(lock);
-       if (atomic_dec_and_test(atomic))
-               return 1;
-       spin_unlock(lock);
-       return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
index f9e8667f5886db82027643130ca87b58cbea8f62..73b514dddf65b281b0c3093f40b05496240b5455 100644 (file)
                        AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_dat3.mmc0_dat3 */
                        AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_cmd.mmc0_cmd */
                        AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0)       /* mmc0_clk.mmc0_clk */
-                       AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4)              /* mcasp0_aclkr.mmc0_sdwp */
                >;
        };
 
index ca294914bbb131b9725c43b8e7c768466bf0c775..23ea381d363fd12e6d9ac7f08e8613f1bf12443e 100644 (file)
@@ -39,6 +39,8 @@
                        ti,davinci-ctrl-ram-size = <0x2000>;
                        ti,davinci-rmii-en = /bits/ 8 <1>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
+                       clocks = <&emac_ick>;
+                       clock-names = "ick";
                };
 
                davinci_mdio: ethernet@5c030000 {
@@ -49,6 +51,8 @@
                        bus_freq = <1000000>;
                        #address-cells = <1>;
                        #size-cells = <0>;
+                       clocks = <&emac_fck>;
+                       clock-names = "fck";
                };
 
                uart4: serial@4809e000 {
        };
 };
 
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+       status = "disabled";
+};
+
 &iva {
        status = "disabled";
 };
index 440351ad0b80686d06126df39b02d8de104a95d0..d4be3fd0b6f4094643ef98660e0f2dbcb5edca9d 100644 (file)
 
                touchscreen-size-x = <480>;
                touchscreen-size-y = <272>;
+
+               wakeup-source;
        };
 
        tlv320aic3106: tlv320aic3106@1b {
index 6782ce481ac967ded05bbfc124355aa894790d6f..d8769956cbfcff7b4a38e72959ba8b88f397a1c0 100644 (file)
                                              3700 5
                                              3900 6
                                              4000 7>;
-                       cooling-cells = <2>;
+                       #cooling-cells = <2>;
                };
 
                gpio-leds {
index 18edc9bc79273b794ce263ce7d9674f43732c58e..929459c42760592c00d4ea0d585be4da2ad7e301 100644 (file)
 
                        thermal: thermal@e8078 {
                                compatible = "marvell,armada380-thermal";
-                               reg = <0xe4078 0x4>, <0xe4074 0x4>;
+                               reg = <0xe4078 0x4>, <0xe4070 0x8>;
                                status = "okay";
                        };
 
index 9fe4f5a6379e3b60d79a6ed8a0327f680434861e..2c4df2d2d4a6e1165fe27565a19d681c47a32cfa 100644 (file)
                        reg = <0x18008000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x1800b000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <0>;
 
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 97 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 98 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 99 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
 
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0>;
-                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+                       interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
                        linux,pci-domain = <1>;
 
                                compatible = "brcm,iproc-msi";
                                msi-controller;
                                interrupt-parent = <&gic>;
-                               interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 103 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 104 IRQ_TYPE_NONE>,
-                                            <GIC_SPI 105 IRQ_TYPE_NONE>;
+                               interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
 
index 3f9cedd8011f0c22fb05b6a50d1705fc5ceab05d..3084a7c957339f0edc2fef97d203b08635c96790 100644 (file)
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 95 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
 
                        reg = <0x3b000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 182 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 183 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 184 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 185 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 189 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 190 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 191 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index dcc55aa84583cdd18f7ef6ecd780eb947be1ef1f..09ba8504632284532e3b17c6d1531e2d732fadc4 100644 (file)
                        reg = <0x38000 0x50>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        dma-coherent;
                        status = "disabled";
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 128 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 129 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 130 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <1>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 134 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 135 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 136 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <2>;
 
                        compatible = "brcm,iproc-msi";
                        msi-controller;
                        interrupt-parent = <&gic>;
-                       interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 140 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 141 IRQ_TYPE_NONE>,
-                                    <GIC_SPI 142 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                        brcm,pcie-msi-inten;
                };
        };
index 9a076c409f4ed35fcf5fbe79807ede6e7e8466d5..ef995e50ee12bfd8b3d90d9e07062a41e04f4ff3 100644
        i2c0: i2c@18009000 {
                compatible = "brcm,iproc-i2c";
                reg = <0x18009000 0x50>;
-               interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clock-frequency = <100000>;
index f6f1597b03df931a1dea057921a43cea9f929a31..0f4f817a9e229c58f973f935a6f5906b1e0f8979 100644
                        gpio-controller;
                        #gpio-cells = <2>;
                        reg = <0x226000 0x1000>;
-                       interrupts = <42 IRQ_TYPE_EDGE_BOTH
-                               43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-                               45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-                               47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-                               49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+                       interrupts = <42 43 44 45 46 47 48 49 50>;
                        ti,ngpio = <144>;
                        ti,davinci-gpio-unbanked = <0>;
                        status = "disabled";
index 9dcd14edc20287f80c73a3b95d21e303ffa6d39c..e03495a799ce8d034feab58e263177794e706013 100644
                                dr_mode = "otg";
                                snps,dis_u3_susphy_quirk;
                                snps,dis_u2_susphy_quirk;
-                               snps,dis_metastability_quirk;
                        };
                };
 
                                dr_mode = "otg";
                                snps,dis_u3_susphy_quirk;
                                snps,dis_u2_susphy_quirk;
+                               snps,dis_metastability_quirk;
                        };
                };
 
index df9eca94d812290afe03affd59f76663ea1ab0ee..8a878687197b35a8e056ba55c4aaec56293123e1 100644
 
        pinctrl_ts: tsgrp {
                fsl,pins = <
-                       MX51_PAD_CSI1_D8__GPIO3_12              0x85
+                       MX51_PAD_CSI1_D8__GPIO3_12              0x04
                        MX51_PAD_CSI1_D9__GPIO3_13              0x85
                >;
        };
index 70483ce72ba6cf648809acb7f24be3af11817674..77f8f030dd0772aba631f57b704a7e60a9bd0532 100644
@@ -90,7 +90,7 @@
                                        clocks = <&clks IMX6Q_CLK_ECSPI5>,
                                                 <&clks IMX6Q_CLK_ECSPI5>;
                                        clock-names = "ipg", "per";
-                                       dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
+                                       dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
                                        dma-names = "rx", "tx";
                                        status = "disabled";
                                };
index d8b94f47498b67051ade669f23d2796a0b1e7433..4e4a55aad5c9ca9aa6fff90deb0ae1c5e99c3a13 100644
                        ranges = <0x81000000 0 0          0x08f80000 0 0x00010000 /* downstream I/O */
                                  0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */
                        num-lanes = <1>;
-                       interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "msi";
                        #interrupt-cells = <1>;
                        interrupt-map-mask = <0 0 0 0x7>;
index 486d4e7433ed32d2662fabcf9b25fe54eab0f187..b38f8c24055800c45e1e81aef451f08ac9e27be5 100644
                nand0: nand@ff900000 {
                        #address-cells = <0x1>;
                        #size-cells = <0x1>;
-                       compatible = "denali,denali-nand-dt";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xff900000 0x100000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
                        interrupts = <0x0 0x90 0x4>;
                        dma-mask = <0xffffffff>;
-                       clocks = <&nand_clk>;
+                       clocks = <&nand_x_clk>;
                        status = "disabled";
                };
 
index bead79e4b2aa2b624b8f7d21cef4751d6536b724..791ca15c799eba98850cbc3d4b96be7a509c422f 100644
                        #size-cells = <0>;
                        reg = <0xffda5000 0x100>;
                        interrupts = <0 102 4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        /*32bit_access;*/
                        tx-dma-channel = <&pdma 16>;
                        rx-dma-channel = <&pdma 17>;
                nand: nand@ffb90000 {
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
+                       compatible = "altr,socfpga-denali-nand";
                        reg = <0xffb90000 0x72000>,
                              <0xffb80000 0x10000>;
                        reg-names = "nand_data", "denali_reg";
index 1e9f7af8f70ff6ba23d9403f930f09dd6e0dda7e..3157be413297e5d22ad3174e2082b5199fc3083c 100644
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE)               += dmabounce.o
 obj-$(CONFIG_SHARP_LOCOMO)     += locomo.o
 obj-$(CONFIG_SHARP_PARAM)      += sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)      += scoop.o
-obj-$(CONFIG_SMP)              += secure_cntvoff.o
+obj-$(CONFIG_CPU_V7)           += secure_cntvoff.o
 obj-$(CONFIG_PCI_HOST_ITE8152)  += it8152.o
 obj-$(CONFIG_MCPM)             += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
 CFLAGS_REMOVE_mcpm_entry.o     = -pg
index 054591dc9a0020dcdaa907f6b3cded43408075d0..4cd2f4a2bff4e20beb76fd524348aae58fbc3590 100644
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index f70507ab91eeb1b59a0857cb9e6f55ff2825fe9f..200ebda47e0c3bee90eadd948b6f8f522fcfbedc 100644
@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
 CONFIG_USB_FUNCTIONFS=m
 CONFIG_USB_MASS_STORAGE=m
 CONFIG_USB_G_SERIAL=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index 7e1c543162c3ab16f11f6be6ccec5a16abae31d0..8f6be19825456496ef471b3b03a78d32354d9736 100644
@@ -1,5 +1,4 @@
 CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_CGROUPS=y
@@ -10,20 +9,10 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMDLINE_PARTITION=y
-CONFIG_ARCH_MULTI_V7=y
-# CONFIG_ARCH_MULTI_V5 is not set
-# CONFIG_ARCH_MULTI_V4 is not set
 CONFIG_ARCH_VIRT=y
 CONFIG_ARCH_ALPINE=y
 CONFIG_ARCH_ARTPEC=y
 CONFIG_MACH_ARTPEC6=y
-CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370=y
-CONFIG_MACH_ARMADA_375=y
-CONFIG_MACH_ARMADA_38X=y
-CONFIG_MACH_ARMADA_39X=y
-CONFIG_MACH_ARMADA_XP=y
-CONFIG_MACH_DOVE=y
 CONFIG_ARCH_AT91=y
 CONFIG_SOC_SAMA5D2=y
 CONFIG_SOC_SAMA5D3=y
@@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y
 CONFIG_ARCH_BCM_CYGNUS=y
 CONFIG_ARCH_BCM_HR2=y
 CONFIG_ARCH_BCM_NSP=y
-CONFIG_ARCH_BCM_21664=y
-CONFIG_ARCH_BCM_281XX=y
 CONFIG_ARCH_BCM_5301X=y
+CONFIG_ARCH_BCM_281XX=y
+CONFIG_ARCH_BCM_21664=y
 CONFIG_ARCH_BCM2835=y
 CONFIG_ARCH_BCM_63XX=y
 CONFIG_ARCH_BRCMSTB=y
@@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y
 CONFIG_MACH_BERLIN_BG2CD=y
 CONFIG_MACH_BERLIN_BG2Q=y
 CONFIG_ARCH_DIGICOLOR=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_HIGHBANK=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_HI3xxx=y
-CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_HIP01=y
 CONFIG_ARCH_HIP04=y
-CONFIG_ARCH_KEYSTONE=y
-CONFIG_ARCH_MESON=y
+CONFIG_ARCH_HIX5HD2=y
 CONFIG_ARCH_MXC=y
 CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
@@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y
 CONFIG_SOC_IMX6SX=y
 CONFIG_SOC_IMX6UL=y
 CONFIG_SOC_IMX7D=y
-CONFIG_SOC_VF610=y
 CONFIG_SOC_LS1021A=y
+CONFIG_SOC_VF610=y
+CONFIG_ARCH_KEYSTONE=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
+CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
+CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MEDIATEK=y
 CONFIG_ARCH_MSM8X60=y
 CONFIG_ARCH_MSM8960=y
 CONFIG_ARCH_MSM8974=y
 CONFIG_ARCH_ROCKCHIP=y
-CONFIG_ARCH_SOCFPGA=y
-CONFIG_PLAT_SPEAR=y
-CONFIG_ARCH_SPEAR13XX=y
-CONFIG_MACH_SPEAR1310=y
-CONFIG_MACH_SPEAR1340=y
-CONFIG_ARCH_STI=y
-CONFIG_ARCH_STM32=y
-CONFIG_ARCH_EXYNOS=y
-CONFIG_EXYNOS5420_MCPM=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_EMEV2=y
 CONFIG_ARCH_R7S72100=y
@@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y
 CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+CONFIG_ARCH_STI=y
+CONFIG_ARCH_STM32=y
 CONFIG_ARCH_SUNXI=y
-CONFIG_ARCH_SIRF=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_2x_SOC=y
-CONFIG_ARCH_TEGRA_3x_SOC=y
-CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_ARCH_UNIPHIER=y
 CONFIG_ARCH_U8500=y
-CONFIG_MACH_HREFV60=y
-CONFIG_MACH_SNOWBALL=y
 CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_VEXPRESS_TC2_PM=y
 CONFIG_ARCH_WM8850=y
 CONFIG_ARCH_ZYNQ=y
-CONFIG_TRUSTED_FOUNDATIONS=y
-CONFIG_PCI=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_DRA7XX=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_KEYSTONE=y
-CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MVEBU=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCI_RCAR_GEN2=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_KEYSTONE=y
 CONFIG_PCI_ENDPOINT=y
 CONFIG_PCI_ENDPOINT_CONFIGFS=y
 CONFIG_PCI_EPF_TEST=m
 CONFIG_SMP=y
 CONFIG_NR_CPUS=16
-CONFIG_HIGHPTE=y
-CONFIG_CMA=y
 CONFIG_SECCOMP=y
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=m
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_QORIQ_CPUFREQ=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_CPUIDLE=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
 CONFIG_ARM_ZYNQ_CPUIDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_KERNEL_MODE_NEON=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_TUNNEL=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_NET_DSA=m
-CONFIG_NET_SWITCHDEV=y
 CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_DEV=y
 CONFIG_CAN_AT91=m
 CONFIG_CAN_FLEXCAN=m
-CONFIG_CAN_RCAR=m
+CONFIG_CAN_SUN4I=y
 CONFIG_CAN_XILINXCAN=y
+CONFIG_CAN_RCAR=m
 CONFIG_CAN_MCP251X=y
-CONFIG_NET_DSA_BCM_SF2=m
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_CAN_SUN4I=y
 CONFIG_BT=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_BCM=y
@@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y
 CONFIG_RFKILL_GPIO=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=64
 CONFIG_OMAP_OCP2SCP=y
 CONFIG_SIMPLE_PM_BUS=y
-CONFIG_SUNXI_RSB=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_BLOCK=y
@@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m
 CONFIG_EEPROM_AT24=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_MULTI_LUN=y
 CONFIG_ATA=y
 CONFIG_SATA_AHCI=y
 CONFIG_SATA_AHCI_PLATFORM=y
@@ -251,14 +221,20 @@ CONFIG_SATA_MV=y
 CONFIG_SATA_RCAR=y
 CONFIG_NETDEVICES=y
 CONFIG_VIRTIO_NET=y
-CONFIG_HIX5HD2_GMAC=y
+CONFIG_B53_SPI_DRIVER=m
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
+CONFIG_NET_DSA_BCM_SF2=m
 CONFIG_SUN4I_EMAC=y
-CONFIG_MACB=y
 CONFIG_BCMGENET=m
 CONFIG_BGMAC_BCMA=y
 CONFIG_SYSTEMPORT=m
+CONFIG_MACB=y
 CONFIG_NET_CALXEDA_XGMAC=y
 CONFIG_GIANFAR=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_E1000E=y
 CONFIG_IGB=y
 CONFIG_MV643XX_ETH=y
 CONFIG_MVNETA=y
@@ -268,19 +244,17 @@ CONFIG_R8169=y
 CONFIG_SH_ETH=y
 CONFIG_SMSC911X=y
 CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_PLATFORM=y
 CONFIG_DWMAC_DWC_QOS_ETH=y
 CONFIG_TI_CPSW=y
 CONFIG_XILINX_EMACLITE=y
 CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_SMSC_PHY=y
 CONFIG_BROADCOM_PHY=y
 CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MICREL_PHY=y
-CONFIG_FIXED_PHY=y
+CONFIG_REALTEK_PHY=y
 CONFIG_ROCKCHIP_PHY=y
+CONFIG_SMSC_PHY=y
 CONFIG_USB_PEGASUS=y
 CONFIG_USB_RTL8152=m
 CONFIG_USB_LAN78XX=m
@@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y
 CONFIG_USB_NET_SMSC75XX=y
 CONFIG_USB_NET_SMSC95XX=y
 CONFIG_BRCMFMAC=m
-CONFIG_RT2X00=m
-CONFIG_RT2800USB=m
 CONFIG_MWIFIEX=m
 CONFIG_MWIFIEX_SDIO=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
 CONFIG_INPUT_JOYDEV=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_QT1070=m
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_KEYBOARD_TEGRA=y
-CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_KEYBOARD_ST_KEYSCAN=y
+CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_TOUCHSCREEN_ST1232=m
 CONFIG_TOUCHSCREEN_STMPE=y
 CONFIG_TOUCHSCREEN_SUN4I=y
-CONFIG_TOUCHSCREEN_WM97XX=m
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=m
 CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_EM=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_ATMEL=y
 CONFIG_SERIAL_ATMEL_CONSOLE=y
 CONFIG_SERIAL_ATMEL_TTYAT=y
-CONFIG_SERIAL_BCM63XX=y
-CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_MESON=y
 CONFIG_SERIAL_MESON_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
@@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y
 CONFIG_SERIAL_IMX_CONSOLE=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=20
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_SH_SCI_DMA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_VT8500=y
 CONFIG_SERIAL_VT8500_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_OMAP=y
 CONFIG_SERIAL_OMAP_CONSOLE=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_FSL_LPUART=y
@@ -365,12 +337,10 @@ CONFIG_SERIAL_ST_ASC_CONSOLE=y
 CONFIG_SERIAL_STM32=y
 CONFIG_SERIAL_STM32_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_HVC_DRIVER=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_DAVINCI=y
-CONFIG_I2C_MESON=y
-CONFIG_I2C_MUX=y
 CONFIG_I2C_ARB_GPIO_CHALLENGE=m
 CONFIG_I2C_MUX_PCA954x=y
 CONFIG_I2C_MUX_PINCTRL=y
@@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y
 CONFIG_I2C_AT91=m
 CONFIG_I2C_BCM2835=y
 CONFIG_I2C_CADENCE=y
+CONFIG_I2C_DAVINCI=y
 CONFIG_I2C_DESIGNWARE_PLATFORM=y
 CONFIG_I2C_DIGICOLOR=m
 CONFIG_I2C_EMEV2=m
 CONFIG_I2C_GPIO=m
-CONFIG_I2C_EXYNOS5=y
 CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
 CONFIG_I2C_MV64XXX=y
 CONFIG_I2C_RIIC=y
 CONFIG_I2C_RK3X=y
@@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y
 CONFIG_SPMI=y
 CONFIG_PINCTRL_AS3722=y
 CONFIG_PINCTRL_PALMAS=y
-CONFIG_PINCTRL_BCM2835=y
 CONFIG_PINCTRL_APQ8064=y
 CONFIG_PINCTRL_APQ8084=y
 CONFIG_PINCTRL_IPQ8064=y
@@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
 CONFIG_GPIO_DAVINCI=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_EM=y
 CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_UNIPHIER=y
 CONFIG_GPIO_XILINX=y
 CONFIG_GPIO_ZYNQ=y
 CONFIG_GPIO_PCA953X=y
 CONFIG_GPIO_PCA953X_IRQ=y
 CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_TWL4030=y
 CONFIG_GPIO_PALMAS=y
-CONFIG_GPIO_SYSCON=y
 CONFIG_GPIO_TPS6586X=y
 CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_ST=y
+CONFIG_POWER_RESET_KEYSTONE=y
+CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_BATTERY_ACT8945A=y
 CONFIG_BATTERY_CPCAP=m
 CONFIG_BATTERY_SBS=y
+CONFIG_AXP20X_POWER=m
 CONFIG_BATTERY_MAX17040=m
 CONFIG_BATTERY_MAX17042=m
 CONFIG_CHARGER_CPCAP=m
@@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m
 CONFIG_CHARGER_MAX8997=m
 CONFIG_CHARGER_MAX8998=m
 CONFIG_CHARGER_TPS65090=y
-CONFIG_AXP20X_POWER=m
-CONFIG_POWER_RESET_AS3722=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_RMOBILE=y
-CONFIG_POWER_RESET_ST=y
-CONFIG_POWER_AVS=y
-CONFIG_ROCKCHIP_IODOMAIN=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
@@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_SENSORS_PWM_FAN=m
 CONFIG_SENSORS_INA2XX=m
 CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=m
-CONFIG_BRCMSTB_THERMAL=m
 CONFIG_IMX_THERMAL=y
 CONFIG_ROCKCHIP_THERMAL=y
 CONFIG_RCAR_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
-CONFIG_DAVINCI_WATCHDOG=m
-CONFIG_EXYNOS_THERMAL=m
+CONFIG_BCM2835_THERMAL=m
+CONFIG_BRCMSTB_THERMAL=m
 CONFIG_ST_THERMAL_MEMMAP=y
 CONFIG_WATCHDOG=y
 CONFIG_DA9063_WATCHDOG=m
@@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y
 CONFIG_ARM_SP805_WATCHDOG=y
 CONFIG_AT91SAM9X_WATCHDOG=y
 CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_DAVINCI_WATCHDOG=m
 CONFIG_ORION_WATCHDOG=y
 CONFIG_RN5T618_WATCHDOG=y
-CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_IMX2_WDT=y
+CONFIG_ST_LPC_WATCHDOG=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
-CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_RENESAS_WDT=m
-CONFIG_BCM2835_WDT=y
 CONFIG_BCM47XX_WDT=y
-CONFIG_BCM7038_WDT=m
+CONFIG_BCM2835_WDT=y
 CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCMA_HOST_SOC=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
 CONFIG_MFD_ACT8945A=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
@@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y
 CONFIG_MFD_ATMEL_HLCDC=m
 CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_AC100=y
-CONFIG_MFD_AXP20X=y
 CONFIG_MFD_AXP20X_I2C=y
 CONFIG_MFD_AXP20X_RSB=y
 CONFIG_MFD_CROS_EC=m
@@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m
 CONFIG_MFD_MAX8907=y
 CONFIG_MFD_MAX8997=y
 CONFIG_MFD_MAX8998=y
-CONFIG_MFD_RK808=y
 CONFIG_MFD_CPCAP=y
 CONFIG_MFD_PM8XXX=y
 CONFIG_MFD_QCOM_RPM=y
 CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
 CONFIG_MFD_RN5T618=y
 CONFIG_MFD_SEC_CORE=y
 CONFIG_MFD_STMPE=y
@@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y
 CONFIG_MFD_TPS65218=y
 CONFIG_MFD_TPS6586X=y
 CONFIG_MFD_TPS65910=y
-CONFIG_REGULATOR_ACT8945A=y
-CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ACT8945A=y
 CONFIG_REGULATOR_ANATOP=y
+CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3711=y
 CONFIG_REGULATOR_AS3722=y
 CONFIG_REGULATOR_AXP20X=y
@@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_CPCAP=y
 CONFIG_REGULATOR_DA9210=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_GPIO=y
-CONFIG_MFD_SYSCON=y
-CONFIG_POWER_RESET_SYSCON=y
 CONFIG_REGULATOR_LP872X=y
 CONFIG_REGULATOR_MAX14577=m
 CONFIG_REGULATOR_MAX8907=y
@@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y
 CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_PWM=y
 CONFIG_REGULATOR_QCOM_RPM=y
-CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=m
+CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_RN5T618=y
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
@@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
 CONFIG_SOC_CAMERA=m
 CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_VIDEO_RCAR_VIN=m
-CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
 CONFIG_VIDEO_S5P_FIMC=m
 CONFIG_VIDEO_S5P_MIPI_CSIS=m
 CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
 CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
 CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
@@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m
 CONFIG_VIDEO_RENESAS_JPU=m
 CONFIG_VIDEO_RENESAS_VSP1=m
 CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIVID=m
 CONFIG_CEC_PLATFORM_DRIVERS=y
 CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
 # CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
 CONFIG_VIDEO_ADV7180=m
 CONFIG_VIDEO_ML86V7667=m
 CONFIG_DRM=y
-CONFIG_DRM_I2C_ADV7511=m
-CONFIG_DRM_I2C_ADV7511_AUDIO=y
 # CONFIG_DRM_I2C_CH7006 is not set
 # CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_DUMB_VGA_DAC=m
-CONFIG_DRM_NXP_PTN3460=m
-CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_NOUVEAU=m
 CONFIG_DRM_EXYNOS=m
 CONFIG_DRM_EXYNOS_FIMD=y
@@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y
 CONFIG_DRM_SUN4I=m
 CONFIG_DRM_FSL_DCU=m
 CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
 CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
 CONFIG_DRM_SII9234=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
 CONFIG_DRM_STI=m
-CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4=m
 CONFIG_DRM_ETNAVIV=m
 CONFIG_DRM_MXSFB=m
 CONFIG_FB_ARMCLCD=y
@@ -659,8 +627,6 @@ CONFIG_FB_EFI=y
 CONFIG_FB_WM8505=y
 CONFIG_FB_SH_MOBILE_LCDC=y
 CONFIG_FB_SIMPLE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=m
 CONFIG_BACKLIGHT_PWM=y
 CONFIG_BACKLIGHT_AS3711=y
@@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_HDA_TEGRA=m
 CONFIG_SND_HDA_INPUT_BEEP=y
 CONFIG_SND_HDA_PATCH_LOADER=y
@@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m
 CONFIG_SND_SOC_ODROID=m
 CONFIG_SND_SOC_SH4_FSI=m
 CONFIG_SND_SOC_RCAR=m
-CONFIG_SND_SIMPLE_SCU_CARD=m
+CONFIG_SND_SOC_STI=m
 CONFIG_SND_SUN4I_CODEC=m
 CONFIG_SND_SOC_TEGRA=m
 CONFIG_SND_SOC_TEGRA20_I2S=m
@@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m
 CONFIG_SND_SOC_TEGRA_WM9712=m
 CONFIG_SND_SOC_TEGRA_TRIMSLICE=m
 CONFIG_SND_SOC_TEGRA_ALC5632=m
-CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_TEGRA_MAX98090=m
 CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_CPCAP=m
 CONFIG_SND_SOC_SGTL5000=m
 CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_STI=m
 CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_SCU_CARD=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
-CONFIG_USB_XHCI_RCAR=m
 CONFIG_USB_XHCI_TEGRA=m
 CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=m
-CONFIG_USB_EHCI_EXYNOS=y
-CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_EXYNOS=m
 CONFIG_USB_R8A66597_HCD=m
 CONFIG_USB_RENESAS_USBHS=m
@@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_TUSB_OMAP_DMA=y
 CONFIG_USB_DWC3=y
 CONFIG_USB_DWC2=y
-CONFIG_USB_HSIC_USB3503=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
 CONFIG_AB8500_USB=y
-CONFIG_KEYSTONE_USB_PHY=y
+CONFIG_KEYSTONE_USB_PHY=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_AM335X_PHY_USB=m
 CONFIG_TWL6030_USB=m
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
-CONFIG_USB_MSM_OTG=m
 CONFIG_USB_MXS_PHY=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_FSL_USB2=y
@@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_DOVE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_PXAV3=y
 CONFIG_MMC_SDHCI_SPEAR=y
-CONFIG_MMC_SDHCI_S3C=y
 CONFIG_MMC_SDHCI_S3C_DMA=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
+CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_SDHCI_ST=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
 CONFIG_MMC_ATMELMCI=y
 CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_MESON_MX_SDIO=y
 CONFIG_MMC_MVSDIO=y
 CONFIG_MMC_SDHI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_MMC_DW_ROCKCHIP=y
 CONFIG_MMC_SH_MMCIF=y
@@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_PALMAS=y
-CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_TPS6586X=y
 CONFIG_RTC_DRV_TPS65910=y
 CONFIG_RTC_DRV_S35390A=m
 CONFIG_RTC_DRV_RX8581=m
 CONFIG_RTC_DRV_EM3027=y
+CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_DA9063=m
 CONFIG_RTC_DRV_EFI=m
 CONFIG_RTC_DRV_DIGICOLOR=m
-CONFIG_RTC_DRV_S5M=m
 CONFIG_RTC_DRV_S3C=m
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_AT91RM9200=m
 CONFIG_RTC_DRV_AT91SAM9=m
 CONFIG_RTC_DRV_VT8500=y
-CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_SUNXI=y
 CONFIG_RTC_DRV_MV=y
 CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_ST_LPC=y
 CONFIG_RTC_DRV_CPCAP=m
 CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
 CONFIG_AT_HDMAC=y
 CONFIG_AT_XDMAC=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_SUN6I=y
 CONFIG_FSL_EDMA=y
+CONFIG_IMX_DMA=y
+CONFIG_IMX_SDMA=y
 CONFIG_MV_XOR=y
+CONFIG_MXS_DMA=y
+CONFIG_PL330_DMA=y
+CONFIG_SIRF_DMA=y
+CONFIG_STE_DMA40=y
+CONFIG_ST_FDMA=m
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_DW_DMAC=y
 CONFIG_SH_DMAE=y
 CONFIG_RCAR_DMAC=y
 CONFIG_RENESAS_USB_DMAC=m
-CONFIG_STE_DMA40=y
-CONFIG_SIRF_DMA=y
-CONFIG_TI_EDMA=y
-CONFIG_PL330_DMA=y
-CONFIG_IMX_SDMA=y
-CONFIG_IMX_DMA=y
-CONFIG_MXS_DMA=y
-CONFIG_DMA_BCM2835=y
-CONFIG_DMA_OMAP=y
-CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_DMA=y
-CONFIG_DMA_SUN6I=y
-CONFIG_ST_FDMA=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
 CONFIG_STAGING=y
-CONFIG_SENSORS_ISL29018=y
-CONFIG_SENSORS_ISL29028=y
 CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
 CONFIG_NVEC_PAZ00=y
-CONFIG_BCMA=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-CONFIG_QCOM_GSBI=y
-CONFIG_QCOM_PM=y
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD_RPM=y
-CONFIG_QCOM_SMP2P=y
-CONFIG_QCOM_SMSM=y
-CONFIG_QCOM_WCNSS_CTRL=m
-CONFIG_ROCKCHIP_PM_DOMAINS=y
-CONFIG_COMMON_CLK_QCOM=y
-CONFIG_QCOM_CLK_RPM=y
-CONFIG_CHROME_PLATFORMS=y
 CONFIG_STAGING_BOARD=y
-CONFIG_CROS_EC_CHARDEV=m
 CONFIG_COMMON_CLK_MAX77686=y
 CONFIG_COMMON_CLK_RK808=m
 CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
 CONFIG_APQ_MMCC_8084=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
 CONFIG_MSM_MMCC_8974=y
-CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_BCM2835_MBOX=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_REMOTEPROC=m
 CONFIG_ST_REMOTEPROC=m
 CONFIG_RPMSG_VIRTIO=m
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMD_RPM=m
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
 CONFIG_PM_DEVFREQ=y
 CONFIG_ARM_TEGRA_DEVFREQ=m
-CONFIG_MEMORY=y
-CONFIG_EXTCON=y
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_IIO_SW_TRIGGER=y
@@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m
 CONFIG_XILINX_XADC=y
 CONFIG_MPU3050_I2C=y
 CONFIG_CM36651=m
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
 CONFIG_AK8975=y
-CONFIG_RASPBERRYPI_POWER=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_PWM=y
 CONFIG_PWM_ATMEL=m
 CONFIG_PWM_ATMEL_HLCDC_PWM=m
 CONFIG_PWM_ATMEL_TCB=m
+CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
 CONFIG_PWM_FSL_FTM=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_RCAR=m
 CONFIG_PWM_RENESAS_TPU=y
 CONFIG_PWM_ROCKCHIP=m
 CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_STI=y
 CONFIG_PWM_SUN4I=y
 CONFIG_PWM_TEGRA=y
 CONFIG_PWM_VT8500=y
+CONFIG_KEYSTONE_IRQ=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
 CONFIG_PHY_HIX5HD2_SATA=y
-CONFIG_E1000E=y
-CONFIG_PWM_STI=y
-CONFIG_PWM_BCM2835=y
-CONFIG_PWM_BRCMSTB=m
-CONFIG_PHY_DM816X_USB=m
-CONFIG_OMAP_USB2=y
-CONFIG_TI_PIPE3=y
-CONFIG_TWL4030_USB=m
+CONFIG_PHY_BERLIN_SATA=y
 CONFIG_PHY_BERLIN_USB=y
 CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_ROCKCHIP_DP=m
 CONFIG_PHY_ROCKCHIP_USB=y
-CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_MIPHY28LP=y
-CONFIG_PHY_RCAR_GEN2=m
 CONFIG_PHY_STIH407_USB=y
 CONFIG_PHY_STM32_USBPHYC=y
-CONFIG_PHY_SUN4I_USB=y
-CONFIG_PHY_SUN9I_USB=y
-CONFIG_PHY_SAMSUNG_USB2=m
 CONFIG_PHY_TEGRA_XUSB=y
-CONFIG_PHY_BRCM_SATA=y
-CONFIG_NVMEM=y
+CONFIG_PHY_DM816X_USB=m
+CONFIG_OMAP_USB2=y
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_SUNXI_SID=y
 CONFIG_NVMEM_VF610_OCOTP=y
-CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
-CONFIG_EFI_VARS=m
-CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
 CONFIG_MSDOS_FS=y
@@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_UBIFS_FS=y
-CONFIG_TMPFS=y
 CONFIG_SQUASHFS=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
@@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_NLS_UTF8=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_KEYSTONE_IRQ=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_ST=y
 CONFIG_CRYPTO_USER=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_DEV_MARVELL_CESA=m
 CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
 CONFIG_CRYPTO_DEV_S5P=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=m
+CONFIG_CRYPTO_DEV_ATMEL_TDES=m
+CONFIG_CRYPTO_DEV_ATMEL_SHA=m
 CONFIG_CRYPTO_DEV_SUN4I_SS=m
 CONFIG_CRYPTO_DEV_ROCKCHIP=m
 CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM=m
 CONFIG_CRYPTO_SHA1_ARM_NEON=m
 CONFIG_CRYPTO_SHA1_ARM_CE=m
 CONFIG_CRYPTO_SHA2_ARM_CE=m
-CONFIG_CRYPTO_SHA256_ARM=m
 CONFIG_CRYPTO_SHA512_ARM=m
 CONFIG_CRYPTO_AES_ARM=m
 CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRYPTO_AES_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
-CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m
 CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_DEV_ATMEL_AES=m
-CONFIG_CRYPTO_DEV_ATMEL_TDES=m
-CONFIG_CRYPTO_DEV_ATMEL_SHA=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_MMIO=y
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
index 3c1e203e53b9ccd752731f228b595a5678557782..57caa742016ed59bc8d3755fd6b9526f0c05f860 100644
         * Allocate stack space to store 128 bytes worth of tweaks.  For
         * performance, this space is aligned to a 16-byte boundary so that we
         * can use the load/store instructions that declare 16-byte alignment.
+        * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
         */
-       sub             sp, #128
-       bic             sp, #0xf
+       sub             r12, sp, #128
+       bic             r12, #0xf
+       mov             sp, r12
 
 .if \n == 64
        // Load first tweak
index a71f16536b6c178c09334efc3047f4ddcc8da91b..6e41336b0bc4fc71ebaf5f5f4bae7e5e9e1b0395 100644
@@ -1 +1,4 @@
 obj-$(CONFIG_TRUSTED_FOUNDATIONS)      += trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT                := n
index dd546d65a3830d819a48fc1463d55d1cc2110c18..7a9b86978ee1e2b917d1104138d3aa468a88bb53 100644
@@ -177,7 +177,7 @@ M_CLASS(streq       r3, [r12, #PMSAv8_MAIR1])
        bic     r0, r0, #CR_I
 #endif
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
-       isb
+       instr_sync
 #elif defined (CONFIG_CPU_V7M)
 #ifdef CONFIG_ARM_MPU
        ldreq   r3, [r12, MPU_CTRL]
index f09e9d66d605f4159990ad044cfd29486d621d20..dec130e7078c9adc10dae920c2c706143c7e126e 100644
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /*
         * Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
                        } else {
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
-                               rseq_handle_notify_resume(regs);
+                               rseq_handle_notify_resume(NULL, regs);
                        }
                }
                local_irq_disable();
index e22fb40e34bc55be6dd807de63fb9cd009107916..6d5beb11bd965a805107328d2522144f4b857f9a 100644
@@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
                            GPIO_ACTIVE_LOW),
                GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
-                           GPIO_ACTIVE_LOW),
+                           GPIO_ACTIVE_HIGH),
        },
 };
 
index 69df3620eca5ce1720f88ab86cf5a36df891d7e7..1c73694c871ad8289b572056d5c3727f3ee22eb2 100644
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround on the secondary core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (the kernel will detect a misconfiguration and
+ * complain if this is not done).
+ *
+ * On General Purpose (GP) devices, ACR bit settings can only be done
+ * by ROM code in the "secure world" using the smc call, and there is
+ * no option to update the "firmware" on such devices. This also works
+ * on High Security (HS) devices, as a backup option in case the
+ * update is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+       u32 acr, acr_mask;
+
+       asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+       /*
+        * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+        */
+       acr_mask = BIT(0);
+
+       /* If the workaround is already enabled, skip the expensive smc */
+       if ((acr & acr_mask) == acr_mask)
+               return;
+
+       acr |= acr_mask;
+       omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+       pr_debug("%s: ARM ACR setup for CVE-2017-5715 applied on CPU%d\n",
+                __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
        /*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
                set_cntfreq();
                /* Configure ACR to disable streaming WA for 801819 */
                omap5_erratum_workaround_801819();
+               /* Enable ACR bit to allow for the ICIALLU workaround */
+               omap5_secondary_harden_predictor();
        }
 
        /*
index 9c10248fadccc2d03ef3b3bcbddbe0b43347f158..4e8c2116808ecf3d36d36653184dc89d2941885e 100644
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
        int i;
 
-       for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+       for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);
 
                saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
        int i;
 
-       for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+       for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
                void __iomem *base = irq_base(i);
 
                __raw_writel(saved_icmr[i], base + ICMR);
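The two hunks above swap a truncating division for DIV_ROUND_UP() so that a
partially filled last bank of 32 interrupts is still saved and restored. As a
minimal standalone sketch of the off-by-one-bank bug (the IRQ count of 56 is
hypothetical, not taken from the patch):

    #include <stdio.h>

    /* Same definition the kernel uses in include/linux/kernel.h. */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    int main(void)
    {
            int nr_irqs = 56;  /* hypothetical: not a multiple of 32 */

            /* Truncating division visits one bank and misses IRQs 32..55. */
            printf("nr_irqs / 32 -> %d bank(s)\n", nr_irqs / 32);

            /* Rounding up also covers the partially used last bank. */
            printf("DIV_ROUND_UP -> %d bank(s)\n", DIV_ROUND_UP(nr_irqs, 32));
            return 0;
    }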
index c186474422f3fb25cb809a6d0bff48f476ef8595..0cc8e04295a40dc1d16f308396afdfb7540aa48c 100644
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
        return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+       kernel_set_to_readonly = 1;
        stop_machine(__mark_rodata_ro, NULL, NULL);
        debug_checkwx();
 }
 
 void set_kernel_text_rw(void)
 {
+       if (!kernel_set_to_readonly)
+               return;
+
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                                current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+       if (!kernel_set_to_readonly)
+               return;
+
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
 }
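The kernel_set_to_readonly flag added above acts as a latch: until
mark_rodata_ro() has actually made the text read-only, both helpers return
early instead of flipping permissions on memory that was never protected.
A self-contained sketch of that pattern (simplified names, with printf
standing in for the real permission change):

    #include <stdbool.h>
    #include <stdio.h>

    static bool kernel_set_to_readonly;  /* latched once rodata is sealed */

    static void set_kernel_text_rw(void)
    {
            /* Before the latch is set, the text was never RO; do nothing. */
            if (!kernel_set_to_readonly)
                    return;
            puts("kernel text -> RW");
    }

    int main(void)
    {
            set_kernel_text_rw();            /* early call: silent no-op */
            kernel_set_to_readonly = true;   /* what mark_rodata_ro() sets */
            set_kernel_text_rw();            /* performs the transition */
            return 0;
    }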
index 6e8b7161303936908b3b2b7adfced5d17de379ce..f6a62ae44a65b61e162203ad261a7fbb5d4b34cf 100644
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                /* there are 2 passes here */
                bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-       set_memory_ro((unsigned long)header, header->pages);
+       bpf_jit_binary_lock_ro(header);
        prog->bpf_func = (void *)ctx.target;
        prog->jited = 1;
        prog->jited_len = image_size;
index 8073625371f5d22defae1efe6322a717201e8010..07060e5b58641cc008f41aa927c2e6043ab6afbb 100644
@@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 
 static __read_mostly unsigned int xen_events_irq;
 
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
 int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@ void __init xen_early_init(void)
        xen_setup_features();
 
        if (xen_feature(XENFEAT_dom0))
-               xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
-       else
-               xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+               xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
 
        if (!console_set_on_cmdline && !xen_initial_domain())
                add_preferred_console("hvc", 0, NULL);
index 45272266dafb64a1fda433e7f557bf11b89e908e..e7101b19d5902775bf0a2f951a866f1abcf614b1 100644
@@ -10,7 +10,7 @@
 #
 # Copyright (C) 1995-2001 by Russell King
 
-LDFLAGS_vmlinux        :=-p --no-undefined -X
+LDFLAGS_vmlinux        :=--no-undefined -X
 CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
 GZFLAGS                :=-9
 
@@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
 KBUILD_CPPFLAGS        += -mbig-endian
 CHECKFLAGS     += -D__AARCH64EB__
 AS             += -EB
-LD             += -EB
-LDFLAGS                += -maarch64linuxb
+# We must use the linux target here, since distributions don't tend to package
+# the ELF linker scripts with binutils, and this results in a build failure.
+LDFLAGS                += -EB -maarch64linuxb
 UTS_MACHINE    := aarch64_be
 else
 KBUILD_CPPFLAGS        += -mlittle-endian
 CHECKFLAGS     += -D__AARCH64EL__
 AS             += -EL
-LD             += -EL
-LDFLAGS                += -maarch64linux
+LDFLAGS                += -EL -maarch64linux # See comment above
 UTS_MACHINE    := aarch64
 endif
 
index e6b059378dc04784927a9b996f24213685bf406a..67dac595dc72ebdeffcd5b6bffd50d115cce8cbc 100644
                        interrupts = <0 99 4>;
                        resets = <&rst SPIM0_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
                        interrupts = <0 100 4>;
                        resets = <&rst SPIM1_RESET>;
                        reg-io-width = <4>;
-                       num-chipselect = <4>;
-                       bus-num = <0>;
+                       num-cs = <4>;
                        status = "disabled";
                };
 
index 4b3331fbfe39d7b81d9466fb718975b6265c8e5e..dff9b15eb3c0b63a70c65070c465305c35985dee 100644
 
 &ethmac {
        status = "okay";
-       phy-mode = "rgmii";
        pinctrl-0 = <&eth_rgmii_y_pins>;
        pinctrl-names = "default";
+       phy-handle = <&eth_phy0>;
+       phy-mode = "rgmii";
+
+       mdio {
+               compatible = "snps,dwmac-mdio";
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               eth_phy0: ethernet-phy@0 {
+                       /* Realtek RTL8211F (0x001cc916) */
+                       reg = <0>;
+                       eee-broken-1000t;
+               };
+       };
 };
 
 &uart_A {
index fee87737a201f1121fe7a3ad3cd70c1d20415a0d..67d7115e4effbde75173aa4a4c07ae890b3183c5 100644
 
                        sd_emmc_b: sd@5000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x5000 0x0 0x2000>;
+                               reg = <0x0 0x5000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_B>,
 
                        sd_emmc_c: mmc@7000 {
                                compatible = "amlogic,meson-axg-mmc";
-                               reg = <0x0 0x7000 0x0 0x2000>;
+                               reg = <0x0 0x7000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                                clocks = <&clkc CLKID_SD_EMMC_C>,
index 3c31e21cbed7fcdde5bbdf030fcd6c194be5033d..b8dc4dbb391b669fc13eb13b1a24f01d24ab252f 100644
                        no-map;
                };
 
+               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+               secmon_reserved_alt: secmon@5000000 {
+                       reg = <0x0 0x05000000 0x0 0x300000>;
+                       no-map;
+               };
+
                linux,cma {
                        compatible = "shared-dma-pool";
                        reusable;
 
                        sd_emmc_a: mmc@70000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x70000 0x0 0x2000>;
+                               reg = <0x0 0x70000 0x0 0x800>;
                                interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_b: mmc@72000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x72000 0x0 0x2000>;
+                               reg = <0x0 0x72000 0x0 0x800>;
                                interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
 
                        sd_emmc_c: mmc@74000 {
                                compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
-                               reg = <0x0 0x74000 0x0 0x2000>;
+                               reg = <0x0 0x74000 0x0 0x800>;
                                interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
                                status = "disabled";
                        };
index eb327664a4d8c38c196b7cec2dbbe5e5ac2c147f..6aaafff674f97f56625c2da8ea6a5b7dd10eb2d8 100644
@@ -6,7 +6,7 @@
 
 &apb {
        mali: gpu@c0000 {
-               compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+               compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
                reg = <0x0 0xc0000 0x0 0x40000>;
                interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
index 3e3eb31748a35a7790a9dc90e56971f004660298..f63bceb88caafa249d84de963c3daa034fb842b7 100644
 
        bus-width = <4>;
        cap-sd-highspeed;
-       sd-uhs-sdr12;
-       sd-uhs-sdr25;
-       sd-uhs-sdr50;
        max-frequency = <100000000>;
        disable-wp;
 
index 0cfd701809dec578ac31f5f68a7fcfbc21822619..a1b31013ab6e3494d810619fadf81752a67b94f4 100644
 &usb0 {
        status = "okay";
 };
+
+&usb2_phy0 {
+       /*
+        * HDMI_5V is also used as supply for the USB VBUS.
+        */
+       phy-supply = <&hdmi_5v>;
+};
index 27538eea547b19a0fe8c14a97de4aa303ba63978..c87a80e9bcc6a80bc0f8a59c43a32d6485facafe 100644
 / {
        compatible = "amlogic,meson-gxl";
 
-       reserved-memory {
-               /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
-               secmon_reserved_alt: secmon@5000000 {
-                       reg = <0x0 0x05000000 0x0 0x300000>;
-                       no-map;
-               };
-       };
-
        soc {
                usb0: usb@c9000000 {
                        status = "disabled";
index 4a2a6af8e752dbbe3a17fa02861fb3603d7c44cb..4057197048dcbbacaee733c6067cc677fd1ad54d 100644
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <0>;
 
 
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
+               interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
 
                linux,pci-domain = <4>;
 
                        reg = <0x66080000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x660b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index eb6f08cdbd796c3d764393f9e2e70db2129b0e28..77efa28c4dd53db718b22e64569385f6d92c2feb 100644
        enet-phy-lane-swap;
 };
 
+&sdio0 {
+       mmc-ddr-1_8v;
+};
+
 &uart2 {
        status = "okay";
 };
index 5084b037320fd9cb65133ca929517062a245af3b..55ba495ef56e1f54b518483bc9e5369fcb03b441 100644
@@ -42,3 +42,7 @@
 &gphy0 {
        enet-phy-lane-swap;
 };
+
+&sdio0 {
+       mmc-ddr-1_8v;
+};
index 99aaff0b6d72b6bc971863411b80caa3dd165048..b203152ad67ca18b4421bb035b2d13d32d7f9be5 100644
                        reg = <0x000b0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
                        reg = <0x000e0000 0x100>;
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+                       interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
                        clock-frequency = <100000>;
                        status = "disabled";
                };
index c6999624ed8abdcf4a7f8cea12635eb0606e6432..68c5a6c819aef2c3fbe8b59aac695cfb0c3a3a77 100644
        vmmc-supply = <&wlan_en>;
        ti,non-removable;
        non-removable;
+       cap-power-off-card;
+       keep-power-in-suspend;
        #address-cells = <0x1>;
        #size-cells = <0x0>;
        status = "ok";
index edb4ee0b8896b2c9a5572e1160e273eac42e062d..7f12624f6c8e8c6af0a3900f3e7d703a969c6c0e 100644
                dwmmc_2: dwmmc2@f723f000 {
                        bus-width = <0x4>;
                        non-removable;
+                       cap-power-off-card;
+                       keep-power-in-suspend;
                        vmmc-supply = <&reg_vdd_3v3>;
                        mmc-pwrseq = <&wl1835_pwrseq>;
 
index 7dabe25f6774827fd08ec78b3f3793e5b5658177..1c6ff8197a88b1f890fed5b592b9358986942145 100644
 
                CP110_LABEL(icu): interrupt-controller@1e0000 {
                        compatible = "marvell,cp110-icu";
-                       reg = <0x1e0000 0x10>;
+                       reg = <0x1e0000 0x440>;
                        #interrupt-cells = <3>;
                        interrupt-controller;
                        msi-parent = <&gicp>;
index 0f829db33efe2dfa2735a7cdf570de77c49a6356..4d5ef01f43a331c456eddf1a324f1e1d450bcea5 100644
@@ -75,7 +75,7 @@
 
                serial@75b1000 {
                        label = "LS-UART0";
-                       status = "okay";
+                       status = "disabled";
                        pinctrl-names = "default", "sleep";
                        pinctrl-0 = <&blsp2_uart2_4pins_default>;
                        pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
index 650f356f69ca748f0fbef0c52f4026e43f511e46..c2625d15a8c08f535e6f00f5c8228212c4d7ec5f 100644
 
                                port@0 {
                                        reg = <0>;
-                                       etf_out: endpoint {
+                                       etf_in: endpoint {
                                                slave-mode;
                                                remote-endpoint = <&funnel0_out>;
                                        };
                                };
                                port@1 {
                                        reg = <0>;
-                                       etf_in: endpoint {
+                                       etf_out: endpoint {
                                                remote-endpoint = <&replicator_in>;
                                        };
                                };
index 9b4dc41703e38036283aa2a4eededd3322e7a428..ae3b5adf32dfe4a31125880e3a8fa49877c83923 100644
@@ -54,7 +54,7 @@
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD11";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index fe6608ea327772e3ad0125c020f8d0102dda35bb..7919233c9ce27e3c86dc8dffce13a97e00112c64 100644
@@ -54,7 +54,7 @@
        sound {
                compatible = "audio-graph-card";
                label = "UniPhier LD20";
-               widgets = "Headphone", "Headphone Jack";
+               widgets = "Headphone", "Headphones";
                dais = <&i2s_port2
                        &i2s_port3
                        &i2s_port4
index 3cfa8ca267384615694e693ed0371df694fea1f4..f9a186f6af8a9206de939bbdf3f6013988b8b994 100644
@@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y
 CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_R8A7796=y
@@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
 CONFIG_ARCH_SPRD=y
-CONFIG_ARCH_SYNQUACER=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_THUNDER2=y
 CONFIG_ARCH_UNIPHIER=y
@@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZX=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI_PCIE=y
 CONFIG_PCI_IOV=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_HISI=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCIE_KIRIN=y
-CONFIG_PCIE_ARMADA_8K=y
-CONFIG_PCIE_HISI_STB=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCI_TEGRA=y
 CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=y
-CONFIG_PCIE_ROCKCHIP_HOST=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_PCI_HOST_THUNDER_PEM=y
 CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
 CONFIG_ARM64_VA_BITS_48=y
 CONFIG_SCHED_MC=y
 CONFIG_NUMA=y
@@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y
 CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
 CONFIG_ARM_CPUIDLE=y
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
 CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
 CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
 CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
 CONFIG_ARM_SCPI_CPUFREQ=y
 CONFIG_ARM_TEGRA186_CPUFREQ=y
-CONFIG_ACPI_CPPC_CPUFREQ=m
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -236,11 +232,6 @@ CONFIG_SMSC911X=y
 CONFIG_SNI_AVE=y
 CONFIG_SNI_NETSEC=y
 CONFIG_STMMAC_ETH=m
-CONFIG_DWMAC_IPQ806X=m
-CONFIG_DWMAC_MESON=m
-CONFIG_DWMAC_ROCKCHIP=m
-CONFIG_DWMAC_SUNXI=m
-CONFIG_DWMAC_SUN8I=m
 CONFIG_MDIO_BUS_MUX_MMIOREG=y
 CONFIG_AT803X_PHY=m
 CONFIG_MARVELL_PHY=m
@@ -269,8 +260,8 @@ CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_INPUT_TOUCHSCREEN=y
 CONFIG_TOUCHSCREEN_ATMEL_MXT=m
 CONFIG_INPUT_MISC=y
@@ -296,17 +287,13 @@ CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
 CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=11
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_SERIAL_DEV_BUS=y
-CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
 CONFIG_VIRTIO_CONSOLE=y
-CONFIG_I2C_HID=m
 CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_MUX=y
 CONFIG_I2C_MUX_PCA954x=y
@@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y
 CONFIG_I2C_CROS_EC_TUNNEL=y
 CONFIG_SPI=y
 CONFIG_SPI_ARMADA_3700=y
-CONFIG_SPI_MESON_SPICC=m
-CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_BCM2835=m
 CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
-CONFIG_SPI_QUP=y
 CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_QUP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
-CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_SINGLE=y
 CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_IPQ8074=y
 CONFIG_PINCTRL_MSM8916=y
 CONFIG_PINCTRL_MSM8994=y
 CONFIG_PINCTRL_MSM8996=y
-CONFIG_PINCTRL_MT7622=y
 CONFIG_PINCTRL_QDF2XXX=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_MT7622=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_MB86S7X=y
 CONFIG_GPIO_PL061=y
@@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m
 CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_GEN3_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_BRCMSTB_THERMAL=m
 CONFIG_EXYNOS_THERMAL=y
-CONFIG_RCAR_GEN3_THERMAL=y
-CONFIG_QCOM_TSENS=y
-CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_TSENS=y
 CONFIG_UNIPHIER_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
@@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_AXP20X=y
 CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI6421V530=y
 CONFIG_REGULATOR_HI655X=y
@@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y
 CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_REGULATOR_RK808=y
 CONFIG_REGULATOR_S2MPS11=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=m
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_MEDIA_CAMERA_SUPPORT=y
 CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
 CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
 CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_RC_SUPPORT=y
-CONFIG_RC_CORE=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_DECODERS=y
-CONFIG_IR_MESON=m
 CONFIG_VIDEO_V4L2_SUBDEV_API=y
 # CONFIG_DVB_NET is not set
 CONFIG_V4L_MEM2MEM_DRIVERS=y
@@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y
 CONFIG_ROCKCHIP_DW_MIPI_DSI=y
 CONFIG_ROCKCHIP_INNO_HDMI=y
 CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_LVDS=y
-CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_RCAR_LVDS=m
 CONFIG_DRM_TEGRA=m
 CONFIG_DRM_PANEL_SIMPLE=m
 CONFIG_DRM_I2C_ADV7511=m
@@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y
 CONFIG_BACKLIGHT_GENERIC=m
 CONFIG_BACKLIGHT_PWM=m
 CONFIG_BACKLIGHT_LP855X=m
-CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
@@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m
 CONFIG_SND_SOC_AK4613=m
 CONFIG_SND_SIMPLE_CARD=m
 CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_I2C_HID=m
 CONFIG_USB=y
 CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
@@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_ACPI=y
-CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_OF_ESDHC=y
 CONFIG_MMC_SDHCI_CADENCE=y
 CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_F_SDH30=y
 CONFIG_MMC_MESON_GX=y
 CONFIG_MMC_SDHCI_MSM=y
 CONFIG_MMC_SPI=y
@@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_PWM=y
 CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_LEDS_TRIGGER_DISK=y
 CONFIG_EDAC=y
 CONFIG_EDAC_GHES=y
 CONFIG_RTC_CLASS=y
@@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_DS3232=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_RTC_DRV_ARMADA38X=y
 CONFIG_RTC_DRV_TEGRA=y
 CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_CROS_EC=y
 CONFIG_DMADEVICES=y
 CONFIG_DMA_BCM2835=m
 CONFIG_K3_DMA=y
@@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y
 CONFIG_ARM_MHU=y
 CONFIG_PLATFORM_MHU=y
 CONFIG_BCM2835_MBOX=y
-CONFIG_HI6220_MBOX=y
 CONFIG_QCOM_APCS_IPC=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_SMMU=y
@@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
 CONFIG_MEMORY=y
-CONFIG_TEGRA_MC=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
 CONFIG_ROCKCHIP_SARADC=m
@@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
 CONFIG_PWM_TEGRA=m
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_HISTB_COMBPHY=y
 CONFIG_PHY_HISI_INNO_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB3=m
-CONFIG_PHY_HI6220_USB=y
-CONFIG_PHY_QCOM_USB_HS=y
-CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_MVEBU_CP110_COMPHY=y
 CONFIG_PHY_QCOM_QMP=m
-CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_ROCKCHIP_TYPEC=y
-CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_QCOM_L2_PMU=y
 CONFIG_QCOM_L3_PMU=y
-CONFIG_MESON_EFUSE=m
 CONFIG_QCOM_QFPROM=y
 CONFIG_ROCKCHIP_EFUSE=y
 CONFIG_UNIPHIER_EFUSE=y
+CONFIG_MESON_EFUSE=m
 CONFIG_TEE=y
 CONFIG_OPTEE=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y
 CONFIG_ACPI=y
 CONFIG_ACPI_APEI=y
 CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
 CONFIG_ACPI_APEI_MEMORY_FAILURE=y
 CONFIG_ACPI_APEI_EINJ=y
 CONFIG_EXT2_FS=y
@@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
@@ -691,20 +672,15 @@ CONFIG_SECURITY=y
 CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA256_ARM64=m
-CONFIG_CRYPTO_SHA512_ARM64=m
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
 CONFIG_CRYPTO_CRC32_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64=m
-CONFIG_CRYPTO_AES_ARM64_CE=m
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
 CONFIG_CRYPTO_CHACHA20_NEON=m
 CONFIG_CRYPTO_AES_ARM64_BS=m
-CONFIG_CRYPTO_SHA512_ARM64_CE=m
-CONFIG_CRYPTO_SHA3_ARM64=m
-CONFIG_CRYPTO_SM3_ARM64_CE=m
index 253188fb8cb0cea0e35d0f4ed77b5e2c6332d507..e3e50950a863675b72a3c1e0d605d81cf5f258f2 100644 (file)
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
                kernel_neon_begin();
                aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
-               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
                kernel_neon_end();
+               err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        if (walk.nbytes) {
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
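
The reordering above is subtle but deliberate: kernel_neon_end() now comes before skcipher_walk_done(). A kernel-mode NEON section runs with preemption disabled, so it should cover only the SIMD work itself; skcipher_walk_done() may copy, unmap, and free walk buffers, none of which needs the NEON unit, and (presumably the motivation here) is better run once preemption is possible again.
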
index a91933b1e2e62ba235ef05ddf8f9d34dbb6bcf49..4b650ec1d7dd1aa8d4418b6b896f81de4a2187ab 100644 (file)
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
                                 __le32 *origptr, __le32 *updptr, int nr_inst);
 
 void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
 
 #define ALTINSTR_ENTRY(feature,cb)                                           \
        " .word 661b - .\n"                             /* label           */ \
index fda9a8ca48bef71b0d4a76be1a45295af1211dd6..fe8777b12f8667c2c0b23952057fc13041276442 100644 (file)
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
 #define KVM_ARM64_FP_ENABLED           (1 << 1) /* guest FP regs loaded */
 #define KVM_ARM64_FP_HOST              (1 << 2) /* host FP regs loaded */
 #define KVM_ARM64_HOST_SVE_IN_USE      (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED     (1 << 4) /* SVE enabled for EL0 */
 
 #define vcpu_gp_regs(v)                (&(v)->arch.ctxt.gp_regs)
 
index 9f82d6b53851e4b6bedbb28f6d0e7480acd622a6..1bdeca8918a684814f84ca3841b88a3123749cbb 100644 (file)
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
         * Only if the new pte is valid and kernel, otherwise TLB maintenance
         * or update_mmu_cache() have the necessary barriers.
         */
-       if (pte_valid_not_user(pte)) {
+       if (pte_valid_not_user(pte))
                dsb(ishst);
-               isb();
-       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        WRITE_ONCE(*pmdp, pmd);
        dsb(ishst);
-       isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        WRITE_ONCE(*pudp, pud);
        dsb(ishst);
-       isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
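
A note on the dropped isb() calls: the retained dsb(ishst) is what makes the updated entry visible to the MMU's table walker, while an isb() only synchronizes the local CPU's instruction stream, which a hardware table walk does not consume. Dropping it therefore looks like a pure fast-path win; that reading is inferred from the hunks rather than quoted from a commit message.
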
index fa8b3fe932e6f568841017215524bc0b894cbf28..6495cc51246fc873bef97f99a2b0139f806516c1 100644 (file)
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
 static __must_check inline bool may_use_simd(void)
 {
        /*
-        * The raw_cpu_read() is racy if called with preemption enabled.
-        * This is not a bug: kernel_neon_busy is only set when
-        * preemption is disabled, so we cannot migrate to another CPU
-        * while it is set, nor can we migrate to a CPU where it is set.
-        * So, if we find it clear on some CPU then we're guaranteed to
-        * find it clear on any CPU we could migrate to.
-        *
-        * If we are in between kernel_neon_begin()...kernel_neon_end(),
-        * the flag will be set, but preemption is also disabled, so we
-        * can't migrate to another CPU and spuriously see it become
-        * false.
+        * kernel_neon_busy is only set while preemption is disabled,
+        * and is clear whenever preemption is enabled. Since
+        * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+        * cannot change under our feet -- if it's set we cannot be
+        * migrated, and if it's clear we cannot be migrated to a CPU
+        * where it is set.
         */
        return !in_irq() && !irqs_disabled() && !in_nmi() &&
-               !raw_cpu_read(kernel_neon_busy);
+               !this_cpu_read(kernel_neon_busy);
 }
 
 #else /* ! CONFIG_KERNEL_MODE_NEON */
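
For context, a minimal sketch of the calling pattern may_use_simd() is meant to guard (the scalar fallback is hypothetical):

	if (may_use_simd()) {
		kernel_neon_begin();	/* disables preemption */
		/* ... NEON/SIMD processing ... */
		kernel_neon_end();
	} else {
		/* scalar fallback: IRQ/NMI context, or NEON already in use */
	}

The switch from raw_cpu_read() to this_cpu_read() keeps the result the same but makes the read atomic with respect to preemption, which is why the long justification for the benign race can be replaced by the shorter comment above.
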
index 6171178075dcab62def613141732a0b7601b1c43..a8f84812c6e8925c9429451dc3119bfbd5620e8c 100644 (file)
@@ -728,6 +728,17 @@ asm(
        asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
 } while (0)
 
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do {                      \
+       u64 __scs_val = read_sysreg(sysreg);                            \
+       u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);            \
+       if (__scs_new != __scs_val)                                     \
+               write_sysreg(__scs_new, sysreg);                        \
+} while (0)
+
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
        u32 val;
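
A usage sketch for the new helper, mirroring the KVM SVE fix later in this series:

	/* grant EL0 access to SVE: clear nothing, set the enable bit */
	sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);

	/* revoke it: clear the enable bit, set nothing */
	sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);

Note that the macro skips write_sysreg() entirely when the masked value is unchanged, avoiding a redundant system-register write.
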
index 5c4bce4ac381a4ab87107e4aa47a9b7beef7d891..36fb069fd049c7053f38b75b9916bba7cb630643 100644 (file)
@@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt,
        }
 }
 
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+/*
+ * We provide our own, private D-cache cleaning function so that we don't
+ * accidentally call into the cache.S code, which is patched by us at
+ * runtime.
+ */
+static void clean_dcache_range_nopatch(u64 start, u64 end)
+{
+       u64 cur, d_size, ctr_el0;
+
+       ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+       d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
+                                                          CTR_DMINLINE_SHIFT);
+       cur = start & ~(d_size - 1);
+       do {
+               /*
+                * We must clean+invalidate to the PoC in order to avoid
+                * Cortex-A53 errata 826319, 827319, 824069 and 819472
+                * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
+                */
+               asm volatile("dc civac, %0" : : "r" (cur) : "memory");
+       } while (cur += d_size, cur < end);
+}
+
+static void __apply_alternatives(void *alt_region, bool is_module)
 {
        struct alt_instr *alt;
        struct alt_region *region = alt_region;
@@ -145,7 +168,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
                pr_info_once("patching kernel code\n");
 
                origptr = ALT_ORIG_PTR(alt);
-               updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+               updptr = is_module ? origptr : lm_alias(origptr);
                nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
 
                if (alt->cpufeature < ARM64_CB_PATCH)
@@ -155,8 +178,20 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
 
                alt_cb(alt, origptr, updptr, nr_inst);
 
-               flush_icache_range((uintptr_t)origptr,
-                                  (uintptr_t)(origptr + nr_inst));
+               if (!is_module) {
+                       clean_dcache_range_nopatch((u64)origptr,
+                                                  (u64)(origptr + nr_inst));
+               }
+       }
+
+       /*
+        * The core module code takes care of cache maintenance in
+        * flush_module_icache().
+        */
+       if (!is_module) {
+               dsb(ish);
+               __flush_icache_all();
+               isb();
        }
 }
 
@@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused)
                isb();
        } else {
                BUG_ON(alternatives_applied);
-               __apply_alternatives(&region, true);
+               __apply_alternatives(&region, false);
                /* Barriers provided by the cache flushing */
                WRITE_ONCE(alternatives_applied, 1);
        }
@@ -192,12 +227,14 @@ void __init apply_alternatives_all(void)
        stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
 }
 
-void apply_alternatives(void *start, size_t length)
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
 {
        struct alt_region region = {
                .begin  = start,
                .end    = start + length,
        };
 
-       __apply_alternatives(&region, false);
+       __apply_alternatives(&region, true);
 }
+#endif
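
Two details worth unpacking in this rework. First, core-kernel text is patched through its linear-map alias (lm_alias()), since the kernel's own mapping of it is read-only, while module text is writable and is patched in place; inverting the old use_linear_alias flag into is_module expresses exactly that. Second, cache maintenance is now split: a private D-cache clean that cannot recurse into the cache.S code being patched, then one whole-I-cache invalidate at the end, with modules deferred to the loader's flush_module_icache(). The line-size computation reads CTR_EL0.DminLine, which encodes log2 of the smallest D-cache line in 4-byte words; a DminLine of 4, for example, yields d_size = 4 << 4 = 64 bytes.
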
index d2856b129097899d37ba3790056fc28eefc8409e..f24892a40d2c8abd934dcb2dc133c4a136270d31 100644 (file)
@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
        __kpti_forced = enabled ? 1 : -1;
        return 0;
 }
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 #ifdef CONFIG_ARM64_HW_AFDBM
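
Switching parse_kpti() from __setup() to early_param() moves its execution to parse_early_param() during setup_arch(), well before ordinary __setup() handlers run; presumably this is needed because the unmap-kernel-at-EL0 decision is taken during early CPU-feature setup, which would otherwise not yet have seen kpti=.
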
index 155fd91e78f4a62180e7577355ca4a6b0eb283f4..f0f27aeefb73623a0983c1f3eec2054d306021dc 100644 (file)
@@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr,
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
-               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
-                       apply_alternatives((void *)s->sh_addr, s->sh_size);
-               }
+               if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
+                       apply_alternatives_module((void *)s->sh_addr, s->sh_size);
 #ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
index f3e2e3aec0b0632793abc2ce06dbaa2addd97eb5..2faa9863d2e569e704191bd1939dac2eb111cb5b 100644 (file)
@@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
index dc6ecfa5a2d2564c90a5ce92003a0e3b8490cbce..aac7808ce2162a9d2bdcdcc938b649655663912e 100644 (file)
@@ -5,13 +5,14 @@
  * Copyright 2018 Arm Limited
  * Author: Dave Martin <Dave.Martin@arm.com>
  */
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
 #include <linux/sched.h>
 #include <linux/thread_info.h>
 #include <linux/kvm_host.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
 
 /*
  * Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 {
        BUG_ON(!current->mm);
 
-       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+       vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+                             KVM_ARM64_HOST_SVE_IN_USE |
+                             KVM_ARM64_HOST_SVE_ENABLED);
        vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
        if (test_thread_flag(TIF_SVE))
                vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+       if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+               vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
 }
 
 /*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
  */
 void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 {
-       local_bh_disable();
+       unsigned long flags;
 
-       update_thread_flag(TIF_SVE,
-                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+       local_irq_save(flags);
 
        if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
                /* Clean guest FP state to memory and invalidate cpu view */
                fpsimd_save();
                fpsimd_flush_cpu_state();
-       } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
-               /* Ensure user trap controls are correctly restored */
-               fpsimd_bind_task_to_cpu();
+       } else if (system_supports_sve()) {
+               /*
+                * The FPSIMD/SVE state in the CPU has not been touched, and we
+                * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+                * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+                * for EL0.  To avoid spurious traps, restore the trap state
+                * seen by kvm_arch_vcpu_load_fp():
+                */
+               if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+                       sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+               else
+                       sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
        }
 
-       local_bh_enable();
+       update_thread_flag(TIF_SVE,
+                          vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+       local_irq_restore(flags);
 }
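
Two fixes travel together here. The local_bh_disable()/local_bh_enable() pair becomes local_irq_save()/local_irq_restore(), most likely because this path can be reached with interrupts already disabled (for instance via the preempt notifiers), where re-enabling softirqs is not allowed. And the new KVM_ARM64_HOST_SVE_ENABLED flag captures the host's CPACR_EL1.ZEN_EL0EN setting at vcpu load time so it can be restored with sysreg_clear_set() after the hyp code resets CPACR_EL1, as the added comment explains.
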
index 49e217ac7e1ec2087c440c60ec71126f0e48ec32..61e93f0b548228f57a08f25a14291a1e46437115 100644 (file)
@@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
-               if (!coherent)
-                       __dma_flush_area(page_to_virt(page), iosize);
-
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
-               if (!addr) {
+               if (addr) {
+                       memset(addr, 0, size);
+                       if (!coherent)
+                               __dma_flush_area(page_to_virt(page), iosize);
+               } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
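
The allocation fix is twofold: the remapped buffer is now explicitly zeroed, where previously this path could hand out pages with stale contents, and for non-coherent devices the cache flush is performed after the memset, so the zeroes rather than stale cache lines are what reach memory and become visible to the device.
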
index 5f9a73a4452c2b87dd9a922933b12c85ab008377..03646e6a2ef4f240412d1eb62a1cbc27d04705b0 100644 (file)
@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
 
        .macro __idmap_kpti_put_pgtable_ent_ng, type
        orr     \type, \type, #PTE_NG           // Same bit for blocks and pages
-       str     \type, [cur_\()\type\()p]       // Update the entry and ensure it
-       dc      civac, cur_\()\type\()p         // is visible to all CPUs.
+       str     \type, [cur_\()\type\()p]       // Update the entry and ensure
+       dmb     sy                              // that it is visible to all
+       dc      civac, cur_\()\type\()p         // CPUs.
        .endm
 
 /*
index 8b707c249026032ac8bef80c6f1666c105d9b50d..12fe700632f458ea632a18bb9cdccd6660efd241 100644 (file)
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
                                  unsigned long address)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
 
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
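
pgtable_page_dtor() is the counterpart of the pgtable_page_ctor() call made when a PTE page is allocated: it tears down the split page-table lock and page-table accounting, so freeing the page without it leaks that state. A minimal sketch of the required pairing (surrounding code hypothetical):

	struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (pte && !pgtable_page_ctor(pte)) {	/* init ptl + accounting */
		__free_page(pte);
		pte = NULL;
	}
	/* ... page used as a PTE table ... */
	pgtable_page_dtor(pte);			/* must precede the free */
	__free_page(pte);
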
 
index 331a3bb66297baa39404fbefa273663ebd1871fe..93a737c8d1a6448d5bb4dcb2d71c8d8b5241e0d7 100644 (file)
@@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT
 
 source "lib/Kconfig.debug"
 
-config HEART_BEAT
-       bool "Heart beat function for kernel"
-       default n
-       help
-         This option turns on/off heart beat kernel functionality.
-         First GPIO node is taken.
-
 endmenu
index d5384f6f36f777d4487ea00f2908b39fe5a26403..ce9b7b7861569501c0339491338da38a2cdb0050 100644 (file)
@@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 
 extern char *klimit;
 
-void microblaze_heartbeat(void);
-void microblaze_setup_heartbeat(void);
-
 #   ifdef CONFIG_MMU
 extern void mmu_reset(void);
 #   endif /* CONFIG_MMU */
 
-extern void of_platform_reset_gpio_probe(void);
-
 void time_init(void);
 void init_IRQ(void);
 void machine_early_init(const char *cmdline, unsigned int ram,
index 9774e1d9507baebbd6efe9bcf67bb88bcd214d82..a62d09420a47b725cf67e12b99784a2259e24d2f 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         399
+#define __NR_syscalls         401
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index eb156f914793b29b558c9b48853af8d833f3d3d2..7a9f16a7641374855d4d8f9a7189792031f51185 100644 (file)
 #define __NR_pkey_alloc                396
 #define __NR_pkey_free         397
 #define __NR_statx             398
+#define __NR_io_pgetevents     399
+#define __NR_rseq              400
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 7e99cf6984a1eb5f51597dbd8857f6f370d28328..dd71637437f4f6b1ff307d385b8a1ff293959075 100644 (file)
@@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_timer.o = -pg
 CFLAGS_REMOVE_intc.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_heartbeat.o = -pg
 CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_process.o = -pg
 endif
@@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds
 
 obj-y += dma.o exceptions.o \
        hw_exception_handler.o irq.o \
-       platform.o process.o prom.o ptrace.o \
+       process.o prom.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
 
-obj-$(CONFIG_HEART_BEAT)       += heartbeat.o
 obj-$(CONFIG_MODULES)          += microblaze_ksyms.o module.o
 obj-$(CONFIG_MMU)              += misc.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
deleted file mode 100644 (file)
index 2022130..0000000
--- a/arch/microblaze/kernel/heartbeat.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-
-static unsigned int base_addr;
-
-void microblaze_heartbeat(void)
-{
-       static unsigned int cnt, period, dist;
-
-       if (base_addr) {
-               if (cnt == 0 || cnt == dist)
-                       out_be32(base_addr, 1);
-               else if (cnt == 7 || cnt == dist + 7)
-                       out_be32(base_addr, 0);
-
-               if (++cnt > period) {
-                       cnt = 0;
-                       /*
-                        * The hyperbolic function below modifies the heartbeat
-                        * period length in dependency of the current (5min)
-                        * load. It goes through the points f(0)=126, f(1)=86,
-                        * f(5)=51, f(inf)->30.
-                        */
-                       period = ((672 << FSHIFT) / (5 * avenrun[0] +
-                                               (7 << FSHIFT))) + 30;
-                       dist = period / 4;
-               }
-       }
-}
-
-void microblaze_setup_heartbeat(void)
-{
-       struct device_node *gpio = NULL;
-       int *prop;
-       int j;
-       const char * const gpio_list[] = {
-               "xlnx,xps-gpio-1.00.a",
-               NULL
-       };
-
-       for (j = 0; gpio_list[j] != NULL; j++) {
-               gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
-               if (gpio)
-                       break;
-       }
-
-       if (gpio) {
-               base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
-               base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
-               pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
-
-               /* GPIO is configured as output */
-               prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
-               if (prop)
-                       out_be32(base_addr + 4, 0);
-       }
-}
diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c
deleted file mode 100644 (file)
index 2540d60..0000000
--- a/arch/microblaze/kernel/platform.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2008 Michal Simek <monstr@monstr.eu>
- *
- * based on virtex.c file
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/setup.h>
-
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
-       { .compatible = "simple-bus", },
-       { .compatible = "xlnx,compound", },
-       {}
-};
-
-static int __init microblaze_device_probe(void)
-{
-       of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-       of_platform_reset_gpio_probe();
-       return 0;
-}
-device_initcall(microblaze_device_probe);
index bab4c8330ef4f3f165ad2992d9660776fb0e3c41..fcbe1daf631662f8d45a580126f58d2090d90023 100644 (file)
@@ -18,7 +18,7 @@
 static int handle; /* reset pin handle */
 static unsigned int reset_val;
 
-void of_platform_reset_gpio_probe(void)
+static int of_platform_reset_gpio_probe(void)
 {
        int ret;
        handle = of_get_named_gpio(of_find_node_by_path("/"),
@@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void)
        if (!gpio_is_valid(handle)) {
                pr_info("Skipping unavailable RESET gpio %d (%s)\n",
                                handle, "reset");
-               return;
+               return -ENODEV;
        }
 
        ret = gpio_request(handle, "reset");
        if (ret < 0) {
                pr_info("GPIO pin is already allocated\n");
-               return;
+               return ret;
        }
 
        /* get current setup value */
@@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void)
 
        pr_info("RESET: Registered gpio device: %d, current val: %d\n",
                                                        handle, reset_val);
-       return;
+       return 0;
 err:
        gpio_free(handle);
-       return;
+       return ret;
 }
+device_initcall(of_platform_reset_gpio_probe);
 
 
 static void gpio_system_reset(void)
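
With the extern declaration gone from setup.h and the manual call gone with the deleted platform.c above, of_platform_reset_gpio_probe() is now registered as a device_initcall() and returns a real errno, so the initcall machinery both invokes it automatically at boot and records its failure.
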
index 56bcf313121fb6bd31be14dc5d9f676a132cb85c..6ab6505937921e247231f8a09fbf5ab8b463d1a5 100644 (file)
@@ -400,3 +400,5 @@ ENTRY(sys_call_table)
        .long sys_pkey_alloc
        .long sys_pkey_free
        .long sys_statx
+       .long sys_io_pgetevents
+       .long sys_rseq
index 7de941cbbd940fb7ca72840e3bf6422993c6eb94..a6683484b3a12690c517ccf5803ac64f03f16c81 100644 (file)
@@ -156,9 +156,6 @@ static inline void timer_ack(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = &clockevent_xilinx_timer;
-#ifdef CONFIG_HEART_BEAT
-       microblaze_heartbeat();
-#endif
        timer_ack();
        evt->event_handler(evt);
        return IRQ_HANDLED;
@@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer)
                return ret;
        }
 
-#ifdef CONFIG_HEART_BEAT
-       microblaze_setup_heartbeat();
-#endif
-
        ret = xilinx_clocksource_init();
        if (ret)
                return ret;
index 3f9deec70b92383130b847ef3d9585db5134675e..08c10c518f8323fea7838d92fe03b748cbb2966e 100644 (file)
@@ -65,6 +65,7 @@ config MIPS
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
index 6b2c6f3baefa556018dffea409500b1c7846ed77..75fb96ca61db7ef6652722640a8d452cbc125d55 100644 (file)
@@ -34,7 +34,7 @@
 #define PB44_KEYS_DEBOUNCE_INTERVAL    (3 * PB44_KEYS_POLL_INTERVAL)
 
 static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
-       .dev_id = "i2c-gpio",
+       .dev_id = "i2c-gpio.0",
        .table = {
                GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
                                NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
index 6054d49e608eec038e1bbd49599bc783270aa09a..8c9cbf13d32a0a471bc6f2653bbb3c459b1b2c2c 100644 (file)
@@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
                 */
                if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
                        cpu_wait = NULL;
+
+               /*
+                * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
+                * Enable ExternalSync for sync instruction to take effect
+                */
+               set_c0_config7(MIPS_CONF7_ES);
                break;
 #endif
        }
index a7d0b836f2f7dd9c8bf7897759aed6b9f59ade39..cea8ad864b3f6f416cb45687bfbcb5bd882933a7 100644 (file)
@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port)                     \
        __val = *__addr;                                                \
        slow;                                                           \
                                                                        \
+       /* prevent prefetching of coherent DMA data prematurely */      \
+       rmb();                                                          \
        return pfx##ioswab##bwlq(__addr, __val);                        \
 }
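
The new rmb() closes a window where the CPU could speculatively load DMA'd payload data before the port read that reports the DMA as complete, observing stale memory. A typical driver pattern the barrier protects (hypothetical code):

	status = inl(dev->status_port);		/* read now implies rmb() */
	if (status & DMA_DONE)
		consume(dev->dma_buf);		/* must not be read early */
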
 
index ae461d91cd1faef06dc39399e7910eb28471d930..0bc270806ec5a68dbb030d6f47d26e46485c0ea0 100644 (file)
 #define MIPS_CONF7_WII         (_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS         (_ULCAST_(1) << 2)
+/* ExternalSync */
+#define MIPS_CONF7_ES          (_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR         (_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR          (_ULCAST_(1) << 16)
@@ -2765,6 +2767,7 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
index bb05e9916a5fa7f969d915b742329ae67cd51b57..f25dd1d83fb74700b33e4bf2387ebf89ac200f64 100644 (file)
 #define __NR_pkey_alloc                        (__NR_Linux + 364)
 #define __NR_pkey_free                 (__NR_Linux + 365)
 #define __NR_statx                     (__NR_Linux + 366)
+#define __NR_rseq                      (__NR_Linux + 367)
+#define __NR_io_pgetevents             (__NR_Linux + 368)
 
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls            366
+#define __NR_Linux_syscalls            368
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux                 4000
-#define __NR_O32_Linux_syscalls                366
+#define __NR_O32_Linux_syscalls                368
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
 #define __NR_pkey_alloc                        (__NR_Linux + 324)
 #define __NR_pkey_free                 (__NR_Linux + 325)
 #define __NR_statx                     (__NR_Linux + 326)
+#define __NR_rseq                      (__NR_Linux + 327)
+#define __NR_io_pgetevents             (__NR_Linux + 328)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls            326
+#define __NR_Linux_syscalls            328
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux                  5000
-#define __NR_64_Linux_syscalls         326
+#define __NR_64_Linux_syscalls         328
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
 #define __NR_pkey_alloc                        (__NR_Linux + 328)
 #define __NR_pkey_free                 (__NR_Linux + 329)
 #define __NR_statx                     (__NR_Linux + 330)
+#define __NR_rseq                      (__NR_Linux + 331)
+#define __NR_io_pgetevents             (__NR_Linux + 332)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls            330
+#define __NR_Linux_syscalls            332
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux                 6000
-#define __NR_N32_Linux_syscalls                330
+#define __NR_N32_Linux_syscalls                332
 
 #endif /* _UAPI_ASM_UNISTD_H */
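
Sanity-checking the new numbers against the per-ABI bases defined in this header: o32 (base 4000) gains rseq = 4367 and io_pgetevents = 4368; n64 (base 5000) gains 5327 and 5328; n32 (base 6000) gains 6331 and 6332. These match the entries appended to the three syscall tables further down.
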
index 38a302919e6b5ae8aa07303065bd561529fe828e..d7de8adcfcc8767a826e7823d3bf189326da0e33 100644 (file)
@@ -79,6 +79,10 @@ FEXPORT(ret_from_fork)
        jal     schedule_tail           # a0 = struct task_struct *prev
 
 FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched and
                                        # signals dont change between
                                        # sampling and return
@@ -141,6 +145,10 @@ work_notifysig:                            # deal with pending signals and
        j       resume_userspace_check
 
 FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+       move    a0, sp
+       jal     rseq_syscall
+#endif
        local_irq_disable               # make sure need_resched doesn't
                                        # change between and return
        LONG_L  a2, TI_FLAGS($28)       # current->work
index f2ee7e1e3342e498be961f8995fc91b1de1f2744..cff52b283e03843519201ca8fe8754e0899c0c3c 100644 (file)
@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
 EXPORT_SYMBOL(_mcount)
        PTR_LA  t1, ftrace_stub
        PTR_L   t2, ftrace_trace_function /* Prepare t2 for (1) */
-       bne     t1, t2, static_trace
+       beq     t1, t2, fgraph_trace
         nop
 
+       MCOUNT_SAVE_REGS
+
+       move    a0, ra          /* arg1: self return address */
+       jalr    t2              /* (1) call *ftrace_trace_function */
+        move   a1, AT          /* arg2: parent's return address */
+
+       MCOUNT_RESTORE_REGS
+
+fgraph_trace:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       PTR_LA  t1, ftrace_stub
        PTR_L   t3, ftrace_graph_return
        bne     t1, t3, ftrace_graph_caller
         nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
        bne     t1, t3, ftrace_graph_caller
         nop
 #endif
-       b       ftrace_stub
-#ifdef CONFIG_32BIT
-        addiu sp, sp, 8
-#else
-        nop
-#endif
 
-static_trace:
-       MCOUNT_SAVE_REGS
-
-       move    a0, ra          /* arg1: self return address */
-       jalr    t2              /* (1) call *ftrace_trace_function */
-        move   a1, AT          /* arg2: parent's return address */
-
-       MCOUNT_RESTORE_REGS
 #ifdef CONFIG_32BIT
        addiu sp, sp, 8
 #endif
+
        .globl ftrace_stub
 ftrace_stub:
        RETURN_BACK
index 8d85046adcc8dd858cb5b392b68dc22da19185c4..9670e70139fd971d00bf7a06c46b06cec7ff1e35 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/kallsyms.h>
 #include <linux/random.h>
 #include <linux/prctl.h>
+#include <linux/nmi.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
        return sp & ALMASK;
 }
 
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
 {
-       struct pt_regs *regs;
+       nmi_cpu_backtrace(get_irq_regs());
+       cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
 
-       regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+       call_single_data_t *csd;
+       int cpu;
 
-       if (regs)
-               show_regs(regs);
+       for_each_cpu(cpu, mask) {
+               /*
+                * If we previously sent an IPI to the target CPU & it hasn't
+                * cleared its bit in the busy cpumask then it didn't handle
+                * our previous IPI & it's not safe for us to reuse the
+                * call_single_data_t.
+                */
+               if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+                       pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+                               cpu);
+                       continue;
+               }
 
-       dump_stack();
+               csd = &per_cpu(backtrace_csd, cpu);
+               csd->func = handle_backtrace;
+               smp_call_function_single_async(cpu, csd);
+       }
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
-       long this_cpu = get_cpu();
-
-       if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
-               dump_stack();
-
-       smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
-       put_cpu();
+       nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
 }
 
 int mips_get_process_fp_mode(struct task_struct *task)
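
The rewrite replaces synchronous stack dumping via smp_call_function_many(), which skips the calling CPU and hence needed the separate dump_stack() for self, with the generic nmi_trigger_cpumask_backtrace() helper driving asynchronous per-CPU IPIs. The backtrace_csd_busy mask is the correctness detail: a call_single_data_t must never be re-queued while a previous send is still in flight, so a CPU that has not handled its last backtrace IPI is skipped with a warning instead of corrupting the IPI queue.
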
index a9a7d78803cde30097a02c76aa49bef9f812be7e..91d3c8c46097cd960fd541cdf7c76d7e0d3636e3 100644 (file)
@@ -590,3 +590,5 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
index 65d5aeeb9bdb51ac846d5acc213f3a1af9b97533..358d9599983d17840cd909ea7851197e7b38b838 100644 (file)
@@ -439,4 +439,6 @@ EXPORT(sys_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 5325 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     sys_io_pgetevents
        .size   sys_call_table,.-sys_call_table
index cbf190ef9e8a5e2a0e499cfaf721908abf4213ec..c65eaacc1abfcf4c15a40721056cf6c3503927ee 100644 (file)
@@ -434,4 +434,6 @@ EXPORT(sysn32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free
        PTR     sys_statx                       /* 6330 */
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sysn32_call_table,.-sysn32_call_table
index 9ebe3e2403b1d7b84d66732cd261364208f6020d..73913f072e3916f36c23bda86870f83002a725c0 100644 (file)
@@ -583,4 +583,6 @@ EXPORT(sys32_call_table)
        PTR     sys_pkey_alloc
        PTR     sys_pkey_free                   /* 4365 */
        PTR     sys_statx
+       PTR     sys_rseq
+       PTR     compat_sys_io_pgetevents
        .size   sys32_call_table,.-sys32_call_table
index 9e224469c78887e9c2eb55779bfc8d4646ca2f09..0a9cfe7a0372940fceb71931cff1eddec2e10e37 100644 (file)
@@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                regs->regs[0] = 0;              /* Don't deal with this again.  */
        }
 
+       rseq_signal_deliver(ksig, regs);
+
        if (sig_uses_siginfo(&ksig->ka, abi))
                ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
                                          ksig, regs, oldset);
@@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index d67fa74622ee287200bf6b6664c3292ad72131d5..8d505a21396e33626061adb82c20be60f5b217bf 100644 (file)
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
 void show_regs(struct pt_regs *regs)
 {
        __show_regs((struct pt_regs *)regs);
+       dump_stack();
 }
 
 void show_registers(struct pt_regs *regs)
index 1986e09fb457c55ba16e3cd19f56f65e2737cb54..1601d90b087b8f933853ac87118aa09749f70f03 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <asm/addrspace.h>
 #include <asm/byteorder.h>
+#include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
        return error;
 }
 
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+                              void *arg)
+{
+       unsigned long i;
+
+       for (i = 0; i < nr_pages; i++) {
+               if (pfn_valid(start_pfn + i) &&
+                   !PageReserved(pfn_to_page(start_pfn + i)))
+                       return 1;
+       }
+
+       return 0;
+}
+
 /*
  * Generic mapping function (not visible outside):
  */
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
 
 void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
 {
+       unsigned long offset, pfn, last_pfn;
        struct vm_struct * area;
-       unsigned long offset;
        phys_addr_t last_addr;
        void * addr;
 
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
                return (void __iomem *) CKSEG1ADDR(phys_addr);
 
        /*
-        * Don't allow anybody to remap normal RAM that we're using..
+        * Don't allow anybody to remap RAM that may be allocated by the page
+        * allocator, since that could lead to races & data clobbering.
         */
-       if (phys_addr < virt_to_phys(high_memory)) {
-               char *t_addr, *t_end;
-               struct page *page;
-
-               t_addr = __va(phys_addr);
-               t_end = t_addr + (size - 1);
-
-               for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                       if(!PageReserved(page))
-                               return NULL;
+       pfn = PFN_DOWN(phys_addr);
+       last_pfn = PFN_DOWN(last_addr);
+       if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+                                 __ioremap_check_ram) == 1) {
+               WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+                         &phys_addr, &last_addr);
+               return NULL;
        }
 
        /*
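
walk_system_ram_range() applies the callback only to ranges the resource tree marks as System RAM and stops as soon as the callback returns non-zero, propagating that value. The "== 1" test above therefore reads as "at least one page in the requested range is valid RAM not marked reserved", in which case the remap is refused with a one-time warning rather than silently aliasing allocator-owned memory.
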
index 3e1a46615120a566adbfff65b5aab8e860bf3809..8999b922651210f6c20c83e7aa72b3bccf6c3d58 100644 (file)
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
        __free_page(pte);
 }
 
+#define __pte_free_tlb(tlb, pte, addr) \
+do {                                   \
+       pgtable_page_dtor(pte);         \
+       tlb_remove_page((tlb), (pte));  \
+} while (0)
 
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define check_pgt_cache()          do { } while (0)
index 690d55272ba688a2adc88bca00e66cc61903c711..0c826ad6e994cce359474229acf08ff0d0330b78 100644 (file)
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
        l.addi  r3,r1,0                    // pt_regs
        /* r4 set be EXCEPTION_HANDLE */   // effective address of fault
 
-       /*
-        * __PHX__: TODO
-        *
-        * all this can be written much simpler. look at
-        * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-        */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
        l.lwz   r6,PT_PC(r3)               // address of an offending insn
        l.lwz   r6,0(r6)                   // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-       l.lwz   r6,PT_SR(r3)               // SR
+       l.mfspr r6,r0,SPR_SR               // SR
        l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
        l.sfne  r6,r0                      // exception happened in delay slot
        l.bnf   7f
index fb02b2a1d6f2d875372b125cf837feb119d0164e..9fc6b60140f007bea1442f60727a22aee24776c9 100644 (file)
  *      r4  - EEAR     exception EA
  *      r10 - current  pointing to current_thread_info struct
  *      r12 - syscall  0, since we didn't come from syscall
- *      r13 - temp     it actually contains new SR, not needed anymore
- *      r31 - handler  address of the handler we'll jump to
+ *      r30 - handler  address of the handler we'll jump to
  *
  *      handler has to save remaining registers to the exception
  *      ksp frame *before* tainting them!
        /* r1 is KSP, r30 is __pa(KSP) */                       ;\
        tophys  (r30,r1)                                        ;\
        l.sw    PT_GPR12(r30),r12                               ;\
+       /* r4 use for tmp before EA */                          ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
        l.sw    PT_PC(r30),r12                                  ;\
        l.mfspr r12,r0,SPR_ESR_BASE                             ;\
        /* r12 == 1 if we come from syscall */                  ;\
        CLEAR_GPR(r12)                                          ;\
        /* ----- turn on MMU ----- */                           ;\
-       l.ori   r30,r0,(EXCEPTION_SR)                           ;\
+       /* Carry DSX into exception SR */                       ;\
+       l.mfspr r30,r0,SPR_SR                                   ;\
+       l.andi  r30,r30,SPR_SR_DSX                              ;\
+       l.ori   r30,r30,(EXCEPTION_SR)                          ;\
        l.mtspr r0,r30,SPR_ESR_BASE                             ;\
        /* r30: EA address of handler */                        ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
index fac246e6f37a278e4cd7c001c2cd53a8df88dc4e..d8981cbb852a5f1fc1ea80667df3ed451579d13c 100644 (file)
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
                return 0;
        }
 #else
-       return regs->sr & SPR_SR_DSX;
+       return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }
 
index c480770fabcd6287571dacb9d40ccc224f8e13b1..17526bebcbd277765c791b30e04e0096052d78cb 100644 (file)
@@ -244,11 +244,11 @@ config PARISC_PAGE_SIZE_4KB
 
 config PARISC_PAGE_SIZE_16KB
        bool "16KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 config PARISC_PAGE_SIZE_64KB
        bool "64KB"
-       depends on PA8X00
+       depends on PA8X00 && BROKEN
 
 endchoice
 
@@ -347,7 +347,7 @@ config NR_CPUS
        int "Maximum number of CPUs (2-32)"
        range 2 32
        depends on SMP
-       default "32"
+       default "4"
 
 endmenu
 
index 714284ea6cc214f1011c6e0593f5ad2b0c962ddc..5ce030266e7d03bbfd7da5885471b1a874eefcd7 100644 (file)
@@ -65,10 +65,6 @@ endif
 # kernel.
 cflags-y       += -mdisable-fpregs
 
-# Without this, "ld -r" results in .text sections that are too big
-# (> 0x40000) for branches to reach stubs.
-cflags-y       += -ffunction-sections
-
 # Use long jumps instead of long branches (needed if your linker fails to
 # link a too big vmlinux executable). Not enabled for building modules.
 ifdef CONFIG_MLONGCALLS
index eeb5c88586631e8935b96e0edfe410bbbc2ecffc..715c96ba2ec81c2907ead07ffd21fbf79a0fb0cb 100644 (file)
@@ -21,14 +21,6 @@ typedef struct {
        unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
-#ifndef __KERNEL__
-struct sigaction {
-       __sighandler_t sa_handler;
-       unsigned long sa_flags;
-       sigset_t sa_mask;               /* mask last for extensibility */
-};
-#endif
-
 #include <asm/sigcontext.h>
 
 #endif /* !__ASSEMBLY */
index 4872e77aa96b784d5a1e19bd7f9c4996b8cd0992..dc77c5a51db774a7c691568c010ce0a4500e7286 100644 (file)
 #define __NR_preadv2           (__NR_Linux + 347)
 #define __NR_pwritev2          (__NR_Linux + 348)
 #define __NR_statx             (__NR_Linux + 349)
+#define __NR_io_pgetevents     (__NR_Linux + 350)
 
-#define __NR_Linux_syscalls    (__NR_statx + 1)
+#define __NR_Linux_syscalls    (__NR_io_pgetevents + 1)
 
 
 #define __IGNORE_select                /* newselect */
index e0e1c9775c320b46d85da0f2e6ce22bc2275b9fb..5eb979d04b905420e28f63dd526e6ca13aaa9842 100644 (file)
@@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver)
 {
        /* FIXME: we need this because apparently the sti
         * driver can be registered twice */
-       if(driver->drv.name) {
-               printk(KERN_WARNING 
-                      "BUG: skipping previously registered driver %s\n",
-                      driver->name);
+       if (driver->drv.name) {
+               pr_warn("BUG: skipping previously registered driver %s\n",
+                       driver->name);
                return 1;
        }
 
        if (!driver->probe) {
-               printk(KERN_WARNING 
-                      "BUG: driver %s has no probe routine\n",
-                      driver->name);
+               pr_warn("BUG: driver %s has no probe routine\n", driver->name);
                return 1;
        }
 
@@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
 
        dev = create_parisc_device(mod_path);
        if (dev->id.hw_type != HPHW_FAULTY) {
-               printk(KERN_ERR "Two devices have hardware path [%s].  "
-                               "IODC data for second device: "
-                               "%02x%02x%02x%02x%02x%02x\n"
-                               "Rearranging GSC cards sometimes helps\n",
-                       parisc_pathname(dev), iodc_data[0], iodc_data[1],
-                       iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+               pr_err("Two devices have hardware path [%s].  IODC data for second device: %7phN\n"
+                      "Rearranging GSC cards sometimes helps\n",
+                       parisc_pathname(dev), iodc_data);
                return NULL;
        }
 
@@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
         * the keyboard controller
         */
        if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
-               printk("Unable to claim HPA %lx for device %s\n",
-                               hpa, name);
+               pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
 
        return dev;
 }
@@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev)
        static int count;
 
        print_pa_hwpath(dev, hw_path);
-       printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+       pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
                ++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
                dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
 
index 6308749359e4b7d6ee348062d584f7b747f1a115..fe3f2a49d2b1063a93daa0a9d4077d2978c5bdaf 100644 (file)
        ENTRY_COMP(preadv2)
        ENTRY_COMP(pwritev2)
        ENTRY_SAME(statx)
+       ENTRY_COMP(io_pgetevents)       /* 350 */
 
 
 .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
index 143f90e2f9f3c631616d4af52f0fe3fa08f44af9..2ef83d78eec42bd3ad55a3c2e0f976e081417266 100644 (file)
@@ -25,7 +25,7 @@
 
 /* #define DEBUG 1 */
 #ifdef DEBUG
-#define dbg(x...) printk(x)
+#define dbg(x...) pr_debug(x)
 #else
 #define dbg(x...)
 #endif
@@ -182,7 +182,7 @@ int __init unwind_init(void)
        start = (long)&__start___unwind[0];
        stop = (long)&__stop___unwind[0];
 
-       printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n", 
+       dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
            start, stop,
            (stop - start) / sizeof(struct unwind_table_entry));
 
index bd06a3ccda312a0a645cd0dbff887924f691d2ce..2ea575cb3401248c1cb97f9596c9e7079c3256b2 100644 (file)
@@ -244,6 +244,7 @@ cpu-as-$(CONFIG_4xx)                += -Wa,-m405
 cpu-as-$(CONFIG_ALTIVEC)       += $(call as-option,-Wa$(comma)-maltivec)
 cpu-as-$(CONFIG_E200)          += -Wa,-me200
 cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC)    += $(call as-option,-Wa$(comma)-me500mc)
 
 KBUILD_AFLAGS += $(cpu-as-y)
 KBUILD_CFLAGS += $(cpu-as-y)
index 6a6673907e45eeb934e66023e8630fe21d8fd31d..82e44b1a00ae91219f482afa654f2d0440c5aa78 100644 (file)
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)  (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -137,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
index af5f2baac80f991951ac77dc3b3eaeb1e72aee46..a069dfcac9a94a94efe66a162cbbff88f1596934 100644 (file)
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
 }
 #define is_hugepd(hpd)         (hugepd_ok(hpd))
 
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX                                                      \
+       (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+       switch (index) {
+       case H_16M_CACHE_INDEX:
+               return HTLB_16M_INDEX;
+       case H_16G_CACHE_INDEX:
+               return HTLB_16G_INDEX;
+       default:
+               BUG();
+       }
+       /* should not reach */
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
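
The index arithmetic keys off the size of the directory backing each huge-page size. Illustratively, assuming the hash-MMU 4K geometry (PAGE_SHIFT = 12, H_PTE_INDEX_SIZE = 9, H_PMD_INDEX_SIZE = 7, H_PUD_INDEX_SIZE = 9; these values are assumed for the example, not quoted from this diff): H_16M_CACHE_INDEX = 12 + 9 + 7 - 24 = 4, i.e. a hugepd of 16M (2^24-byte) pages subdividing the 2^28-byte reach of a PUD entry needs 2^4 entries, and H_16G_CACHE_INDEX = 12 + 9 + 7 + 9 - 34 = 3 by the same logic one level up. get_hugepd_cache_index() then maps those sizes onto the new HTLB_16M_INDEX/HTLB_16G_INDEX slab slots, while the nohash and non-4K variants keep the identity mapping and a never-reached BUG() stub respectively.
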
index fb4b3ba52339e9233207ce7345e2f9d920835f97..d7ee249d6890cb30fcf10ebd608665d57ba2f781 100644 (file)
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
 {
        return 0;
 }
+
 #define is_hugepd(pdep)                        0
 
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+       BUG();
+}
+
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline int pmd_huge(pmd_t pmd) { return 0; }
 static inline int pud_huge(pud_t pud) { return 0; }
index 63cee159022b51400fbc52dd21ebd31f55f3db67..42aafba7a30834db7643213a3aec583a3cdd1b6a 100644 (file)
@@ -287,6 +287,11 @@ enum pgtable_index {
        PMD_INDEX,
        PUD_INDEX,
        PGD_INDEX,
+       /*
+        * Below are used with 4k page size and hugetlb
+        */
+       HTLB_16M_INDEX,
+       HTLB_16G_INDEX,
 };
 
 extern unsigned long __vmalloc_start;
index 0f571e0ebca19ccdc8b89540324ccc71849b75e5..bd9ba8defd7258ab6e853be0c39d7290f9f02393 100644 (file)
@@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void);
 static inline void arch_touch_nmi_watchdog(void) {}
 #endif
 
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
 extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
                                           bool exclude_self);
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
index 1707781d2f208096517859d94f30c6533e0a3771..8825953c225b2e48e9e0cd7938d2185b5e821977 100644 (file)
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
 }
 
 #define check_pgt_cache()      do { } while (0)
+#define get_hugepd_cache_index(x)      (x)
 
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -139,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
        tlb_flush_pgtable(tlb, address);
-       pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
index 0e693f322cb2e03a353e3803517820f4c324498b..e2d62d033708c4494a5e95d941b8d34cad3ec3e0 100644 (file)
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
        }
 }
 
+#define get_hugepd_cache_index(x)      (x)
 #ifdef CONFIG_SMP
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
index cfcf6a874cfab3a094d4c931bdc7cade28184665..01b5171ea189994ab394685f8f4645a3fd86594c 100644 (file)
@@ -393,3 +393,4 @@ SYSCALL(pkey_alloc)
 SYSCALL(pkey_free)
 SYSCALL(pkey_mprotect)
 SYSCALL(rseq)
+COMPAT_SYS(io_pgetevents)
index 1e9708632dce30e1093d48dbed2db8d0d90a4e89..c19379f0a32e2b0fe59a9634140582a8afbc291e 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            388
+#define NR_syscalls            389
 
 #define __NR__exit __NR_exit
 
index ac5ba55066dd76a26f133d91623309036bcad4c8..985534d0b448b7ae7b9d4cad7c3f9257d4ce0789 100644 (file)
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
 #define __NR_rseq              387
+#define __NR_io_pgetevents     388
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
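
Taken together, these three hunks are the standard wiring for a new
powerpc syscall: the table entry, the NR_syscalls bump, and the uapi
number. A minimal userspace probe on a powerpc kernel carrying this
change might look like the following (hypothetical test program, not
from the patch; passing zeros just checks that the number is wired up
rather than returning -ENOSYS):

  #include <stdio.h>
  #include <errno.h>
  #include <unistd.h>
  #include <sys/syscall.h>

  int main(void)
  {
          /* 388 == __NR_io_pgetevents from the hunk above */
          long ret = syscall(388, 0, 0, 0, 0, 0, 0);

          printf("ret=%ld errno=%d (ENOSYS means not wired up)\n",
                 ret, errno);
          return 0;
  }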
index 4be1c0de9406b159eede5503b3a8044645dac7fa..96dd3d871986428dadcbc9bb350c1b876fde8ab4 100644 (file)
@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
                cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
-       } else /* DD2.1 and up have DD2_1 */
+       } else if ((version & 0xffff0000) == 0x004e0000)
+               /* DD2.1 and up have DD2_1 */
                cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
 
        if ((version & 0xffff0000) == 0x004e0000) {
index 4f861055a8521276c89c71cd67c41425c38c0ac2..d63b488d34d79033fa7229bfeb4d306cf6b56bc0 100644 (file)
@@ -285,9 +285,6 @@ pci_bus_to_hose(int bus)
  * Note that the returned IO or memory base is a physical address
  */
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which,
                unsigned long, bus, unsigned long, devfn)
 {
@@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which,
 
        return result;
 }
-#pragma GCC diagnostic pop
index 812171c09f42fecf2c97757e37f7ad45ec9a35d8..dff28f90351245d58f6b77130fb26fcb73351c5d 100644 (file)
@@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
 #define IOBASE_ISA_IO          3
 #define IOBASE_ISA_MEM         4
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
                          unsigned long, in_devfn)
 {
@@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
 
        return -EOPNOTSUPP;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_NUMA
 int pcibus_to_node(struct pci_bus *bus)
index 7fb9f83dcde889f8340daa94ec66cc6b3cb1804b..8afd146bc9c70dc6480e2fff20d6239d327e33d3 100644 (file)
@@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
 }
 
 /* We assume to be passed big endian arguments */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 {
        struct rtas_args args;
@@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
 
        return 0;
 }
-#pragma GCC diagnostic pop
 
 /*
  * Call early during boot, before mem init, to retrieve the RTAS
index 62b1a40d895777a10b3c7279fde05583ae3dc66b..40b44bb53a4efbb8b25c64786262e0123a3da640 100644 (file)
@@ -700,12 +700,19 @@ EXPORT_SYMBOL(check_legacy_ioport);
 static int ppc_panic_event(struct notifier_block *this,
                              unsigned long event, void *ptr)
 {
+       /*
+        * panic does a local_irq_disable, but we really
+        * want interrupts to be hard disabled.
+        */
+       hard_irq_disable();
+
        /*
         * If firmware-assisted dump has been registered then trigger
         * firmware-assisted dump and let firmware handle everything else.
         */
        crash_fadump(NULL, ptr);
-       ppc_md.panic(ptr);  /* May not return */
+       if (ppc_md.panic)
+               ppc_md.panic(ptr);  /* May not return */
        return NOTIFY_DONE;
 }
 
@@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = {
 
 void __init setup_panic(void)
 {
-       if (!ppc_md.panic)
+       /* PPC64 always does a hard irq disable in its panic handler */
+       if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
                return;
        atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
 }
index 7a7ce8ad455e1533498fc3c7a5d8a853abb4d9cd..225bc5f91049436277e7c45787d8a7370d6dac78 100644 (file)
@@ -387,6 +387,14 @@ void early_setup_secondary(void)
 
 #endif /* CONFIG_SMP */
 
+void panic_smp_self_stop(void)
+{
+       hard_irq_disable();
+       spin_begin();
+       while (1)
+               spin_cpu_relax();
+}
+
 #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
 static bool use_spinloop(void)
 {
index 17fe4339ba596150e8dc2b0eaf698aee3303ad18..b3e8db376ecde459bb8b5a1cd00b10c9606df289 100644 (file)
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
        /* Re-enable the breakpoints for the signal stack */
        thread_change_pc(tsk, tsk->thread.regs);
 
-       rseq_signal_deliver(tsk->thread.regs);
+       rseq_signal_deliver(&ksig, tsk->thread.regs);
 
        if (is32) {
                if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
                tracehook_notify_resume(regs);
-               rseq_handle_notify_resume(regs);
+               rseq_handle_notify_resume(NULL, regs);
        }
 
        user_enter();
index 5eedbb282d42fcf2caed7f3d09d0227b2e9e0734..e6474a45cef50623be68bc1fbf0b83635275dceb 100644 (file)
@@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
 }
 #endif
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                       struct ucontext __user *, new_ctx, int, ctx_size)
@@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC64
 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
@@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
        return 0;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 #ifdef CONFIG_PPC32
 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
                         int, ndbg, struct sig_dbg_op __user *, dbg)
@@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
        return 0;
 }
 #endif
-#pragma GCC diagnostic pop
 
 /*
  * OK, we're invoking a handler
index d42b600203892d57d7fb3398f7cad38090df9ce6..83d51bf586c7e1ec3697a424a33a1559579147b8 100644 (file)
@@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
 /*
  * Handle {get,set,swap}_context operations
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
                struct ucontext __user *, new_ctx, long, ctx_size)
 {
@@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
        set_thread_flag(TIF_RESTOREALL);
        return 0;
 }
-#pragma GCC diagnostic pop
 
 
 /*
index 5eadfffabe35134f6f34a6acca61c738c4efcbc9..4794d6b4f4d27a4db7f637a309897d64f1ad9e9c 100644 (file)
@@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
        nmi_ipi_busy_count--;
        nmi_ipi_unlock();
 
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        spin_begin();
        while (1)
                spin_cpu_relax();
@@ -617,9 +614,6 @@ void smp_send_stop(void)
 
 static void stop_this_cpu(void *dummy)
 {
-       /* Remove this CPU */
-       set_cpu_online(smp_processor_id(), false);
-
        hard_irq_disable();
        spin_begin();
        while (1)
index 07e97f289c5207389ffb817330e5d66a4beb6e70..e2c50b55138f8ab52eecace4c6aad72c382e6bcd 100644 (file)
@@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
 #endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
 
-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
 static void handle_backtrace_ipi(struct pt_regs *regs)
 {
        nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
 {
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
 }
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
index 083fa06962fda045cb5f00ac0ea5b61046e3d4c4..466216506eb2f4bfa7b6b94ed89140914b1ea682 100644 (file)
@@ -62,9 +62,6 @@ out:
        return ret;
 }
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, pgoff)
@@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
 {
        return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
 }
-#pragma GCC diagnostic pop
 
 #ifdef CONFIG_PPC32
 /*
index 7c5f479c5c00fb0f562801285e3795400edab084..8a9a49c138652ba2b971a265db233988e01aa7b1 100644 (file)
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else
-               pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+               pgtable_free_tlb(tlb, hugepte,
+                                get_hugepd_cache_index(pdshift - shift));
 }
 
 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
index c1f4ca45c93a488df07d66525f0d935ca342f84c..4afbfbb64bfd0a21254a177f4fa3df3c37bff6ea 100644 (file)
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
        case PUD_INDEX:
                kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
                break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+               /* 16M hugepd directory at pud level */
+       case HTLB_16M_INDEX:
+               BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+               break;
+               /* 16G hugepd directory at the pgd level */
+       case HTLB_16G_INDEX:
+               BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+               kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+               break;
+#endif
                /* We don't free pgd table via RCU callback */
        default:
                BUG();
index 75cb646a79c383bc39c578a49ddf48a23ee9c44b..9d16ee251fc0131118c375282b2c3e103a2e0b0f 100644 (file)
@@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
  * in a 2-bit field won't allow writes to a page that is otherwise
  * write-protected.
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
 SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
                unsigned long, len, u32 __user *, map)
 {
@@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
        up_write(&mm->mmap_sem);
        return err;
 }
-#pragma GCC diagnostic pop
index 67a6e86d3e7efb25e170af7218453230703aa4a5..1135b43a597c5045be9a0425b67a5e5edd17d876 100644 (file)
@@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
 static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
 
-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-                    unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+                                       unsigned long start, unsigned long end,
+                                       bool flush_all_sizes)
 
 {
-       struct mm_struct *mm = vma->vm_mm;
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;
 
-#ifdef CONFIG_HUGETLB_PAGE
-       if (is_vm_hugetlb_page(vma))
-               return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;
@@ -738,37 +733,64 @@ is_local:
                                _tlbie_pid(pid, RIC_FLUSH_TLB);
                }
        } else {
-               bool hflush = false;
+               bool hflush = flush_all_sizes;
+               bool gflush = flush_all_sizes;
                unsigned long hstart, hend;
+               unsigned long gstart, gend;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
-               hend = end >> HPAGE_PMD_SHIFT;
-               if (hstart < hend) {
-                       hstart <<= HPAGE_PMD_SHIFT;
-                       hend <<= HPAGE_PMD_SHIFT;
+               if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
                        hflush = true;
+
+               if (hflush) {
+                       hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+                       hend = end & PMD_MASK;
+                       if (hstart == hend)
+                               hflush = false;
+               }
+
+               if (gflush) {
+                       gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+                       gend = end & PUD_MASK;
+                       if (gstart == gend)
+                               gflush = false;
                }
-#endif
 
                asm volatile("ptesync": : :"memory");
                if (local) {
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbiel_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        asm volatile("ptesync": : :"memory");
                } else {
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
-                                               HPAGE_PMD_SIZE, MMU_PAGE_2M);
+                                               PMD_SIZE, MMU_PAGE_2M);
+                       if (gflush)
+                               __tlbie_va_range(gstart, gend, pid,
+                                               PUD_SIZE, MMU_PAGE_1G);
                        fixup_tlbie();
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
 }
+
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                    unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+       if (is_vm_hugetlb_page(vma))
+               return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+       __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
 EXPORT_SYMBOL(radix__flush_tlb_range);
 
 static int radix_get_mmu_psize(int page_size)
@@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb)
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;
+       unsigned long start = tlb->start;
+       unsigned long end = tlb->end;
 
        /*
         * if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb)
         */
        if (tlb->fullmm) {
                __flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+       } else if (mm_tlb_flush_nested(mm)) {
+               /*
+                * If there is a concurrent invalidation that is clearing ptes,
+                * then it's possible this invalidation will miss one of those
+                * cleared ptes and miss flushing the TLB. If this invalidate
+                * returns before the other one flushes TLBs, that can result
+                * in it returning while there are still valid TLB entries
+                * inside the range to be invalidated.
+                *
+                * See mm/memory.c:tlb_finish_mmu() for more details.
+                *
+                * The solution to this is to ensure the entire range is
+                * always flushed here. The problem for powerpc is that the
+                * flushes are page size specific, so this "forced flush"
+                * would not do the right thing if there is a mix of page
+                * sizes in the range to be invalidated. So use
+                * __flush_tlb_range which invalidates all possible page
+                * sizes in the range.
+                *
+                * The PWC flush is probably not required because the core
+                * code shouldn't free page tables in this path, but
+                * accounting for the possibility makes us a bit more robust.
+                *
+                * need_flush_all is an uncommon case because page table
+                * teardown should be done with exclusive locks held (but
+                * after locks are dropped another invalidate could come
+                * in); it could be optimized further if necessary.
+                */
+               if (!tlb->need_flush_all)
+                       __radix__flush_tlb_range(mm, start, end, true);
+               else
+                       radix__flush_all_mm(mm);
+#endif
        } else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
-               unsigned long start = tlb->start;
-               unsigned long end = tlb->end;
-
                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
@@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
+                       if (!cpu_possible(sib))
+                               continue;
                        if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
                                flush = true;
                }
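
The rounding in the new hflush/gflush blocks is worth spelling out: the
start of the range is rounded up and the end rounded down to the 2M
(PMD) or 1G (PUD) boundary, so the extra invalidations cover exactly
the huge mappings that lie wholly inside the range. A worked example
under the usual 2M PMD assumption:

  #include <stdio.h>

  #define PMD_SIZE 0x200000UL             /* assuming 2M PMDs */
  #define PMD_MASK (~(PMD_SIZE - 1))

  int main(void)
  {
          unsigned long start = 0x1f0000, end = 0x610000;
          unsigned long hstart = (start + PMD_SIZE - 1) & PMD_MASK;
          unsigned long hend   = end & PMD_MASK;

          /* prints [0x200000, 0x600000): the fully covered 2M slots */
          printf("2M flush range: [%#lx, %#lx)\n", hstart, hend);
          return 0;
  }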
index 7c968e46736faa598861259d5a3781577256a7ff..12e6e4d3060236a05fcd987690b385576d8821cb 100644 (file)
 #define DBG(x...)
 #endif
 
-/* Apparently the RTC stores seconds since 1 Jan 1904 */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
 #define RTC_OFFSET     2082844800
 
 /*
@@ -97,8 +101,11 @@ static time64_t cuda_get_time(void)
        if (req.reply_len != 7)
                printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
                       req.reply_len);
-       now = (req.reply[3] << 24) + (req.reply[4] << 16)
-               + (req.reply[5] << 8) + req.reply[6];
+       now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) +
+                   (req.reply[5] << 8) + req.reply[6]);
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -106,10 +113,10 @@ static time64_t cuda_get_time(void)
 
 static int cuda_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
                         nowtime >> 24, nowtime >> 16, nowtime >> 8,
                         nowtime) < 0)
@@ -140,8 +147,12 @@ static time64_t pmu_get_time(void)
        if (req.reply_len != 4)
                printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
                       req.reply_len);
-       now = (req.reply[0] << 24) + (req.reply[1] << 16)
-               + (req.reply[2] << 8) + req.reply[3];
+       now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) +
+                   (req.reply[2] << 8) + req.reply[3]);
+
+       /* it's either after year 2040, or the RTC has gone backwards */
+       WARN_ON(now < RTC_OFFSET);
+
        return now - RTC_OFFSET;
 }
 
@@ -149,10 +160,10 @@ static time64_t pmu_get_time(void)
 
 static int pmu_set_rtc_time(struct rtc_time *tm)
 {
-       time64_t nowtime;
+       u32 nowtime;
        struct adb_request req;
 
-       nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+       nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
        if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
                        nowtime >> 16, nowtime >> 8, nowtime) < 0)
                return -ENXIO;
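
For reference, the RTC_OFFSET constant is easy to verify: 1904-01-01 to
1970-01-01 is 66 years, 17 of which (1904, 1908, ..., 1968) are leap
years, and a 32-bit seconds counter starting in 1904 overflows in early
2040, which is where the wrap noted in the new comment comes from. A
one-line check:

  #include <stdio.h>

  int main(void)
  {
          long days = 66L * 365 + 17;     /* 17 leap days, 1904..1968 */

          printf("%ld\n", days * 86400);  /* prints 2082844800 */
          return 0;
  }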
index f12680c9b9475e2b130da3369644e797575f7a80..4764fdeb4f1f6837c771e42f9e84ca5bd8291af6 100644 (file)
@@ -107,6 +107,7 @@ config ARCH_RV32I
        select GENERIC_LIB_ASHLDI3
        select GENERIC_LIB_ASHRDI3
        select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_UCMPDI2
 
 config ARCH_RV64I
        bool "RV64I"
index 5cae4c30cd8e2e2147b59285f9eccaaae7e9a789..1e0dfc36aab9e597aaf0d0fb3c99b2ac3dcae750 100644 (file)
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
 
 typedef union __riscv_fp_state elf_fpregset_t;
 
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info)                ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info)                ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF32_R_TYPE(r_info)
+#endif
 
 /*
  * RISC-V relocation types
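
The rewritten macros defer to the standard ELF accessors instead of
hard-coding the ELF64 split, which is what broke RV32: ELF64 packs the
symbol index into the top 32 bits of r_info, while ELF32 uses the top
24 bits with an 8-bit type. A small userspace illustration of the two
encodings, using the stock <elf.h> macros:

  #include <elf.h>
  #include <stdio.h>

  int main(void)
  {
          Elf64_Xword r64 = ELF64_R_INFO(7, 2);   /* sym 7, type 2 */
          Elf32_Word  r32 = ELF32_R_INFO(7, 2);

          printf("ELF64: sym=%lu type=%lu\n",
                 (unsigned long)ELF64_R_SYM(r64),
                 (unsigned long)ELF64_R_TYPE(r64));
          printf("ELF32: sym=%u type=%u\n",
                 (unsigned)ELF32_R_SYM(r32), (unsigned)ELF32_R_TYPE(r32));
          return 0;
  }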
index b74cbfbce2d0dd9df65ba779ab9dd8feb0d38eed..7bcdaed15703be6d8f141a8582abd024dc966fec 100644 (file)
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
-#endif
-
 void __init init_IRQ(void)
 {
        irqchip_init();
index 1d5e9b934b8ca5b5b78a64af5e1c06e334b7e5f2..3303ed2cd4193f82c51730a992d6c875b361ff80 100644 (file)
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
 static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
                                     Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm12 = (offset & 0x1000) << (31 - 12);
        u32 imm11 = (offset & 0x800) >> (11 - 7);
        u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
                                  Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm20 = (offset & 0x100000) << (31 - 20);
        u32 imm19_12 = (offset & 0xff000);
        u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm8 = (offset & 0x100) << (12 - 8);
        u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm11 = (offset & 0x800) << (12 - 11);
        u16 imm10 = (offset & 0x400) >> (10 - 8);
        u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
 static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        /* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_rela(struct module *me, u32 *location,
                                   Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
 static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location += (*(u32 *)v);
+       *(u32 *)location += (u32)v;
        return 0;
 }
 
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location -= (*(u32 *)v);
+       *(u32 *)location -= (u32)v;
        return 0;
 }
 
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                        unsigned int j;
 
                        for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
-                               u64 hi20_loc =
+                               unsigned long hi20_loc =
                                        sechdrs[sechdrs[relsec].sh_info].sh_addr
                                        + rel[j].r_offset;
                                u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                        Elf_Sym *hi20_sym =
                                                (Elf_Sym *)sechdrs[symindex].sh_addr
                                                + ELF_RISCV_R_SYM(rel[j].r_info);
-                                       u64 hi20_sym_val =
+                                       unsigned long hi20_sym_val =
                                                hi20_sym->st_value
                                                + rel[j].r_addend;
 
                                        /* Calculate lo12 */
-                                       u64 offset = hi20_sym_val - hi20_loc;
+                                       size_t offset = hi20_sym_val - hi20_loc;
                                        if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
                                            && hi20_type == R_RISCV_GOT_HI20) {
                                                offset = module_emit_got_entry(
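
The common thread in these hunks is replacing fixed 64-bit types with
native-width ones: pointer subtraction yields ptrdiff_t, which is 32
bits on RV32, so the relocation math no longer drags in 64-bit
compare/shift helpers (the GENERIC_LIB_UCMPDI2 select in the Kconfig
hunk earlier covers the cases that remain). A trivial demonstration of
the width difference:

  #include <stdio.h>
  #include <stddef.h>

  int main(void)
  {
          char buf[64];
          ptrdiff_t off = &buf[48] - &buf[0];

          /* sizeof(ptrdiff_t) is 4 on RV32, 8 on RV64 */
          printf("width=%zu bytes, off=%td\n", sizeof(off), off);
          return 0;
  }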
index ba3e80712797c8ece03b07930f2ffb4b370588cc..9f82a7e34c648a370ec42f2e0bad711058e9baf2 100644 (file)
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
        struct pt_regs *regs;
 
        regs = task_pt_regs(target);
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
        return ret;
 }
 
index ee44a48faf79dfd8e01cc07db341cacbeb9feed6..f0d2070866d49b170da74ae0e20e779f55a02199 100644 (file)
@@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
        riscv_fill_hwcap();
 }
 
-static int __init riscv_device_init(void)
-{
-       return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
index c77df8142be2eaa9525130b3cbea70e191a20aba..58a522f9bcc319ae5d40a8ae15da5d9021921ebd 100644 (file)
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
+#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
        free_area_init_nodes(max_zone_pfns);
index baed39772c845d74d91c292aba3b3ea4063aa130..e44bb2b2873e0798eb67c13ee4f100d798077a2a 100644 (file)
@@ -160,6 +160,7 @@ config S390
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_RSEQ
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING
        select MODULES_USE_ELF_RELA
index 0563fd3e84585769f7acd0f093f30a48eaab9f54..480bb02ccacdd07de17ffda81fae140ee748a505 100644 (file)
@@ -6,36 +6,38 @@
 
 struct css_general_char {
        u64 : 12;
-       u32 dynio : 1;   /* bit 12 */
-       u32 : 4;
-       u32 eadm : 1;    /* bit 17 */
-       u32 : 23;
-       u32 aif : 1;     /* bit 41 */
-       u32 : 3;
-       u32 mcss : 1;    /* bit 45 */
-       u32 fcs : 1;     /* bit 46 */
-       u32 : 1;
-       u32 ext_mb : 1;  /* bit 48 */
-       u32 : 7;
-       u32 aif_tdd : 1; /* bit 56 */
-       u32 : 1;
-       u32 qebsm : 1;   /* bit 58 */
-       u32 : 2;
-       u32 aiv : 1;     /* bit 61 */
-       u32 : 5;
-       u32 aif_osa : 1; /* bit 67 */
-       u32 : 12;
-       u32 eadm_rf : 1; /* bit 80 */
-       u32 : 1;
-       u32 cib : 1;     /* bit 82 */
-       u32 : 5;
-       u32 fcx : 1;     /* bit 88 */
-       u32 : 19;
-       u32 alt_ssi : 1; /* bit 108 */
-       u32 : 1;
-       u32 narf : 1;    /* bit 110 */
-       u32 : 12;
-       u32 util_str : 1;/* bit 123 */
+       u64 dynio : 1;   /* bit 12 */
+       u64 : 4;
+       u64 eadm : 1;    /* bit 17 */
+       u64 : 23;
+       u64 aif : 1;     /* bit 41 */
+       u64 : 3;
+       u64 mcss : 1;    /* bit 45 */
+       u64 fcs : 1;     /* bit 46 */
+       u64 : 1;
+       u64 ext_mb : 1;  /* bit 48 */
+       u64 : 7;
+       u64 aif_tdd : 1; /* bit 56 */
+       u64 : 1;
+       u64 qebsm : 1;   /* bit 58 */
+       u64 : 2;
+       u64 aiv : 1;     /* bit 61 */
+       u64 : 2;
+
+       u64 : 3;
+       u64 aif_osa : 1; /* bit 67 */
+       u64 : 12;
+       u64 eadm_rf : 1; /* bit 80 */
+       u64 : 1;
+       u64 cib : 1;     /* bit 82 */
+       u64 : 5;
+       u64 fcx : 1;     /* bit 88 */
+       u64 : 19;
+       u64 alt_ssi : 1; /* bit 108 */
+       u64 : 1;
+       u64 narf : 1;    /* bit 110 */
+       u64 : 12;
+       u64 util_str : 1;/* bit 123 */
 } __packed;
 
 extern struct css_general_char css_general_characteristics;
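
The bug being fixed here is a C layout subtlety: the declared type of a
bit-field sets its allocation unit, and a field may not straddle a unit
boundary, so with u32 members the flags drifted away from their
documented bit numbers once the struct crossed a 32-bit unit. A
userspace illustration of the effect (GCC/Clang; little-endian output
shown for simplicity, whereas the s390 struct is big-endian):

  #include <stdio.h>
  #include <string.h>

  struct u32f { unsigned int       x : 20, y : 20; };
  struct u64f { unsigned long long x : 20, y : 20; };

  int main(void)
  {
          struct u32f a = { 0, 1 };
          struct u64f b = { 0, 1 };
          unsigned long long ra = 0, rb = 0;

          memcpy(&ra, &a, sizeof(a));
          memcpy(&rb, &b, sizeof(b));

          /* y lands at bit 32 with u32 units but bit 20 with u64 units */
          printf("u32 units: %#llx\nu64 units: %#llx\n", ra, rb);
          return 0;
  }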
index 607c5e9fba3ddcdfe762c347c8441c447fc529f5..2ce28bf0c5ec44939d815c5187153b0858475aa7 100644 (file)
@@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
 COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
 COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
 COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
+COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
index f03402efab4b414eefdfd59135f4ee89dda68e8a..150130c897c39938d03d04e497100cca77d0a353 100644 (file)
@@ -357,6 +357,10 @@ ENTRY(system_call)
        stg     %r2,__PT_R2(%r11)               # store return value
 
 .Lsysc_return:
+#ifdef CONFIG_DEBUG_RSEQ
+       lgr     %r2,%r11
+       brasl   %r14,rseq_syscall
+#endif
        LOCKDEP_SYS_EXIT
 .Lsysc_tif:
        TSTMSK  __PT_FLAGS(%r11),_PIF_WORK
@@ -1265,7 +1269,7 @@ cleanup_critical:
        jl      0f
        clg     %r9,BASED(.Lcleanup_table+104)  # .Lload_fpu_regs_end
        jl      .Lcleanup_load_fpu_regs
-0:     BR_EX   %r14
+0:     BR_EX   %r14,%r11
 
        .align  8
 .Lcleanup_table:
@@ -1301,7 +1305,7 @@ cleanup_critical:
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_USER_ASCE          # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
-       BR_EX   %r14
+       BR_EX   %r14,%r11
 #endif
 
 .Lcleanup_system_call:
index 2d2960ab3e108ca5b0d6ef06476987ed8d5f4839..22f08245aa5d46ef5f80398ebcc4e064099c91a6 100644 (file)
@@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs)
                }
                /* No longer in a system call */
                clear_pt_regs_flag(regs, PIF_SYSCALL);
-
+               rseq_signal_deliver(&ksig, regs);
                if (is_compat_task())
                        handle_signal32(&ksig, oldset, regs);
                else
@@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs)
 {
        clear_thread_flag(TIF_NOTIFY_RESUME);
        tracehook_notify_resume(regs);
+       rseq_handle_notify_resume(NULL, regs);
 }
index 8b210ead79569413ab74e3a1c03b506f48a1622f..022fc099b628292e3c9daeecb2eb18ac54816935 100644 (file)
 379  common    statx                   sys_statx                       compat_sys_statx
 380  common    s390_sthyi              sys_s390_sthyi                  compat_sys_s390_sthyi
 381  common    kexec_file_load         sys_kexec_file_load             compat_sys_kexec_file_load
+382  common    io_pgetevents           sys_io_pgetevents               compat_sys_io_pgetevents
+383  common    rseq                    sys_rseq                        compat_sys_rseq
index 84bd6329a88dd3ace39e612197dccec0a48dc4fc..e3bd5627afef3452c50325e955611a5814341b6f 100644 (file)
@@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
                spin_unlock_bh(&mm->context.lock);
                if (mask != 0)
                        return;
+       } else {
+               atomic_xor_bits(&page->_refcount, 3U << 24);
        }
 
        pgtable_page_dtor(page);
@@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table)
                        break;
                /* fallthrough */
        case 3:         /* 4K page table with pgstes */
+               if (mask & 3)
+                       atomic_xor_bits(&page->_refcount, 3 << 24);
                pgtable_page_dtor(page);
                __free_page(page);
                break;
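
Context for the XOR, as I understand it: s390 carves each 4K page into
two 2K page tables and tracks which halves are in use in bits around
24-25 of page->_refcount, so toggling 3U << 24 marks both halves
allocated on one path and must clear them again before the page is
freed; the two hunks add the missing clear. A generic model of the
toggle (illustrative only, plain C11 atomics rather than the kernel's
atomic_xor_bits):

  #include <stdatomic.h>
  #include <stdio.h>

  int main(void)
  {
          atomic_uint refcount = 1;       /* low bits: the real count */

          atomic_fetch_xor(&refcount, 3u << 24);  /* mark halves used */
          printf("%#x\n", atomic_load(&refcount));        /* 0x3000001 */
          atomic_fetch_xor(&refcount, 3u << 24);  /* clear before free */
          printf("%#x\n", atomic_load(&refcount));        /* 0x1 */
          return 0;
  }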
index d2db8acb1a55480895e38fdf142c3d074610230d..5f0234ec8038eb2d11e93b190f3f35e29f29207b 100644 (file)
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                goto free_addrs;
        }
        if (bpf_jit_prog(&jit, fp)) {
+               bpf_jit_binary_free(header);
                fp = orig_fp;
                goto free_addrs;
        }
index f0a6ea22429d7384d81f81e38fb39dbfc9e720ed..a08e82856563ddc34079e96b592e5c60edbc30aa 100644 (file)
@@ -258,11 +258,6 @@ archscripts: scripts_basic
 archheaders:
        $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
-       $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
 ###
 # Kernel objects
 
@@ -327,7 +322,6 @@ archclean:
        $(Q)rm -rf $(objtree)/arch/x86_64
        $(Q)$(MAKE) $(clean)=$(boot)
        $(Q)$(MAKE) $(clean)=arch/x86/tools
-       $(Q)$(MAKE) $(clean)=arch/x86/purgatory
 
 define archhelp
   echo  '* bzImage      - Compressed kernel image (arch/x86/boot/bzImage)'
index a8a8642d2b0b802424caf7b4f925edbce7e3284e..e98522ea6f09ee2fb52c2d69f333e6d4916940e5 100644 (file)
@@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
        struct pci_setup_rom *rom = NULL;
        efi_status_t status;
        unsigned long size;
-       uint64_t attributes, romsize;
+       uint64_t romsize;
        void *romimage;
 
-       status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
-                               EfiPciIoAttributeOperationGet, 0, 0,
-                               &attributes);
-       if (status != EFI_SUCCESS)
-               return status;
-
        /*
-        * Some firmware images contain EFI function pointers at the place where the
-        * romimage and romsize fields are supposed to be. Typically the EFI
+        * Some firmware images contain EFI function pointers at the place where
+        * the romimage and romsize fields are supposed to be. Typically the EFI
         * code is mapped at high addresses, translating to an unrealistically
         * large romsize. The UEFI spec limits the size of option ROMs to 16
         * MiB so we reject any ROMs over 16 MiB in size to catch this.
index 9254e0b6cc060011d63b2bfa9ec281768776b2bc..717bf07764210f065315d7e3afa09774d9a87efe 100644 (file)
@@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
        movdqu STATE3, 0x40(STATEP)
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis128_aesni_enc_tail)
 
 .macro decrypt_block a s0 s1 s2 s3 s4 i
index 9263c344f2c797d847b7b7ec534803010a5c9e72..4eda2b8db9e1b08d4af85e37c6cf19039bc9220b 100644 (file)
@@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail)
        state_store0
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis128l_aesni_enc_tail)
 
 /*
index 1d977d515bf992c649d8890316dfca41fc511364..32aae83972680731a4f36f92b608344d15339d9f 100644 (file)
@@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail)
        state_store0
 
        FRAME_END
+       ret
 ENDPROC(crypto_aegis256_aesni_enc_tail)
 
 /*
index 37d422e77931129d06e88c30edd040bc78c3de04..07653d4582a66b45370fa764747e5fa1056ef748 100644 (file)
@@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail)
        vmovdqu STATE4, (4 * 32)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus1280_avx2_enc_tail)
 
 /*
index 1fe637c7be9db5515bbaff68f935dea11d151062..bd1aa1b608698fd50c967a250dc257a89b5fa3b4 100644 (file)
@@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail)
        movdqu STATE4_HI, (9 * 16)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus1280_sse2_enc_tail)
 
 /*
index 71c72a0a0862c25da3293b499f0b2994e14f9926..efa02816d921c246b02a40a5b85e1396068c9beb 100644 (file)
@@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail)
        movdqu STATE4, (4 * 16)(%rdi)
 
        FRAME_END
+       ret
 ENDPROC(crypto_morus640_sse2_enc_tail)
 
 /*
index 92190879b228c82f4ec681aa9c07bccc0e32204a..3b2490b81918128a61f6df1807788436d4f8ceb7 100644 (file)
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
                if (cached_flags & _TIF_NOTIFY_RESUME) {
                        clear_thread_flag(TIF_NOTIFY_RESUME);
                        tracehook_notify_resume(regs);
-                       rseq_handle_notify_resume(regs);
+                       rseq_handle_notify_resume(NULL, regs);
                }
 
                if (cached_flags & _TIF_USER_RETURN_NOTIFY)
index 2582881d19ceeeb75a9a90588547914ccdefdbd0..c371bfee137ac976a01716118faf6cf490a3b5aa 100644 (file)
@@ -477,7 +477,7 @@ ENTRY(entry_SYSENTER_32)
         * whereas POPF does not.)
         */
        addl    $PT_EFLAGS-PT_DS, %esp  /* point esp at pt_regs->flags */
-       btr     $X86_EFLAGS_IF_BIT, (%esp)
+       btrl    $X86_EFLAGS_IF_BIT, (%esp)
        popfl
 
        /*
index 9de7f1e1dede7f6e6ebdc66e5f63756a173cdc0a..7d0df78db727296d1c4451e3a930033669f47aa3 100644 (file)
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
        pushq   %rdx                    /* pt_regs->dx */
        pushq   %rcx                    /* pt_regs->cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   %r8                     /* pt_regs->r8 */
+       pushq   $0                      /* pt_regs->r8  = 0 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   %r9                     /* pt_regs->r9 */
+       pushq   $0                      /* pt_regs->r9  = 0 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   %r10                    /* pt_regs->r10 */
+       pushq   $0                      /* pt_regs->r10 = 0 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   %r11                    /* pt_regs->r11 */
+       pushq   $0                      /* pt_regs->r11 = 0 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
@@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
        pushq   %rcx                    /* pt_regs->cx */
        xorl    %ecx, %ecx              /* nospec   cx */
        pushq   $-ENOSYS                /* pt_regs->ax */
-       pushq   $0                      /* pt_regs->r8  = 0 */
+       pushq   %r8                     /* pt_regs->r8 */
        xorl    %r8d, %r8d              /* nospec   r8 */
-       pushq   $0                      /* pt_regs->r9  = 0 */
+       pushq   %r9                     /* pt_regs->r9 */
        xorl    %r9d, %r9d              /* nospec   r9 */
-       pushq   $0                      /* pt_regs->r10 = 0 */
+       pushq   %r10                    /* pt_regs->r10 */
        xorl    %r10d, %r10d            /* nospec   r10 */
-       pushq   $0                      /* pt_regs->r11 = 0 */
+       pushq   %r11                    /* pt_regs->r11 */
        xorl    %r11d, %r11d            /* nospec   r11 */
        pushq   %rbx                    /* pt_regs->rbx */
        xorl    %ebx, %ebx              /* nospec   rbx */
index f68855499391f4637532fa0de4c95ca844e6563e..40233836565118f4fb60239ded1fc09aabe34cf1 100644 (file)
@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
                nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
        }
+       if (nr_bank < 0)
+               goto ipi_mask_ex_done;
        if (!nr_bank)
                ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
 
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 
        for_each_cpu(cur_cpu, mask) {
                vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+               if (vcpu == VP_INVAL)
+                       goto ipi_mask_done;
+
                /*
                 * This particular version of the IPI hypercall can
                 * only target up to 64 CPUs.
index 4c431e1c1effc42ade651f9a69c3e06136827e17..1ff420217298edf88648d35b56cefb73b8beb788 100644 (file)
@@ -265,7 +265,7 @@ void __init hyperv_init(void)
 {
        u64 guest_id, required_msrs;
        union hv_x64_msr_hypercall_contents hypercall_msr;
-       int cpuhp;
+       int cpuhp, i;
 
        if (x86_hyper_type != X86_HYPER_MS_HYPERV)
                return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
        if (!hv_vp_index)
                return;
 
+       for (i = 0; i < num_possible_cpus(); i++)
+               hv_vp_index[i] = VP_INVAL;
+
        hv_vp_assist_page = kcalloc(num_possible_cpus(),
                                    sizeof(*hv_vp_assist_page), GFP_KERNEL);
        if (!hv_vp_assist_page) {
index 219faaec51dfa192f69d8893c8844219c0c89029..990770f9e76b5a52af6f85692883a8507af00af6 100644 (file)
 #define _ASM_SI                __ASM_REG(si)
 #define _ASM_DI                __ASM_REG(di)
 
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1      _ASM_AX
+#define _ASM_ARG2      _ASM_DX
+#define _ASM_ARG3      _ASM_CX
+
+#define _ASM_ARG1L     eax
+#define _ASM_ARG2L     edx
+#define _ASM_ARG3L     ecx
+
+#define _ASM_ARG1W     ax
+#define _ASM_ARG2W     dx
+#define _ASM_ARG3W     cx
+
+#define _ASM_ARG1B     al
+#define _ASM_ARG2B     dl
+#define _ASM_ARG3B     cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1      _ASM_DI
+#define _ASM_ARG2      _ASM_SI
+#define _ASM_ARG3      _ASM_DX
+#define _ASM_ARG4      _ASM_CX
+#define _ASM_ARG5      r8
+#define _ASM_ARG6      r9
+
+#define _ASM_ARG1Q     rdi
+#define _ASM_ARG2Q     rsi
+#define _ASM_ARG3Q     rdx
+#define _ASM_ARG4Q     rcx
+#define _ASM_ARG5Q     r8
+#define _ASM_ARG6Q     r9
+
+#define _ASM_ARG1L     edi
+#define _ASM_ARG2L     esi
+#define _ASM_ARG3L     edx
+#define _ASM_ARG4L     ecx
+#define _ASM_ARG5L     r8d
+#define _ASM_ARG6L     r9d
+
+#define _ASM_ARG1W     di
+#define _ASM_ARG2W     si
+#define _ASM_ARG3W     dx
+#define _ASM_ARG4W     cx
+#define _ASM_ARG5W     r8w
+#define _ASM_ARG6W     r9w
+
+#define _ASM_ARG1B     dil
+#define _ASM_ARG2B     sil
+#define _ASM_ARG3B     dl
+#define _ASM_ARG4B     cl
+#define _ASM_ARG5B     r8b
+#define _ASM_ARG6B     r9b
+
+#endif
+
 /*
  * Macros to generate condition code outputs from inline assembly.
  * The output operand must be type "bool".
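
These new _ASM_ARG macros name the registers that carry the first
function arguments in each calling convention (rdi/rsi/rdx and so on in
64-bit), so assembly implementing a C-callable function can be written
once for both widths; the irqflags.o addition further down is the first
user. A userspace illustration of the 64-bit convention the macros
encode (hypothetical demo, GCC/Clang on x86-64):

  #include <stdio.h>

  static long asm_negate(long x)
  {
          long ret;

          /* "D" pins the input to rdi, i.e. _ASM_ARG1; "=a" is rax */
          asm("mov %1, %0\n\t"
              "neg %0"
              : "=a"(ret) : "D"(x));
          return ret;
  }

  int main(void)
  {
          printf("%ld\n", asm_negate(21));        /* prints -21 */
          return 0;
  }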
index 042b5e892ed1063769b253bdf35e31171eb55c4d..14de0432d288414bd1437e44b8cb13facc6f12e9 100644 (file)
@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 {
        unsigned long mask;
 
-       asm ("cmp %1,%2; sbb %0,%0;"
+       asm volatile ("cmp %1,%2; sbb %0,%0;"
                        :"=r" (mask)
                        :"g"(size),"r" (index)
                        :"cc");
index 89f08955fff733c688a5ce4f4a0b8d74050ee617..c4fc17220df959f2d5feb493af6374e7dacce613 100644 (file)
@@ -13,7 +13,7 @@
  * Interrupt control:
  */
 
-static inline unsigned long native_save_fl(void)
+extern inline unsigned long native_save_fl(void)
 {
        unsigned long flags;
 
index 3cd14311edfad6d04c5ea382b2b6845f66a9d6f8..5a7375ed5f7cd80ca9925f05d422a9090e3602be 100644 (file)
@@ -9,6 +9,8 @@
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
 
+#define VP_INVAL       U32_MAX
+
 struct ms_hyperv_info {
        u32 features;
        u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
 
 extern struct ms_hyperv_info ms_hyperv;
 
-
 /*
  * Generate the guest ID.
  */
@@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
         */
        for_each_cpu(cpu, cpus) {
                vcpu = hv_cpu_number_to_vp_number(cpu);
+               if (vcpu == VP_INVAL)
+                       return -1;
                vcpu_bank = vcpu / 64;
                vcpu_offset = vcpu % 64;
                __set_bit(vcpu_offset, (unsigned long *)
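
The sparse VP set packs VP numbers into 64-bit banks, so a VP number
maps to bank vcpu / 64, bit vcpu % 64; returning -1 on VP_INVAL is what
the nr_bank < 0 check added to __send_ipi_mask_ex() earlier in this
series catches. The bank math, worked through:

  #include <stdio.h>

  int main(void)
  {
          unsigned int vcpu = 130;

          /* VP 130 sets bit 2 of bank 2 in the sparse set */
          printf("bank=%u bit=%u\n", vcpu / 64, vcpu % 64);
          return 0;
  }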
index ada6410fd2ecf6fdde039a185ce570b20af53fca..fbd578daa66e97416058e961dd440774fa9ed586 100644 (file)
@@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
 
 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
 {
+       if (!pgtable_l5_enabled())
+               return;
+
        BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
        free_page((unsigned long)p4d);
 }
index 99ecde23c3ec03e02a9aba157e4b31e3c6f53ed7..5715647fc4feca86c8b00e299b347ee602b1b4e6 100644 (file)
@@ -898,7 +898,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 #define pgd_page(pgd)  pfn_to_page(pgd_pfn(pgd))
 
 /* to find an entry in a page-table-directory. */
-static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 {
        if (!pgtable_l5_enabled())
                return (p4d_t *)pgd;
index 0fdcd21dadbd6422bf40f5cbb2361c08c5fafc14..3c5385f9a88fc1e78729647566d819abbd210b42 100644 (file)
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 }
 #endif
 
-static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
        pgd_t pgd;
 
@@ -230,7 +230,7 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        *p4dp = native_make_p4d(native_pgd_val(pgd));
 }
 
-static __always_inline void native_p4d_clear(p4d_t *p4d)
+static inline void native_p4d_clear(p4d_t *p4d)
 {
        native_set_p4d(p4d, native_make_p4d(0));
 }
index 425e6b8b95478248dd3a32122b1aca408691cadf..6aa8499e1f62042a510aaafb2a1ef1e3a89804bd 100644 (file)
 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK    0x0000001f
 #define VMX_MISC_SAVE_EFER_LMA                 0x00000020
 #define VMX_MISC_ACTIVITY_HLT                  0x00000040
+#define VMX_MISC_ZERO_LEN_INS                  0x40000000
 
 /* VMFUNC functions */
 #define VMX_VMFUNC_EPTP_SWITCHING               0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
 #define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED              (1 << 8) /* reserved */
 #define INTR_TYPE_NMI_INTR             (2 << 8) /* NMI */
 #define INTR_TYPE_HARD_EXCEPTION       (3 << 8) /* processor exception */
 #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
 #define INTR_TYPE_PRIV_SW_EXCEPTION    (5 << 8) /* ICE breakpoint - undocumented */
 #define INTR_TYPE_SOFT_EXCEPTION       (6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT           (7 << 8) /* other event */
 
 /* GUEST_INTERRUPTIBILITY_INFO flags. */
 #define GUEST_INTR_STATE_STI           0x00000001
index 02d6f5cf4e70800188994e7e64f52916a9d7d83e..8824d01c0c352d6dbd2c12e228bd0de9ca335166 100644 (file)
@@ -61,6 +61,7 @@ obj-y                 += alternative.o i8253.o hw_breakpoint.o
 obj-y                  += tsc.o tsc_msr.o io_delay.o rtc.o
 obj-y                  += pci-iommu_table.o
 obj-y                  += resource.o
+obj-y                  += irqflags.o
 
 obj-y                          += process.o
 obj-y                          += fpu/
index efaf2d4f9c3c7983221298c2ecb37ce367345b1e..d492752f79e1b9f120de025a9fa89b692e720705 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/delay.h>
 #include <linux/crash_dump.h>
 #include <linux/reboot.h>
+#include <linux/memory.h>
 
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
 }
 EXPORT_SYMBOL(uv_hub_info_version);
 
+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int parse_mem_block_size(char *ptr)
+{
+       unsigned long size = memparse(ptr, NULL);
+
+       /* Size will be rounded down by set_block_size() below */
+       mem_block_size = size;
+       return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+       unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+       unsigned long size;
+
+       for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+               if (IS_ALIGNED(base, size))
+                       break;
+
+       if (size >= mem_block_size)
+               return 0;
+
+       mem_block_size = size;
+       return 1;
+}
+
+static __init void set_block_size(void)
+{
+       unsigned int order = ffs(mem_block_size);
+
+       if (order) {
+               /* adjust for ffs return of 1..64 */
+               set_memory_block_size_order(order - 1);
+               pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+       } else {
+               /* bad or zero value, default to 1UL << 31 (2GB) */
+               pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+               set_memory_block_size_order(31);
+       }
+}
+
 /* Build GAM range lookup table: */
 static __init void build_uv_gr_table(void)
 {
@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
                                        << UV_GAM_RANGE_SHFT);
                int order = 0;
                char suffix[] = " KMGTPE";
+               int flag = ' ';
 
                while (size > 9999 && order < sizeof(suffix)) {
                        size /= 1024;
                        order++;
                }
 
+               /* adjust max block size to current range start */
+               if (gre->type == 1 || gre->type == 2)
+                       if (adj_blksize(lgre))
+                               flag = '*';
+
                if (!index) {
                        pr_info("UV: GAM Range Table...\n");
-                       pr_info("UV:  # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+                       pr_info("UV:  # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
                }
-               pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d   %04x  %02x %02x\n",
+               pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d   %04x  %02x %02x\n",
                        index++,
                        (unsigned long)lgre << UV_GAM_RANGE_SHFT,
                        (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
-                       size, suffix[order],
+                       flag, size, suffix[order],
                        gre->type, gre->nasid, gre->sockid, gre->pnode);
 
+               /* update to next range start */
                lgre = gre->limit;
                if (sock_min > gre->sockid)
                        sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
 
        build_socket_tables();
        build_uv_gr_table();
+       set_block_size();
        uv_init_hub_info(&hub_info);
        uv_possible_blades = num_possible_nodes();
        if (!_node_to_pnode)
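
adj_blksize() is just an alignment walk: starting from the configured
block size, it halves until the range base is aligned, never going
below the architectural minimum. A worked example, with
MIN_MEMORY_BLOCK_SIZE assumed to be the x86-64 section size of 128 MiB:

  #include <stdio.h>

  #define MIN_MEMORY_BLOCK_SIZE (1UL << 27)       /* assumed 128 MiB */

  int main(void)
  {
          unsigned long base = 3UL << 30;         /* range starts at 3 GiB */
          unsigned long size;

          for (size = 2UL << 30; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
                  if ((base & (size - 1)) == 0)
                          break;

          printf("%#lx\n", size);                 /* 0x40000000: 1 GiB */
          return 0;
  }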
index 082d7875cef82eb779b68e5105330482f5a419d8..38915fbfae73d5cfeacbf16624fa49cb9edc7ec7 100644 (file)
@@ -543,7 +543,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
-       if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+       if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+           !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+           c->x86 >= 0x15 && c->x86 <= 0x17) {
                unsigned int bit;
 
                switch (c->x86) {
index cd0fda1fff6d3800fbbbf59a19711eba2df0f96c..5c0ea39311fe305ab183cc3f5bde0fe3bf5d1c5f 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm/pgtable.h>
 #include <asm/set_memory.h>
 #include <asm/intel-family.h>
+#include <asm/hypervisor.h>
 
 static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
@@ -154,7 +155,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
                /* SSBD controlled in MSR_SPEC_CTRL */
-               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+               if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                   static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
                if (hostval != guestval) {
@@ -532,9 +534,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
-               if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+               if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+                   !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
-               else {
+               else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -664,6 +667,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");
 
+               if (hypervisor_is_type(X86_HYPER_XEN_PV))
+                       return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
                break;
 
        case X86_BUG_SPECTRE_V1:
index 38354c66df81144b7d2998ee42fce7a6b15485cd..0c5fcbd998cf11badefad906a2122400a3512d58 100644 (file)
@@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
                        num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
 
                if (num_sharing_cache) {
-                       int bits = get_count_order(num_sharing_cache) - 1;
+                       int bits = get_count_order(num_sharing_cache);
 
                        per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
                }
index 0df7151cfef42cb908c9d76f0b4e78db1620f615..eb4cb3efd20e4cd5ec31d90c1dcf88b62b94622b 100644 (file)
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
 #include <linux/bootmem.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
index 5bbd06f38ff68f58d1efc980db0fd9fc0af7d89a..f34d89c01edc5c761e0df331da1331f8a0f98f3a 100644 (file)
@@ -160,6 +160,11 @@ static struct severity {
                SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
                USER
                ),
+       MCESEV(
+               PANIC, "Data load in unrecoverable area of kernel",
+               SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+               KERNEL
+               ),
 #endif
        MCESEV(
                PANIC, "Action required: unknown MCACOD",
index e4cf6ff1c2e1d341bb5ca890cd8dba266ce2aa18..c102ad51025e865c74e004fa4dc69f9c89a2d034 100644 (file)
@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
 static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
-       int i, ret = 0;
        char *tmp;
+       int i;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(msr_ops.status(i));
-               if (m->status & MCI_STATUS_VAL) {
-                       __set_bit(i, validp);
-                       if (quirk_no_way_out)
-                               quirk_no_way_out(i, m, regs);
-               }
+               if (!(m->status & MCI_STATUS_VAL))
+                       continue;
+
+               __set_bit(i, validp);
+               if (quirk_no_way_out)
+                       quirk_no_way_out(i, m, regs);
 
                if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       mce_read_aux(m, i);
                        *msg = tmp;
-                       ret = 1;
+                       return 1;
                }
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                lmce = m.mcgstatus & MCG_STATUS_LMCES;
 
        /*
+        * Local machine check may already know that we have to panic.
+        * Broadcast machine check begins rendezvous in mce_start()
         * Go through all banks in exclusion of the other CPUs. This way we
         * don't report duplicated events on shared banks because the first one
-        * to see it will clear it. If this is a Local MCE, then no need to
-        * perform rendezvous.
+        * to see it will clear it.
         */
-       if (!lmce)
+       if (lmce) {
+               if (no_way_out)
+                       mce_panic("Fatal local machine check", &m, msg);
+       } else {
                order = mce_start(&no_way_out);
+       }
 
        for (i = 0; i < cfg->banks; i++) {
                __clear_bit(i, toclear);
@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                        no_way_out = worst >= MCE_PANIC_SEVERITY;
        } else {
                /*
-                * Local MCE skipped calling mce_reign()
-                * If we found a fatal error, we need to panic here.
+                * If there was a fatal machine check we should have
+                * already called mce_panic earlier in this function.
+                * Since we re-read the banks, we might have found
+                * something new. Check again to see if we found a
+                * fatal error. We call "mce_severity()" again to
+                * make sure we have the right "msg".
                 */
-                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
-                       mce_panic("Machine check from unknown source",
-                               NULL, NULL);
+               if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+                       mce_severity(&m, cfg->tolerant, &msg, true);
+                       mce_panic("Local fatal machine check!", &m, msg);
+               }
        }
 
        /*
index 1c2cfa0644aa979c97cc01a42925a44c25f9f852..97ccf4c3b45bec517605813b1f24518b10466002 100644 (file)
@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
                        p = memdup_patch(data, size);
                        if (!p)
                                pr_err("Error allocating buffer %p\n", data);
-                       else
+                       else {
                                list_replace(&iter->plist, &p->plist);
+                               kfree(iter->data);
+                               kfree(iter);
+                       }
                }
        }
 
index 4021d3859499c77c14eaa1c40864c752547df68c..40eee6cc412484470daba013f2a197439163707a 100644 (file)
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 
        memset(line, 0, LINE_SIZE);
 
-       length = strncpy_from_user(line, buf, LINE_SIZE - 1);
+       len = min_t(size_t, len, LINE_SIZE - 1);
+       length = strncpy_from_user(line, buf, len);
        if (length < 0)
                return length;
 
index d1f25c83144752272401afe8c8aec313d40298ae..c88c23c658c1e99faad3daa236448bc4208901d7 100644 (file)
@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
 {
        int i;
        u64 end;
+       u64 addr = 0;
 
        /*
         * The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
                struct e820_entry *entry = &e820_table->entries[i];
 
                end = entry->addr + entry->size;
+               if (addr < entry->addr)
+                       memblock_reserve(addr, entry->addr - addr);
+               addr = end;
                if (end != (resource_size_t)end)
                        continue;
 
+               /*
+                * all !E820_TYPE_RAM ranges (including gap ranges) are put
+                * into memblock.reserved to make sure that struct pages in
+                * such regions are not left uninitialized after bootup.
+                */
                if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
-                       continue;
-
-               memblock_add(entry->addr, entry->size);
+                       memblock_reserve(entry->addr, entry->size);
+               else
+                       memblock_add(entry->addr, entry->size);
        }
 
        /* Throw away partial pages: */
index a21d6ace648e3006045f5bd13578f3b29d4ea0bf..8047379e575ad39cb47cdbb055131e9bb094bb4d 100644 (file)
@@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt;
 pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 #ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
 unsigned int pgdir_shift __ro_after_init = 39;
 EXPORT_SYMBOL(pgdir_shift);
 unsigned int ptrs_per_p4d __ro_after_init = 1;
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644 (file)
index 0000000..ddeeaac
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+       pushf
+       pop %_ASM_AX
+       ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+       push %_ASM_ARG1
+       popf
+       ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
index bf8d1eb7fca3d97976b7747f49a5e5d77d18edde..3b8e7c13c614a41fcf4533bd840630c4e3912d8f 100644 (file)
@@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void)
        src = &hv_clock[cpu].pvti;
        tsc_khz = pvclock_tsc_khz(src);
        put_cpu();
+       setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
        return tsc_khz;
 }
 
@@ -319,6 +320,8 @@ void __init kvmclock_init(void)
        printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
                msr_kvm_system_time, msr_kvm_wall_clock);
 
+       pvclock_set_pvti_cpu0_va(hv_clock);
+
        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
                pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
 
@@ -366,14 +369,11 @@ int __init kvm_setup_vsyscall_timeinfo(void)
        vcpu_time = &hv_clock[cpu].pvti;
        flags = pvclock_read_flags(vcpu_time);
 
-       if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
-               put_cpu();
-               return 1;
-       }
-
-       pvclock_set_pvti_cpu0_va(hv_clock);
        put_cpu();
 
+       if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+               return 1;
+
        kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
 #endif
        return 0;
index 697a4ce0430827c89be2cbd86caedfac97e884f7..736348ead4218a0007b715efbc1d56bd1bb73e65 100644 (file)
@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
 /* Skylake */
 static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
 {
-       u32 capid0;
+       u32 capid0, capid5;
 
        pci_read_config_dword(pdev, 0x84, &capid0);
+       pci_read_config_dword(pdev, 0x98, &capid5);
 
-       if ((capid0 & 0xc0) == 0xc0)
+       /*
+        * CAPID0{7:6} indicate whether this is an advanced RAS SKU
+        * CAPID5{8:5} indicate that various NVDIMM usage modes are
+        * enabled, so memory machine check recovery is also enabled.
+        */
+       if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
                static_branch_inc(&mcsafe_key);
+
 }
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
index 445ca11ff8634eb27fb93f93fe362fc4bffaf588..92a3b312a53c465bbde5f006b5707b62671a49ae 100644 (file)
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
         * Increment event counter and perform fixup for the pre-signal
         * frame.
         */
-       rseq_signal_deliver(regs);
+       rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
        if (is_ia32_frame(ksig)) {
index c2f7d1d2a5c36fca041809b727bde36afb347c34..db9656e13ea0418dbf9e619f183bd2176e2d94d6 100644 (file)
@@ -221,6 +221,11 @@ static void notrace start_secondary(void *unused)
 #ifdef CONFIG_X86_32
        /* switch away from the initial page table */
        load_cr3(swapper_pg_dir);
+       /*
+        * Initialize the CR4 shadow before doing anything that could
+        * try to read it.
+        */
+       cr4_init_shadow();
        __flush_tlb_all();
 #endif
        load_current_idt();
index a535dd64de6397b02b3f53cd685584ebf7ebf445..e6db475164edec4f33e6f056cde5cbdfbe51a556 100644 (file)
@@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";
 
-       if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
-               return;
        cond_local_irq_enable(regs);
 
        if (!user_mode(regs)) {
-               if (!fixup_exception(regs, trapnr)) {
-                       task->thread.error_code = error_code;
-                       task->thread.trap_nr = trapnr;
+               if (fixup_exception(regs, trapnr))
+                       return;
+
+               task->thread.error_code = error_code;
+               task->thread.trap_nr = trapnr;
+
+               if (notify_die(DIE_TRAP, str, regs, error_code,
+                                       trapnr, SIGFPE) != NOTIFY_STOP)
                        die(str, regs, error_code);
-               }
                return;
        }
 
index 58d8d800875d0c6a3789a0406fec1eed366eecfc..deb576b23b7cf49817533d00555d0dc976c42486 100644 (file)
@@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
        insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
        /* has the side-effect of processing the entire instruction */
        insn_get_length(insn);
-       if (WARN_ON_ONCE(!insn_complete(insn)))
+       if (!insn_complete(insn))
                return -ENOEXEC;
 
        if (is_prefix_bad(insn))
index 92fd433c50b9b5135e4ada92dc8968f4c5ed75d4..1bbec387d289cb785e4acbd28389e5e071fdfdbb 100644 (file)
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
        def_bool y
        bool "AMD Secure Encrypted Virtualization (SEV) support"
        depends on KVM_AMD && X86_64
-       depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+       depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
        ---help---
        Provides support for launching Encrypted VMs on AMD processors.
 
index 559a12b6184de38c67ef4f2001963600f41f8753..e30da9a2430cad425c56decdb5dd284c381fd9bc 100644 (file)
@@ -1705,6 +1705,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
                MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
 }
 
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+       return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+                       CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
 {
        return vmcs12->cpu_based_vm_exec_control & bit;
@@ -2560,6 +2571,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 #ifdef CONFIG_X86_64
        int cpu = raw_smp_processor_id();
+       unsigned long fs_base, kernel_gs_base;
 #endif
        int i;
 
@@ -2575,12 +2587,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
 
 #ifdef CONFIG_X86_64
-       save_fsgs_for_kvm();
-       vmx->host_state.fs_sel = current->thread.fsindex;
-       vmx->host_state.gs_sel = current->thread.gsindex;
-#else
-       savesegment(fs, vmx->host_state.fs_sel);
-       savesegment(gs, vmx->host_state.gs_sel);
+       if (likely(is_64bit_mm(current->mm))) {
+               save_fsgs_for_kvm();
+               vmx->host_state.fs_sel = current->thread.fsindex;
+               vmx->host_state.gs_sel = current->thread.gsindex;
+               fs_base = current->thread.fsbase;
+               kernel_gs_base = current->thread.gsbase;
+       } else {
+#endif
+               savesegment(fs, vmx->host_state.fs_sel);
+               savesegment(gs, vmx->host_state.gs_sel);
+#ifdef CONFIG_X86_64
+               fs_base = read_msr(MSR_FS_BASE);
+               kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+       }
 #endif
        if (!(vmx->host_state.fs_sel & 7)) {
                vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
@@ -2600,10 +2620,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        savesegment(ds, vmx->host_state.ds_sel);
        savesegment(es, vmx->host_state.es_sel);
 
-       vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
+       vmcs_writel(HOST_FS_BASE, fs_base);
        vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
 
-       vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+       vmx->msr_host_kernel_gs_base = kernel_gs_base;
        if (is_long_mode(&vmx->vcpu))
                wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
@@ -4311,11 +4331,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        vmcs_conf->order = get_order(vmcs_conf->size);
        vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
 
-       /* KVM supports Enlightened VMCS v1 only */
-       if (static_branch_unlikely(&enable_evmcs))
-               vmcs_conf->revision_id = KVM_EVMCS_VERSION;
-       else
-               vmcs_conf->revision_id = vmx_msr_low;
+       vmcs_conf->revision_id = vmx_msr_low;
 
        vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
        vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
@@ -4385,7 +4401,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
                return NULL;
        vmcs = page_address(pages);
        memset(vmcs, 0, vmcs_config.size);
-       vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+
+       /* KVM supports Enlightened VMCS v1 only */
+       if (static_branch_unlikely(&enable_evmcs))
+               vmcs->revision_id = KVM_EVMCS_VERSION;
+       else
+               vmcs->revision_id = vmcs_config.revision_id;
+
        return vmcs;
 }
 
@@ -4553,6 +4575,19 @@ static __init int alloc_kvm_area(void)
                        return -ENOMEM;
                }
 
+               /*
+                * When eVMCS is enabled, alloc_vmcs_cpu() sets
+                * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+                * revision_id reported by MSR_IA32_VMX_BASIC.
+                *
+                * However, even though not explictly documented by
+                * TLFS, VMXArea passed as VMXON argument should
+                * still be marked with revision_id reported by
+                * physical CPU.
+                */
+               if (static_branch_unlikely(&enable_evmcs))
+                       vmcs->revision_id = vmcs_config.revision_id;
+
                per_cpu(vmxarea, cpu) = vmcs;
        }
        return 0;
@@ -11620,6 +11655,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
            !nested_cr3_valid(vcpu, vmcs12->host_cr3))
                return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
 
+       /*
+        * From the Intel SDM, volume 3:
+        * Fields relevant to VM-entry event injection must be set properly.
+        * These fields are the VM-entry interruption-information field, the
+        * VM-entry exception error code, and the VM-entry instruction length.
+        */
+       if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+               u32 intr_info = vmcs12->vm_entry_intr_info_field;
+               u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+               u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+               bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+               bool should_have_error_code;
+               bool urg = nested_cpu_has2(vmcs12,
+                                          SECONDARY_EXEC_UNRESTRICTED_GUEST);
+               bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+               /* VM-entry interruption-info field: interruption type */
+               if (intr_type == INTR_TYPE_RESERVED ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT &&
+                    !nested_cpu_supports_monitor_trap_flag(vcpu)))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: vector */
+               if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+                   (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+                   (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: deliver error code */
+               should_have_error_code =
+                       intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+                       x86_exception_has_error_code(vector);
+               if (has_error_code != should_have_error_code)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry exception error code */
+               if (has_error_code &&
+                   vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry interruption-info field: reserved bits */
+               if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+                       return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+               /* VM-entry instruction length */
+               switch (intr_type) {
+               case INTR_TYPE_SOFT_EXCEPTION:
+               case INTR_TYPE_SOFT_INTR:
+               case INTR_TYPE_PRIV_SW_EXCEPTION:
+                       if ((vmcs12->vm_entry_instruction_len > 15) ||
+                           (vmcs12->vm_entry_instruction_len == 0 &&
+                            !nested_cpu_has_zero_length_injection(vcpu)))
+                               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+               }
+       }
+
        return 0;
 }
 
@@ -11686,7 +11777,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-       u32 msr_entry_idx;
        u32 exit_qual;
        int r;
 
@@ -11708,10 +11798,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
        nested_get_vmcs12_pages(vcpu, vmcs12);
 
        r = EXIT_REASON_MSR_LOAD_FAIL;
-       msr_entry_idx = nested_vmx_load_msr(vcpu,
-                                           vmcs12->vm_entry_msr_load_addr,
-                                           vmcs12->vm_entry_msr_load_count);
-       if (msr_entry_idx)
+       exit_qual = nested_vmx_load_msr(vcpu,
+                                       vmcs12->vm_entry_msr_load_addr,
+                                       vmcs12->vm_entry_msr_load_count);
+       if (exit_qual)
                goto fail;
 
        /*
index 0046aa70205aa2dfbc0577065250be717ca25b4e..2b812b3c50881d2b42738792a7ef1a72cdcb9d66 100644 (file)
@@ -1097,6 +1097,7 @@ static u32 msr_based_features[] = {
 
        MSR_F10H_DECFG,
        MSR_IA32_UCODE_REV,
+       MSR_IA32_ARCH_CAPABILITIES,
 };
 
 static unsigned int num_msr_based_features;
@@ -1105,7 +1106,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
 {
        switch (msr->index) {
        case MSR_IA32_UCODE_REV:
-               rdmsrl(msr->index, msr->data);
+       case MSR_IA32_ARCH_CAPABILITIES:
+               rdmsrl_safe(msr->index, &msr->data);
                break;
        default:
                if (kvm_x86_ops->get_msr_feature(msr))
index 331993c49dae9bd852c759afecbb3c6c17477e15..257f27620bc272e3312295714a120de07963441f 100644 (file)
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+       static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+                       BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+                       BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+       return (1U << vector) & exception_has_error_code;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
index 9a84a0d08727b7452ebea1e2e35b5ad3eb0b6e79..2aafa6ab6103d150ad26e097221a972f5d16363f 100644 (file)
@@ -641,11 +641,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
        return 0;
 }
 
-static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
-static const char smep_warning[] = KERN_CRIT
-"unable to execute userspace code (SMEP?) (uid: %d)\n";
-
 static void
 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                unsigned long address)
@@ -664,20 +659,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
                pte = lookup_address_in_pgd(pgd, address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
-                       printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
                if (pte && pte_present(*pte) && pte_exec(*pte) &&
                                (pgd_flags(*pgd) & _PAGE_USER) &&
                                (__read_cr4() & X86_CR4_SMEP))
-                       printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
+                       pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
+                               from_kuid(&init_user_ns, current_uid()));
        }
 
-       printk(KERN_ALERT "BUG: unable to handle kernel ");
-       if (address < PAGE_SIZE)
-               printk(KERN_CONT "NULL pointer dereference");
-       else
-               printk(KERN_CONT "paging request");
-
-       printk(KERN_CONT " at %px\n", (void *) address);
+       pr_alert("BUG: unable to handle kernel %s at %px\n",
+                address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
+                (void *)address);
 
        dump_pagetable(address);
 }
index 045f492d5f68260a581f44c210aa3753dc4bc225..a688617c727e1ec3558e158156c98b4632b0d9a2 100644 (file)
@@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
 /* Amount of ram needed to start using large blocks */
 #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
 
+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+       unsigned long size = 1UL << order;
+
+       if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+               return -EINVAL;
+
+       set_memory_block_size = size;
+       return 0;
+}
+
 static unsigned long probe_memory_block_size(void)
 {
        unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
        unsigned long bz;
 
-       /* If this is UV system, always set 2G block size */
-       if (is_uv_system()) {
-               bz = MAX_BLOCK_SIZE;
+       /* If memory block size has been set, then use it */
+       bz = set_memory_block_size;
+       if (bz)
                goto done;
-       }
 
        /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
        if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
index e01f7ceb9e7a17436eb71634c5467bbb20a2a2de..77873ce700ae7d703a3385719282dbd31a092ed9 100644 (file)
@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
                pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
 
-               if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+               if (!pgd_present(*pgd))
                        continue;
 
                for (i = 0; i < PTRS_PER_P4D; i++) {
                        p4d = p4d_offset(pgd,
                                         pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
 
-                       if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+                       if (!p4d_present(*p4d))
                                continue;
 
                        pud = (pud_t *)p4d_page_vaddr(*p4d);
index 2e9ee023e6bcff25055bf5e05b0fc597d75f49bb..81a8e33115ad5b72d53fc0829930d60238dbb0c2 100644 (file)
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
 targets += $(purgatory-y)
 PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
 
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
        $(call if_changed_rule,cc_o_c)
 
 LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
index c9081c6671f0b7a05ecfaaf206e7e1ed2b1f456a..3b5318505c69c487f8cfc9c46c93c526197caef6 100644 (file)
@@ -64,6 +64,13 @@ struct shared_info xen_dummy_shared_info;
 __read_mostly int xen_have_vector_callback;
 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
 
+/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
index 357969a3697cc7af6e08c12144ec06f43a8841ad..439a94bf89adb7d013afc084e0e69da6f25222db 100644 (file)
@@ -1203,15 +1203,24 @@ asmlinkage __visible void __init xen_start_kernel(void)
                return;
 
        xen_domain_type = XEN_PV_DOMAIN;
+       xen_start_flags = xen_start_info->flags;
 
        xen_setup_features();
 
-       xen_setup_machphys_mapping();
-
        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops.patch = paravirt_patch_default;
        pv_cpu_ops = xen_cpu_ops;
+       xen_init_irq_ops();
+
+       /*
+        * Setup xen_vcpu early because it is needed for
+        * local_irq_disable(), irqs_disabled(), e.g. in printk().
+        *
+        * Don't do the full vcpu_info placement stuff until we have
+        * the cpu_possible_mask and a non-dummy shared_info.
+        */
+       xen_vcpu_info_reset(0);
 
        x86_platform.get_nmi_reason = xen_get_nmi_reason;
 
@@ -1224,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
         * Set up some pagetable state before starting to set any ptes.
         */
 
+       xen_setup_machphys_mapping();
        xen_init_mmu_ops();
 
        /* Prevent unwanted bits from being set in PTEs. */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
+       __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
 
        /*
         * Prevent page tables from being allocated in highmem, even
@@ -1248,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
        get_cpu_cap(&boot_cpu_data);
        x86_configure_nx();
 
-       xen_init_irq_ops();
-
        /* Let's presume PV guests always boot on vCPU with id 0. */
        per_cpu(xen_vcpu_id, 0) = 0;
 
-       /*
-        * Setup xen_vcpu early because idt_setup_early_handler needs it for
-        * local_irq_disable(), irqs_disabled().
-        *
-        * Don't do the full vcpu_info placement stuff until we have
-        * the cpu_possible_mask and a non-dummy shared_info.
-        */
-       xen_vcpu_info_reset(0);
-
        idt_setup_early_handler();
 
        xen_init_capabilities();
index aa1c6a6831a94dd383e11c575a2a0c91f32136b5..c85d1a88f47693232369411588cfc19084086b25 100644 (file)
@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
        }
 
        xen_pvh = 1;
+       xen_start_flags = pvh_start_info.flags;
 
        msr = cpuid_ebx(xen_cpuid_base() + 2);
        pfn = __pa(hypercall_page);
index 74179852e46c31108adf405e86230c3830add94a..7515a19fd324b54e15d5b6deb632e385913ce4fa 100644 (file)
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
 
 void __init xen_init_irq_ops(void)
 {
-       /* For PVH we use default pv_irq_ops settings. */
-       if (!xen_feature(XENFEAT_hvm_callback_vector))
-               pv_irq_ops = xen_irq_ops;
+       pv_irq_ops = xen_irq_ops;
        x86_init.irqs.intr_init = xen_init_IRQ;
 }
index 2e20ae2fa2d6c3b865f2c745ad9896a752954907..e3b18ad49889afc5ae35d2e2796aecd108a93819 100644 (file)
@@ -32,6 +32,7 @@
 #include <xen/interface/vcpu.h>
 #include <xen/interface/xenpmu.h>
 
+#include <asm/spec-ctrl.h>
 #include <asm/xen/interface.h>
 #include <asm/xen/hypercall.h>
 
@@ -70,6 +71,8 @@ static void cpu_bringup(void)
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);
 
+       speculative_store_bypass_ht_init();
+
        xen_setup_cpu_clockevents();
 
        notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
+       speculative_store_bypass_ht_init();
+
        xen_pmu_init(0);
 
        if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
index 9710e275f23079b8b7548ee935ab653036e82c5d..67eff5eddc49190fe8be721edd7031e8b6284a1e 100644 (file)
@@ -1807,9 +1807,6 @@ again:
        if (!bio_integrity_endio(bio))
                return;
 
-       if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
-               bio->bi_next = NULL;
-
        /*
         * Need to have a real endio function for chained bios, otherwise
         * various corner cases will break (like stacking block devices that
index cf0ee764b908b384f69be9efbb9d7a1352eb7a52..f84a9b7b6f5aa167c5559079f095e3d3dff28f0d 100644 (file)
@@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
        bio_advance(bio, nbytes);
 
        /* don't actually finish bio if it's part of flush sequence */
-       /*
-        * XXX this code looks suspicious - it's not consistent with advancing
-        * req->bio in caller
-        */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
 }
@@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
                struct bio *bio = req->bio;
                unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
 
-               if (bio_bytes == bio->bi_iter.bi_size) {
+               if (bio_bytes == bio->bi_iter.bi_size)
                        req->bio = bio->bi_next;
-                       bio->bi_next = NULL;
-               }
 
                /* Completion has already been traced */
                bio_clear_flag(bio, BIO_TRACE_COMPLETION);
@@ -3479,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
        dst->cpu = src->cpu;
        dst->__sector = blk_rq_pos(src);
        dst->__data_len = blk_rq_bytes(src);
+       if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+               dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+               dst->special_vec = src->special_vec;
+       }
        dst->nr_phys_segments = src->nr_phys_segments;
        dst->ioprio = src->ioprio;
        dst->extra_len = src->extra_len;
index ffa622366922fed04e9dbd2606ffa294b25a697b..1c4532e9293800662d92b809d78637e06607fd90 100644 (file)
@@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = {
 
 static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
 {
-       if (WARN_ON_ONCE((unsigned int)rq_state >
+       if (WARN_ON_ONCE((unsigned int)rq_state >=
                         ARRAY_SIZE(blk_mq_rq_state_name_array)))
                return "(?)";
        return blk_mq_rq_state_name_array[rq_state];
index 70c65bb6c0131c84130fae44808acb51cf427ace..95919268564b162ed291a683dd5c27668cad0834 100644 (file)
@@ -781,7 +781,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
                WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
        }
 
-       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_add_timer(req);
 }
 
@@ -1076,6 +1075,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
 
 #define BLK_MQ_RESOURCE_DELAY  3               /* ms units */
 
+/*
+ * Returns true if we did some work AND can potentially do more.
+ */
 bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                             bool got_budget)
 {
@@ -1206,8 +1208,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                        blk_mq_run_hw_queue(hctx, true);
                else if (needs_restart && (ret == BLK_STS_RESOURCE))
                        blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+
+               return false;
        }
 
+       /*
+        * If the host/device is unable to accept more work, inform the
+        * caller of that.
+        */
+       if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+               return false;
+
        return (queued + errors) != 0;
 }
 
index 01e2b353a2b9aadc2b5d20fe227739d1d0ae296b..15c1f5e12eb89460bc42eb7f5807eaa03254e51d 100644 (file)
@@ -144,6 +144,7 @@ do_local:
 
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL(__blk_complete_request);
 
 /**
  * blk_complete_request - end I/O on a request
index 4b8a48d48ba13394cf0ae7a7dc0016696ae5efd6..f2cfd56e1606ed9d8e1da979a1e1e6cdcb506a38 100644 (file)
@@ -210,6 +210,7 @@ void blk_add_timer(struct request *req)
        if (!req->timeout)
                req->timeout = q->rq_timeout;
 
+       req->rq_flags &= ~RQF_TIMED_OUT;
        blk_rq_set_deadline(req, jiffies + req->timeout);
 
        /*
index 66602c48995643dcff921e6f10bba8cd203d3c5c..3da540faf6735c2c3ccb6a81ae2ffb4443aedd3c 100644 (file)
@@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
        } else if (hdr->din_xfer_len) {
                ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
                                hdr->din_xfer_len, GFP_KERNEL);
-       } else {
-               ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
        }
 
        if (ret)
index 945f4b8610e0c7d85242b500141d4bc1c0f671a2..e0de4dd448b3c7238e8656b572de72206302bf87 100644 (file)
@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
@@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
                return 0;
        }
 
-       if (n > resp->num) {
+       if (n >= resp->num) {
                pr_debug("Response has %d tokens. Can't access %d\n",
                         resp->num, n);
                return 0;
index 150d82da8e996d8c28be4ae3e4bd64a0331c9c08..1efd6fa0dc608c2a3d598b56c798f3e772a2bdbc 100644 (file)
@@ -1,3 +1,3 @@
 #include <linux/kernel.h>
 
-extern const char __initdata *const blacklist_hashes[];
+extern const char __initconst *const blacklist_hashes[];
index 49fa8582138b2df45e087f3a31a80ac5d5bbdc2a..c166f424871c86a356b15eff8bdd3b2be6406a87 100644 (file)
@@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
 }
 EXPORT_SYMBOL_GPL(af_alg_async_cb);
 
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
+/**
+ * af_alg_poll - poll system call handler
+ */
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
        struct af_alg_ctx *ctx = ask->private;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (!ctx->more || ctx->used)
                mask |= EPOLLIN | EPOLLRDNORM;
@@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL_GPL(af_alg_poll_mask);
+EXPORT_SYMBOL_GPL(af_alg_poll);
 
 /**
  * af_alg_alloc_areq - allocate struct af_alg_async_req
@@ -1148,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
 
                /* make one iovec available as scatterlist */
                err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
-               if (err < 0)
+               if (err < 0) {
+                       rsgl->sg_num_bytes = 0;
                        return err;
+               }
 
                /* chain the new scatterlist with previous one */
                if (areq->last_rsgl)
index 825524f274384fdfd2a569be01e593d8f41a72b2..c40a8c7ee8aedcb0f6adb3afb1e0bb60a233d68c 100644 (file)
@@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = {
        .sendmsg        =       aead_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       aead_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int aead_check_key(struct socket *sock)
@@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = {
        .sendmsg        =       aead_sendmsg_nokey,
        .sendpage       =       aead_sendpage_nokey,
        .recvmsg        =       aead_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *aead_bind(const char *name, u32 type, u32 mask)
index 4c04eb9888adf82f68a18d17c9d6e73adc74aa90..cfdaab2b7d766d517e239687bf2232e09a749991 100644 (file)
@@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = {
        .sendmsg        =       skcipher_sendmsg,
        .sendpage       =       af_alg_sendpage,
        .recvmsg        =       skcipher_recvmsg,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static int skcipher_check_key(struct socket *sock)
@@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
        .sendmsg        =       skcipher_sendmsg_nokey,
        .sendpage       =       skcipher_sendpage_nokey,
        .recvmsg        =       skcipher_recvmsg_nokey,
-       .poll_mask      =       af_alg_poll_mask,
+       .poll           =       af_alg_poll,
 };
 
 static void *skcipher_bind(const char *name, u32 type, u32 mask)
index 7d81e6bb461a330a225658dde9b002b3b24e26bc..b6cabac4b62ba6b920cb5947c56db5839711bcc7 100644 (file)
@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
                return -EINVAL;
        }
 
+       if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+               /* Discard the BIT STRING metadata */
+               if (vlen < 1 || *(const u8 *)value != 0)
+                       return -EBADMSG;
+
+               value++;
+               vlen--;
+       }
+
        ctx->cert->raw_sig = value;
        ctx->cert->raw_sig_size = vlen;
        return 0;
index 9fbcde307daf90b554ac5e96da627f0f77eb24e9..5eede3749e646b425614aa86de9143c82545fcc6 100644 (file)
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
                union morus640_block_in tail;
 
                memcpy(tail.bytes, src, size);
+               memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
 
-               crypto_morus640_load_a(&m, src);
+               crypto_morus640_load_a(&m, tail.bytes);
                crypto_morus640_core(state, &m);
                crypto_morus640_store_a(tail.bytes, &m);
                memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
index 264ec12c0b9c334a16d743651771a18b616616a0..7f6735d9003f13c1e4adb7ffde8d8be5cc700fe1 100644 (file)
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
        st[24] ^= bc[ 4];
 }
 
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
 {
        int round;
 
index 38a286975c31e152206b3e55b28473a2763a717a..f8fecfec5df9b85be3e0d6c78a54087952140fdc 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_domain.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 
 #include "internal.h"
@@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void)
        mutex_unlock(&lpss_iosf_mutex);
 }
 
-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+       bool wakeup = runtime || device_may_wakeup(dev);
        int ret;
 
        if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
         * wrong status for devices being about to be powered off. See
         * lpss_iosf_enter_d3_state() for further information.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if ((runtime || !pm_suspend_via_firmware()) &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_enter_d3_state();
 
        return ret;
 }
 
-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
 {
        struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
        int ret;
@@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev)
         * This call is kept first to be in symmetry with
         * acpi_lpss_runtime_suspend() one.
         */
-       if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+       if ((runtime || !pm_resume_via_firmware()) &&
+           lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
                lpss_iosf_exit_d3_state();
 
        ret = acpi_dev_resume(dev);
@@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
                return 0;
 
        ret = pm_generic_suspend_late(dev);
-       return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+       return ret ? ret : acpi_lpss_suspend(dev, false);
 }
 
 static int acpi_lpss_resume_early(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev);
+       int ret = acpi_lpss_resume(dev, false);
 
        return ret ? ret : pm_generic_resume_early(dev);
 }
@@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
 
 static int acpi_lpss_runtime_resume(struct device *dev)
 {
-       int ret = acpi_lpss_resume(dev);
+       int ret = acpi_lpss_resume(dev, true);
 
        return ret ? ret : pm_generic_runtime_resume(dev);
 }
index fc0c2e2328cd35218c71a2db56f3634cce4a0d46..fe9d46d81750792350270c4f6c1920a77e236a05 100644 (file)
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
                return_ACPI_STATUS(status);
        }
 
-       /*
-        * 1) Disable all GPEs
-        * 2) Enable all wakeup GPEs
-        */
+       /* Disable all GPEs */
        status = acpi_hw_disable_all_gpes();
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
+       /*
+        * If the target sleep state is S5, clear all GPEs and fixed events too
+        */
+       if (sleep_state == ACPI_STATE_S5) {
+               status = acpi_hw_clear_acpi_status();
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
        acpi_gbl_system_awake_and_running = FALSE;
 
+        /* Enable all wakeup GPEs */
        status = acpi_hw_enable_all_wakeup_gpes();
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
index 5a64ddaed8a3782f94e278424368a7ce7167bfbb..e474302726926dd0c997c9432da0f9488af65011 100644 (file)
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
        switch (lookup_status) {
        case AE_ALREADY_EXISTS:
 
-               acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
                message = "Failure creating";
                break;
 
        case AE_NOT_FOUND:
 
-               acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+               acpi_os_printf(ACPI_MSG_BIOS_ERROR);
                message = "Could not resolve";
                break;
 
        default:
 
-               acpi_os_printf("\n" ACPI_MSG_ERROR);
+               acpi_os_printf(ACPI_MSG_ERROR);
                message = "Failure resolving";
                break;
        }
index b0113a5802a3c073f5787de456bc601f0f8c11cd..d79ad844c78fcee1e51cfa7cde066363e45f51ef 100644 (file)
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
                         */
                        pr_err("extension failed to load: %s", hook->name);
                        __battery_hook_unregister(hook, 0);
-                       return;
+                       goto end;
                }
        }
        pr_info("new extension: %s\n", hook->name);
+end:
        mutex_unlock(&hook_mutex);
 }
 EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
 */
 static void battery_hook_add_battery(struct acpi_battery *battery)
 {
-       struct acpi_battery_hook *hook_node;
+       struct acpi_battery_hook *hook_node, *tmp;
 
        mutex_lock(&hook_mutex);
        INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
         * when a battery gets hotplugged or initialized
         * during the battery module initialization.
         */
-       list_for_each_entry(hook_node, &battery_hook_list, list) {
+       list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
                if (hook_node->add_battery(battery->bat)) {
                        /*
                         * The notification of the extensions has failed, to
                         * prevent further errors we will unload the extension.
                         */
-                       __battery_hook_unregister(hook_node, 0);
                        pr_err("error in extension, unloading: %s",
                                        hook_node->name);
+                       __battery_hook_unregister(hook_node, 0);
                }
        }
        mutex_unlock(&hook_mutex);
index bb94cf0731feb92b89b78cec274668fe204ed1e4..442a9e24f4397674041fa9f557bd90fef7763297 100644 (file)
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
        }
 }
 
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+       {
+               .ident = "Thinkpad X1 Carbon 6th",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+               },
+       },
+       { },
+};
+
 int __init acpi_ec_init(void)
 {
        int result;
@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
        if (result)
                return result;
 
+       /*
+        * Disable EC wakeup on following systems to prevent periodic
+        * wakeup from EC GPE.
+        */
+       if (dmi_check_system(acpi_ec_no_wakeup)) {
+               ec_no_wakeup = true;
+               pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+       }
+
        /* Drivers must be started after acpi_ec_query_init() */
        dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
        /*
index d15814e1727fad991bf8c24c2d7fa728ad1bfcad..7c479002e798bf92f3dc58263c3c2064182922bb 100644 (file)
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
        const guid_t *guid;
        int rc, i;
 
+       if (cmd_rc)
+               *cmd_rc = -EINVAL;
        func = cmd;
        if (cmd == ND_CMD_CALL) {
                call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                 * If we return an error (like elsewhere) then caller wouldn't
                 * be able to rely upon data returned to make calculation.
                 */
+               if (cmd_rc)
+                       *cmd_rc = 0;
                return 0;
        }
 
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
                mutex_lock(&acpi_desc->init_mutex);
                rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-                               work_busy(&acpi_desc->dwork.work)
+                               acpi_desc->scrub_busy
                                && !acpi_desc->cancel ? "+\n" : "\n");
                mutex_unlock(&acpi_desc->init_mutex);
        }
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
        return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+       lockdep_assert_held(&acpi_desc->init_mutex);
+
+       acpi_desc->scrub_busy = 1;
+       /* note this should only be set from within the workqueue */
+       if (tmo)
+               acpi_desc->scrub_tmo = tmo;
+       queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+       __sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+       lockdep_assert_held(&acpi_desc->init_mutex);
+
+       acpi_desc->scrub_busy = 0;
+       acpi_desc->scrub_count++;
+       if (acpi_desc->scrub_count_state)
+               sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
        struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
        mutex_lock(&acpi_desc->init_mutex);
        query_rc = acpi_nfit_query_poison(acpi_desc);
        tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-       if (tmo) {
-               queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-               acpi_desc->scrub_tmo = tmo;
-       } else {
-               acpi_desc->scrub_count++;
-               if (acpi_desc->scrub_count_state)
-                       sysfs_notify_dirent(acpi_desc->scrub_count_state);
-       }
+       if (tmo)
+               __sched_ars(acpi_desc, tmo);
+       else
+               notify_ars_done(acpi_desc);
        memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
        mutex_unlock(&acpi_desc->init_mutex);
 }
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
                        break;
                }
 
-       queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+       sched_ars(acpi_desc);
        return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
                }
        }
        if (scheduled) {
-               queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+               sched_ars(acpi_desc);
                dev_dbg(dev, "ars_scan triggered\n");
        }
        mutex_unlock(&acpi_desc->init_mutex);
index 7d15856a739f9dc70cbb4e325d95395829cb6b63..a97ff42fe311bfa5041f54d67124aed4b85deb4d 100644 (file)
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
        unsigned int max_ars;
        unsigned int scrub_count;
        unsigned int scrub_mode;
+       unsigned int scrub_busy:1;
        unsigned int cancel:1;
        unsigned long dimm_cmd_force_en;
        unsigned long bus_cmd_force_en;
index 7ca41bf023c9f354cad85617f0cceab228640d65..8df9abfa947b0dca4719c674fd645d187ade242d 100644 (file)
@@ -45,6 +45,8 @@
 #include <linux/uaccess.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
 #include "internal.h"
 
 #define _COMPONENT             ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
 }
 EXPORT_SYMBOL(acpi_check_region);
 
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+                                             void *_res, void **return_value)
+{
+       struct acpi_mem_space_context **mem_ctx;
+       union acpi_operand_object *handler_obj;
+       union acpi_operand_object *region_obj2;
+       union acpi_operand_object *region_obj;
+       struct resource *res = _res;
+       acpi_status status;
+
+       region_obj = acpi_ns_get_attached_object(handle);
+       if (!region_obj)
+               return AE_OK;
+
+       handler_obj = region_obj->region.handler;
+       if (!handler_obj)
+               return AE_OK;
+
+       if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               return AE_OK;
+
+       if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+               return AE_OK;
+
+       region_obj2 = acpi_ns_get_secondary_object(region_obj);
+       if (!region_obj2)
+               return AE_OK;
+
+       mem_ctx = (void *)&region_obj2->extra.region_context;
+
+       if (!(mem_ctx[0]->address >= res->start &&
+             mem_ctx[0]->address < res->end))
+               return AE_OK;
+
+       status = handler_obj->address_space.setup(region_obj,
+                                                 ACPI_REGION_DEACTIVATE,
+                                                 NULL, (void **)mem_ctx);
+       if (ACPI_SUCCESS(status))
+               region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+       return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on a
+ * memory region that may overlap with operation regions, primarily allowing
+ * them to safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped the next time
+ * they are accessed, so drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level)
+{
+       if (!(res->flags & IORESOURCE_MEM))
+               return AE_TYPE;
+
+       return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+                                  acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
 /*
  * Let drivers know whether the resource checks are effective
  */
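A hedged sketch of the intended driver-side use of acpi_release_memory(); the device, BAR, and the ACPI_ROOT_OBJECT/max-depth walk are illustrative assumptions, not taken from this patch:

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/pci.h>

/* Release any ACPI SystemMemory mappings overlapping a BAR before
 * remapping it write-combined; ACPI re-establishes its own mappings
 * on the next operation-region access. */
static void __iomem *example_map_bar_wc(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	if (ACPI_FAILURE(acpi_release_memory(ACPI_ROOT_OBJECT, res,
					     ACPI_UINT32_MAX)))
		return NULL;

	return ioremap_wc(res->start, resource_size(res));
}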
index e5ea1974d1e3820db7e97f8437a7931de70784f8..d1e26cb599bfca340e076500b9e27ec2f3c0bc73 100644 (file)
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
        if (cpu_node) {
                cpu_node = acpi_find_processor_package_id(table, cpu_node,
                                                          level, flag);
-               /* Only the first level has a guaranteed id */
-               if (level == 0)
+               /*
+                * As per the specification, if the processor structure
+                * represents an actual processor, then the ACPI processor ID
+                * must be valid. For processor containers,
+                * ACPI_PPTT_ACPI_PROCESSOR_ID_VALID should be set if the UID
+                * is valid.
+                */
+               if (level == 0 ||
+                   cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
                        return cpu_node->acpi_processor_id;
                return ACPI_PTR_DIFF(cpu_node, table);
        }
index 2b16e7c8fff357645d3cec4069330b89985347bb..39b181d6bd0d8cf2cbcd9dde1cf89b373ecae6a4 100644 (file)
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
        tristate "Calxeda Highbank SATA support"
-       depends on HAS_DMA
        depends on ARCH_HIGHBANK || COMPILE_TEST
        help
          This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
 
 config SATA_MV
        tristate "Marvell SATA support"
-       depends on HAS_DMA
        depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
                   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
        select GENERIC_PHY
index 738fb22978ddcd14ad1956c5119972f19b17d2a6..b2b9eba1d214765723165f1d3d7c1bda64720207 100644 (file)
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
        { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
        { PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+       { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
        return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+       static const struct dmi_system_id sysids[] = {
+               /* Various Lenovo 50 series have LPM issues with older BIOSen */
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+                       },
+                       .driver_data = "20180406", /* 1.31 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+                       },
+                       .driver_data = "20180420", /* 1.28 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+                       },
+                       .driver_data = "20180315", /* 1.33 */
+               },
+               {
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+                       },
+                       /*
+                        * Note: date based on release notes; 2.35 has been
+                        * reported to be good, but I've been unable to get
+                        * hold of the reporter to confirm the DMI BIOS date.
+                        * TODO: fix this.
+                        */
+                       .driver_data = "20180310", /* 2.35 */
+               },
+               { }     /* terminate list */
+       };
+       const struct dmi_system_id *dmi = dmi_first_match(sysids);
+       int year, month, date;
+       char buf[9];
+
+       if (!dmi)
+               return false;
+
+       dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+       snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+       return strcmp(buf, dmi->driver_data) < 0;
+}
+
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func)                       \
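The quirk works because zero-padded "YYYYMMDD" strings sort chronologically under strcmp(); a tiny standalone illustration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *bios_date  = "20180105";	/* from DMI_BIOS_DATE */
	const char *first_good = "20180310";	/* cutoff from the table */

	/* Lexicographic order == chronological order for fixed-width dates */
	printf("LPM quirk %s\n",
	       strcmp(bios_date, first_good) < 0 ? "applied" : "not needed");
	return 0;
}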
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        "quirky BIOS, skipping spindown on poweroff\n");
        }
 
+       if (ahci_broken_lpm(pdev)) {
+               pi.flags |= ATA_FLAG_NO_LPM;
+               dev_warn(&pdev->dev,
+                        "BIOS update required for Link Power Management support\n");
+       }
+
        if (ahci_broken_suspend(pdev)) {
                hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
                dev_warn(&pdev->dev,
index 0045dacd814b44ec21f87e4acceb07e69056f214..72d90b4c3aaefa4b9051d02383b55c9e3899072b 100644 (file)
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
  *
  * Return: 0 on success; Error code otherwise.
  */
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp, port_fbs;
index 965842a08743f38d08d4a4c58cc345ffc4c81f97..09620c2ffa0f72e1a696d10d3e4480818b101e51 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
        /* get the slot number from the message */
        pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-       if (pmp < EM_MAX_SLOTS)
+       if (pmp < EM_MAX_SLOTS) {
+               pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
                emp = &pp->em_priv[pmp];
-       else
+       } else {
                return -EINVAL;
+       }
 
        /* mask off the activity bits if we are in sw_activity
         * mode, user should turn off sw_activity before setting
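array_index_nospec() is the standard Spectre-v1 idiom: clamp the index after the bounds check so a mispredicted branch cannot speculatively index out of bounds. A generic sketch of the pattern, not tied to this driver:

#include <linux/errno.h>
#include <linux/nospec.h>

/* Validate, then sanitize: 'idx' stays in [0, nr) even under
 * speculative execution of a mispredicted bounds check. */
static int read_slot(const int *table, size_t nr, size_t idx, int *out)
{
	if (idx >= nr)
		return -EINVAL;
	idx = array_index_nospec(idx, nr);
	*out = table[idx];
	return 0;
}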
index 27d15ed7fa3d03771f020cf064749f6f9fe38633..cc71c63df3819f8da0ff312fed83dc17d706136c 100644 (file)
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
            (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
                dev->horkage |= ATA_HORKAGE_NOLPM;
 
+       if (ap->flags & ATA_FLAG_NO_LPM)
+               dev->horkage |= ATA_HORKAGE_NOLPM;
+
        if (dev->horkage & ATA_HORKAGE_NOLPM) {
                ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
                dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
index d5412145d76d60c2cc1393f07315bff2431c28a0..01306c018398fa16583cab46bd1e51b9ccf86309 100644 (file)
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
                list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
                        struct ata_queued_cmd *qc;
 
-                       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-                               qc = __ata_qc_from_tag(ap, i);
+                       ata_qc_for_each_raw(ap, qc, i) {
                                if (qc->flags & ATA_QCFLAG_ACTIVE &&
                                    qc->scsicmd == scmd)
                                        break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
+       struct ata_queued_cmd *qc;
        unsigned int tag;
        int nr = 0;
 
        /* count only non-internal commands */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               if (ata_tag_internal(tag))
-                       continue;
-               if (ata_qc_from_tag(ap, tag))
+       ata_qc_for_each(ap, qc, tag) {
+               if (qc)
                        nr++;
        }
 
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
                goto out_unlock;
 
        if (cnt == ap->fastdrain_cnt) {
+               struct ata_queued_cmd *qc;
                unsigned int tag;
 
                /* No progress during the last interval, tag all
                 * in-flight qcs as timed out and freeze the port.
                 */
-               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-                       struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+               ata_qc_for_each(ap, qc, tag) {
                        if (qc)
                                qc->err_mask |= AC_ERR_TIMEOUT;
                }
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
 
 static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 {
+       struct ata_queued_cmd *qc;
        int tag, nr_aborted = 0;
 
        WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
        ata_eh_set_pending(ap, 0);
 
        /* include internal tag in iteration */
-       for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_with_internal(ap, qc, tag) {
                if (qc && (!link || qc->dev->link == link)) {
                        qc->flags |= ATA_QCFLAG_FAILED;
                        ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
                return;
 
        /* has LLDD analyzed already? */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
 
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 {
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_queued_cmd *qc;
        struct ata_device *dev;
        unsigned int all_err_mask = 0, eflags = 0;
        int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 
        all_err_mask |= ehc->i.err_mask;
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link)
                        continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
 {
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &link->eh_context;
+       struct ata_queued_cmd *qc;
        const char *frozen, *desc;
        char tries_buf[6] = "";
        int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
        if (ehc->i.desc[0] != '\0')
                desc = ehc->i.desc;
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED) ||
                    ata_dev_phys_link(qc->dev) != link ||
                    ((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
                  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
 #endif
 
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+       ata_qc_for_each_raw(ap, qc, tag) {
                struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
                char data_buf[20] = "";
                char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
  */
 void ata_eh_finish(struct ata_port *ap)
 {
+       struct ata_queued_cmd *qc;
        int tag;
 
        /* retry or finish qcs */
-       for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-               struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+       ata_qc_for_each_raw(ap, qc, tag) {
                if (!(qc->flags & ATA_QCFLAG_FAILED))
                        continue;
 
index 6a91d04351d9b64d20251febd070ad28bfb3eb38..aad1b01447de6924df4b1c1713d5fc402df112a0 100644 (file)
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
                 */
                goto invalid_param_len;
        }
-       if (block > dev->n_sectors)
-               goto out_of_range;
 
        all = cdb[14] & 0x1;
+       if (all) {
+               /*
+                * Ignore the block address (zone ID) as defined by ZBC.
+                */
+               block = 0;
+       } else if (block >= dev->n_sectors) {
+               /*
+                * Block must be a valid zone ID (a zone start LBA).
+                */
+               fp = 2;
+               goto invalid_fld;
+       }
 
        if (ata_ncq_enabled(qc->dev) &&
            ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
  invalid_fld:
        ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
        return 1;
- out_of_range:
-       /* "Logical Block Address out of range" */
-       ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-       return 1;
 invalid_param_len:
        /* "Parameter list length error" */
        ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
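In ZBC, zone-management commands carrying the ALL bit apply to every zone and the zone-ID field is ignored, which is why the new code zeroes the block address instead of range-checking it. A standalone restatement of the decode, illustrative only:

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the hunk above: with ALL set, force the zone ID to 0 and
 * accept; without it, the ID must be a valid LBA on the device (the
 * start-of-zone check happens further down the stack). */
static bool zbc_out_zone_ok(bool all, uint64_t n_sectors, uint64_t *zone_id)
{
	if (all) {
		*zone_id = 0;
		return true;
	}
	return *zone_id < n_sectors;
}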
index b8d9cfc60374e08dbed9b2f646d51aa4fec0e025..4dc528bf8e85e3088fa55859d056613e8db73281 100644 (file)
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
 {
        /* We let libATA core do actual (queue) tag allocation */
 
-       /* all non NCQ/queued commands should have tag#0 */
-       if (ata_tag_internal(tag)) {
-               DPRINTK("mapping internal cmds to tag#0\n");
-               return 0;
-       }
-
        if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
                DPRINTK("tag %d invalid : out of range\n", tag);
                return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 
        /* Workaround for data length mismatch errata */
        if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
-               for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-                       qc = ata_qc_from_tag(ap, tag);
+               ata_qc_for_each_with_internal(ap, qc, tag) {
                        if (qc && ata_is_atapi(qc->tf.protocol)) {
                                u32 hcontrol;
                                /* Set HControl[27] to clear error registers */
index 10ae11aa1926f3dca460e81879d3b24d8bc58a8b..72c9b922a77bc7793bb20ccd6432f249bcce45e1 100644 (file)
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
-       struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
-       sdev0 = ap->host->ports[0]->link.device[0].sdev;
-       sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /*
index ff81a576347e5154c10c997717548be69e81bbab..82532c299bb5964a429e81353b9c5f94d9bb5ed2 100644 (file)
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
        skb_queue_head_init(&iadev->rx_dma_q);  
        iadev->rx_free_desc_qhead = NULL;   
 
-       iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+       iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
        if (!iadev->rx_open) {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);  
index a8d2eb0ceb8d8f78788182f81f8e1e9f9dc8fbbb..2c288d1f42bba0fcdf31ccec72c069bfa60688b9 100644 (file)
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
                                        return -EFAULT;
                                if (pool < 0 || pool > ZATM_LAST_POOL)
                                        return -EINVAL;
+                               pool = array_index_nospec(pool,
+                                                         ZATM_LAST_POOL + 1);
                                if (copy_from_user(&info,
                                    &((struct zatm_pool_req __user *) arg)->info,
                                    sizeof(info))) return -EFAULT;
index b074f242a43594fc3d3a383a9dce5d126e8f3a78..704f442958103545aa89ad0e986130aa6ebc5b06 100644 (file)
@@ -8,10 +8,7 @@ obj-y                  := component.o core.o bus.o dd.o syscore.o \
                           topology.o container.o property.o cacheinfo.o \
                           devcon.o
 obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
-obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-y                          += firmware_loader/
 obj-$(CONFIG_NUMA)     += node.o
index 36622b52e419db9573c5cfb31f03bf740324e961..df3e1a44707acc74010cf5ce6fab815c4f744896 100644 (file)
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
                        link->rpm_active = true;
                }
                pm_runtime_new_link(consumer);
+               /*
+                * If the link is being added by the consumer driver at probe
+                * time, balance the decrementation of the supplier's runtime PM
+                * usage counter after consumer probe in driver_probe_device().
+                */
+               if (consumer->links.status == DL_DEV_PROBING)
+                       pm_runtime_get_noresume(supplier);
        }
        get_device(supplier);
        link->supplier = supplier;
@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
                        switch (consumer->links.status) {
                        case DL_DEV_PROBING:
                                /*
-                                * Balance the decrementation of the supplier's
-                                * runtime PM usage counter after consumer probe
-                                * in driver_probe_device().
+                                * Some callers expect the link creation during
+                                * consumer driver probe to resume the supplier
+                                * even without DL_FLAG_RPM_ACTIVE.
                                 */
                                if (flags & DL_FLAG_PM_RUNTIME)
-                                       pm_runtime_get_sync(supplier);
+                                       pm_runtime_resume(supplier);
 
                                link->status = DL_STATE_CONSUMER_PROBE;
                                break;
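A hedged sketch of the consumer-probe case these hunks rebalance (the driver and the supplier lookup are hypothetical): a runtime-PM link created during probe now bumps the supplier's usage counter via pm_runtime_get_noresume(), and the core drops it again once probing finishes.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

/* Hypothetical consumer probe creating a runtime-PM managed link;
 * example_find_supplier() stands in for whatever lookup the driver
 * really uses. */
static int example_consumer_probe(struct platform_device *pdev)
{
	struct device *supplier = example_find_supplier(&pdev->dev);
	struct device_link *link;

	if (!supplier)
		return -EPROBE_DEFER;

	link = device_link_add(&pdev->dev, supplier, DL_FLAG_PM_RUNTIME);
	if (!link)
		return -EINVAL;

	return 0;
}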
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
deleted file mode 100644 (file)
index 597d408..0000000
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Coherent per-device memory handling.
- * Borrowed from i386
- */
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-
-struct dma_coherent_mem {
-       void            *virt_base;
-       dma_addr_t      device_base;
-       unsigned long   pfn_base;
-       int             size;
-       int             flags;
-       unsigned long   *bitmap;
-       spinlock_t      spinlock;
-       bool            use_dev_dma_pfn_offset;
-};
-
-static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
-
-static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
-{
-       if (dev && dev->dma_mem)
-               return dev->dma_mem;
-       return NULL;
-}
-
-static inline dma_addr_t dma_get_device_base(struct device *dev,
-                                            struct dma_coherent_mem * mem)
-{
-       if (mem->use_dev_dma_pfn_offset)
-               return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
-       else
-               return mem->device_base;
-}
-
-static int dma_init_coherent_memory(
-       phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
-       struct dma_coherent_mem **mem)
-{
-       struct dma_coherent_mem *dma_mem = NULL;
-       void __iomem *mem_base = NULL;
-       int pages = size >> PAGE_SHIFT;
-       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
-       int ret;
-
-       if (!size) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       mem_base = memremap(phys_addr, size, MEMREMAP_WC);
-       if (!mem_base) {
-               ret = -EINVAL;
-               goto out;
-       }
-       dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
-       if (!dma_mem) {
-               ret = -ENOMEM;
-               goto out;
-       }
-       dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-       if (!dma_mem->bitmap) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       dma_mem->virt_base = mem_base;
-       dma_mem->device_base = device_addr;
-       dma_mem->pfn_base = PFN_DOWN(phys_addr);
-       dma_mem->size = pages;
-       dma_mem->flags = flags;
-       spin_lock_init(&dma_mem->spinlock);
-
-       *mem = dma_mem;
-       return 0;
-
-out:
-       kfree(dma_mem);
-       if (mem_base)
-               memunmap(mem_base);
-       return ret;
-}
-
-static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
-{
-       if (!mem)
-               return;
-
-       memunmap(mem->virt_base);
-       kfree(mem->bitmap);
-       kfree(mem);
-}
-
-static int dma_assign_coherent_memory(struct device *dev,
-                                     struct dma_coherent_mem *mem)
-{
-       if (!dev)
-               return -ENODEV;
-
-       if (dev->dma_mem)
-               return -EBUSY;
-
-       dev->dma_mem = mem;
-       return 0;
-}
-
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                               dma_addr_t device_addr, size_t size, int flags)
-{
-       struct dma_coherent_mem *mem;
-       int ret;
-
-       ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
-       if (ret)
-               return ret;
-
-       ret = dma_assign_coherent_memory(dev, mem);
-       if (ret)
-               dma_release_coherent_memory(mem);
-       return ret;
-}
-EXPORT_SYMBOL(dma_declare_coherent_memory);
-
-void dma_release_declared_memory(struct device *dev)
-{
-       struct dma_coherent_mem *mem = dev->dma_mem;
-
-       if (!mem)
-               return;
-       dma_release_coherent_memory(mem);
-       dev->dma_mem = NULL;
-}
-EXPORT_SYMBOL(dma_release_declared_memory);
-
-void *dma_mark_declared_memory_occupied(struct device *dev,
-                                       dma_addr_t device_addr, size_t size)
-{
-       struct dma_coherent_mem *mem = dev->dma_mem;
-       unsigned long flags;
-       int pos, err;
-
-       size += device_addr & ~PAGE_MASK;
-
-       if (!mem)
-               return ERR_PTR(-EINVAL);
-
-       spin_lock_irqsave(&mem->spinlock, flags);
-       pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
-       err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
-       spin_unlock_irqrestore(&mem->spinlock, flags);
-
-       if (err != 0)
-               return ERR_PTR(err);
-       return mem->virt_base + (pos << PAGE_SHIFT);
-}
-EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
-
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-               ssize_t size, dma_addr_t *dma_handle)
-{
-       int order = get_order(size);
-       unsigned long flags;
-       int pageno;
-       void *ret;
-
-       spin_lock_irqsave(&mem->spinlock, flags);
-
-       if (unlikely(size > (mem->size << PAGE_SHIFT)))
-               goto err;
-
-       pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-       if (unlikely(pageno < 0))
-               goto err;
-
-       /*
-        * Memory was found in the coherent area.
-        */
-       *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-       ret = mem->virt_base + (pageno << PAGE_SHIFT);
-       spin_unlock_irqrestore(&mem->spinlock, flags);
-       memset(ret, 0, size);
-       return ret;
-err:
-       spin_unlock_irqrestore(&mem->spinlock, flags);
-       return NULL;
-}
-
-/**
- * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
- * @dev:       device from which we allocate memory
- * @size:      size of requested memory area
- * @dma_handle:        This will be filled with the correct dma handle
- * @ret:       This pointer will be filled with the virtual address
- *             to allocated area.
- *
- * This function should be only called from per-arch dma_alloc_coherent()
- * to support allocation from per-device coherent memory pools.
- *
- * Returns 0 if dma_alloc_coherent should continue with allocating from
- * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
- */
-int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
-               dma_addr_t *dma_handle, void **ret)
-{
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-       if (!mem)
-               return 0;
-
-       *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
-       if (*ret)
-               return 1;
-
-       /*
-        * In the case where the allocation can not be satisfied from the
-        * per-device area, try to fall back to generic memory if the
-        * constraints allow it.
-        */
-       return mem->flags & DMA_MEMORY_EXCLUSIVE;
-}
-EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
-
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
-{
-       if (!dma_coherent_default_memory)
-               return NULL;
-
-       return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-                       dma_handle);
-}
-
-static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
-                                      int order, void *vaddr)
-{
-       if (mem && vaddr >= mem->virt_base && vaddr <
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-               unsigned long flags;
-
-               spin_lock_irqsave(&mem->spinlock, flags);
-               bitmap_release_region(mem->bitmap, page, order);
-               spin_unlock_irqrestore(&mem->spinlock, flags);
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * dma_release_from_dev_coherent() - free memory to device coherent memory pool
- * @dev:       device from which the memory was allocated
- * @order:     the order of pages allocated
- * @vaddr:     virtual address of allocated pages
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, releases that memory.
- *
- * Returns 1 if we correctly released the memory, or 0 if the caller should
- * proceed with releasing memory from generic pools.
- */
-int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
-{
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-       return __dma_release_from_coherent(mem, order, vaddr);
-}
-EXPORT_SYMBOL(dma_release_from_dev_coherent);
-
-int dma_release_from_global_coherent(int order, void *vaddr)
-{
-       if (!dma_coherent_default_memory)
-               return 0;
-
-       return __dma_release_from_coherent(dma_coherent_default_memory, order,
-                       vaddr);
-}
-
-static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
-               struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
-{
-       if (mem && vaddr >= mem->virt_base && vaddr + size <=
-                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
-               unsigned long off = vma->vm_pgoff;
-               int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
-               int user_count = vma_pages(vma);
-               int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-               *ret = -ENXIO;
-               if (off < count && user_count <= count - off) {
-                       unsigned long pfn = mem->pfn_base + start + off;
-                       *ret = remap_pfn_range(vma, vma->vm_start, pfn,
-                                              user_count << PAGE_SHIFT,
-                                              vma->vm_page_prot);
-               }
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
- * @dev:       device from which the memory was allocated
- * @vma:       vm_area for the userspace memory
- * @vaddr:     cpu address returned by dma_alloc_from_dev_coherent
- * @size:      size of the memory buffer allocated
- * @ret:       result from remap_pfn_range()
- *
- * This checks whether the memory was allocated from the per-device
- * coherent memory pool and if so, maps that memory to the provided vma.
- *
- * Returns 1 if @vaddr belongs to the device coherent pool and the caller
- * should return @ret, or 0 if they should proceed with mapping memory from
- * generic areas.
- */
-int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
-                          void *vaddr, size_t size, int *ret)
-{
-       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
-
-       return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
-}
-EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
-
-int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
-                                  size_t size, int *ret)
-{
-       if (!dma_coherent_default_memory)
-               return 0;
-
-       return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
-                                       vaddr, size, ret);
-}
-
-/*
- * Support for reserved memory regions defined in device tree
- */
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-static struct reserved_mem *dma_reserved_default_memory __initdata;
-
-static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
-{
-       struct dma_coherent_mem *mem = rmem->priv;
-       int ret;
-
-       if (!mem) {
-               ret = dma_init_coherent_memory(rmem->base, rmem->base,
-                                              rmem->size,
-                                              DMA_MEMORY_EXCLUSIVE, &mem);
-               if (ret) {
-                       pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
-                       return ret;
-               }
-       }
-       mem->use_dev_dma_pfn_offset = true;
-       rmem->priv = mem;
-       dma_assign_coherent_memory(dev, mem);
-       return 0;
-}
-
-static void rmem_dma_device_release(struct reserved_mem *rmem,
-                                   struct device *dev)
-{
-       if (dev)
-               dev->dma_mem = NULL;
-}
-
-static const struct reserved_mem_ops rmem_dma_ops = {
-       .device_init    = rmem_dma_device_init,
-       .device_release = rmem_dma_device_release,
-};
-
-static int __init rmem_dma_setup(struct reserved_mem *rmem)
-{
-       unsigned long node = rmem->fdt_node;
-
-       if (of_get_flat_dt_prop(node, "reusable", NULL))
-               return -EINVAL;
-
-#ifdef CONFIG_ARM
-       if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
-               pr_err("Reserved memory: regions without no-map are not yet supported\n");
-               return -EINVAL;
-       }
-
-       if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
-               WARN(dma_reserved_default_memory,
-                    "Reserved memory: region for default DMA coherent area is redefined\n");
-               dma_reserved_default_memory = rmem;
-       }
-#endif
-
-       rmem->ops = &rmem_dma_ops;
-       pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
-               &rmem->base, (unsigned long)rmem->size / SZ_1M);
-       return 0;
-}
-
-static int __init dma_init_reserved_memory(void)
-{
-       const struct reserved_mem_ops *ops;
-       int ret;
-
-       if (!dma_reserved_default_memory)
-               return -ENOMEM;
-
-       ops = dma_reserved_default_memory->ops;
-
-       /*
-        * We rely on rmem_dma_device_init() not propagating an error from
-        * dma_assign_coherent_memory() for a "NULL" device.
-        */
-       ret = ops->device_init(dma_reserved_default_memory, NULL);
-
-       if (!ret) {
-               dma_coherent_default_memory = dma_reserved_default_memory->priv;
-               pr_info("DMA: default coherent area is set\n");
-       }
-
-       return ret;
-}
-
-core_initcall(dma_init_reserved_memory);
-
-RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
-#endif
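The file is removed here rather than fixed; the per-device coherent pool API itself lives on elsewhere in the tree (kernel/dma/ in later kernels). For context, a minimal sketch of how a driver used it at the time; the SRAM address and sizes are placeholders:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/* Hypothetical probe: carve a device-local coherent pool out of a
 * fixed on-chip SRAM window, then allocate from it; with
 * DMA_MEMORY_EXCLUSIVE there is no fallback to system memory. */
static int example_sram_probe(struct platform_device *pdev)
{
	dma_addr_t dma;
	void *cpu;
	int ret;

	ret = dma_declare_coherent_memory(&pdev->dev, 0x30000000, 0x30000000,
					  SZ_1M, DMA_MEMORY_EXCLUSIVE);
	if (ret)
		return ret;

	cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma, GFP_KERNEL);
	if (!cpu)
		return -ENOMEM;

	/* hand 'dma' to the device, use 'cpu' from the kernel */
	return 0;
}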
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
deleted file mode 100644 (file)
index d987dcd..0000000
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Contiguous Memory Allocator for DMA mapping framework
- * Copyright (c) 2010-2011 by Samsung Electronics.
- * Written by:
- *     Marek Szyprowski <m.szyprowski@samsung.com>
- *     Michal Nazarewicz <mina86@mina86.com>
- */
-
-#define pr_fmt(fmt) "cma: " fmt
-
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-#  define DEBUG
-#endif
-#endif
-
-#include <asm/page.h>
-#include <asm/dma-contiguous.h>
-
-#include <linux/memblock.h>
-#include <linux/err.h>
-#include <linux/sizes.h>
-#include <linux/dma-contiguous.h>
-#include <linux/cma.h>
-
-#ifdef CONFIG_CMA_SIZE_MBYTES
-#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
-#else
-#define CMA_SIZE_MBYTES 0
-#endif
-
-struct cma *dma_contiguous_default_area;
-
-/*
- * Default global CMA area size can be defined in kernel's .config.
- * This is useful mainly for distro maintainers to create a kernel
- * that works correctly for most supported systems.
- * The size can be set in bytes or as a percentage of the total memory
- * in the system.
- *
- * Users, who want to set the size of global CMA area for their system
- * should use cma= kernel parameter.
- */
-static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
-static phys_addr_t size_cmdline = -1;
-static phys_addr_t base_cmdline;
-static phys_addr_t limit_cmdline;
-
-static int __init early_cma(char *p)
-{
-       pr_debug("%s(%s)\n", __func__, p);
-       size_cmdline = memparse(p, &p);
-       if (*p != '@')
-               return 0;
-       base_cmdline = memparse(p + 1, &p);
-       if (*p != '-') {
-               limit_cmdline = base_cmdline + size_cmdline;
-               return 0;
-       }
-       limit_cmdline = memparse(p + 1, &p);
-
-       return 0;
-}
-early_param("cma", early_cma);
-
-#ifdef CONFIG_CMA_SIZE_PERCENTAGE
-
-static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
-{
-       struct memblock_region *reg;
-       unsigned long total_pages = 0;
-
-       /*
-        * We cannot use memblock_phys_mem_size() here, because
-        * memblock_analyze() has not been called yet.
-        */
-       for_each_memblock(memory, reg)
-               total_pages += memblock_region_memory_end_pfn(reg) -
-                              memblock_region_memory_base_pfn(reg);
-
-       return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
-}
-
-#else
-
-static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
-{
-       return 0;
-}
-
-#endif
-
-/**
- * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
- * @limit: End address of the reserved memory (optional, 0 for any).
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory.
- */
-void __init dma_contiguous_reserve(phys_addr_t limit)
-{
-       phys_addr_t selected_size = 0;
-       phys_addr_t selected_base = 0;
-       phys_addr_t selected_limit = limit;
-       bool fixed = false;
-
-       pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
-
-       if (size_cmdline != -1) {
-               selected_size = size_cmdline;
-               selected_base = base_cmdline;
-               selected_limit = min_not_zero(limit_cmdline, limit);
-               if (base_cmdline + size_cmdline == limit_cmdline)
-                       fixed = true;
-       } else {
-#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
-               selected_size = size_bytes;
-#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
-               selected_size = cma_early_percent_memory();
-#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
-               selected_size = min(size_bytes, cma_early_percent_memory());
-#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
-               selected_size = max(size_bytes, cma_early_percent_memory());
-#endif
-       }
-
-       if (selected_size && !dma_contiguous_default_area) {
-               pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-                        (unsigned long)selected_size / SZ_1M);
-
-               dma_contiguous_reserve_area(selected_size, selected_base,
-                                           selected_limit,
-                                           &dma_contiguous_default_area,
-                                           fixed);
-       }
-}
-
-/**
- * dma_contiguous_reserve_area() - reserve custom contiguous area
- * @size: Size of the reserved area (in bytes),
- * @base: Base address of the reserved area optional, use 0 for any
- * @limit: End address of the reserved memory (optional, 0 for any).
- * @res_cma: Pointer to store the created cma region.
- * @fixed: hint about where to place the reserved area
- *
- * This function reserves memory from early allocator. It should be
- * called by arch specific code once the early allocator (memblock or bootmem)
- * has been activated and all other subsystems have already allocated/reserved
- * memory. It allows the creation of custom reserved areas for specific
- * devices.
- *
- * If @fixed is true, reserve contiguous area at exactly @base.  If false,
- * reserve in range from @base to @limit.
- */
-int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-                                      phys_addr_t limit, struct cma **res_cma,
-                                      bool fixed)
-{
-       int ret;
-
-       ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
-                                       "reserved", res_cma);
-       if (ret)
-               return ret;
-
-       /* Architecture specific contiguous memory fixup. */
-       dma_contiguous_early_fixup(cma_get_base(*res_cma),
-                               cma_get_size(*res_cma));
-
-       return 0;
-}
-
-/**
- * dma_alloc_from_contiguous() - allocate pages from contiguous area
- * @dev:   Pointer to device for which the allocation is performed.
- * @count: Requested number of pages.
- * @align: Requested alignment of pages (in PAGE_SIZE order).
- * @gfp_mask: GFP flags to use for this allocation.
- *
- * This function allocates a memory buffer for the specified device. It uses
- * the device-specific contiguous memory area if available, or the default
- * global one. Requires the architecture-specific dev_get_cma_area() helper
- * function.
- */
-struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
-                                      unsigned int align, gfp_t gfp_mask)
-{
-       if (align > CONFIG_CMA_ALIGNMENT)
-               align = CONFIG_CMA_ALIGNMENT;
-
-       return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
-}
-
-/**
- * dma_release_from_contiguous() - release allocated pages
- * @dev:   Pointer to device for which the pages were allocated.
- * @pages: Allocated pages.
- * @count: Number of allocated pages.
- *
- * This function releases memory allocated by dma_alloc_from_contiguous().
- * It returns false when provided pages do not belong to contiguous area and
- * true otherwise.
- */
-bool dma_release_from_contiguous(struct device *dev, struct page *pages,
-                                int count)
-{
-       return cma_release(dev_get_cma_area(dev), pages, count);
-}
-
-/*
- * Support for reserved memory regions defined in device tree
- */
-#ifdef CONFIG_OF_RESERVED_MEM
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
-
-#undef pr_fmt
-#define pr_fmt(fmt) fmt
-
-static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
-{
-       dev_set_cma_area(dev, rmem->priv);
-       return 0;
-}
-
-static void rmem_cma_device_release(struct reserved_mem *rmem,
-                                   struct device *dev)
-{
-       dev_set_cma_area(dev, NULL);
-}
-
-static const struct reserved_mem_ops rmem_cma_ops = {
-       .device_init    = rmem_cma_device_init,
-       .device_release = rmem_cma_device_release,
-};
-
-static int __init rmem_cma_setup(struct reserved_mem *rmem)
-{
-       phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
-       phys_addr_t mask = align - 1;
-       unsigned long node = rmem->fdt_node;
-       struct cma *cma;
-       int err;
-
-       if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
-           of_get_flat_dt_prop(node, "no-map", NULL))
-               return -EINVAL;
-
-       if ((rmem->base & mask) || (rmem->size & mask)) {
-               pr_err("Reserved memory: incorrect alignment of CMA region\n");
-               return -EINVAL;
-       }
-
-       err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
-       if (err) {
-               pr_err("Reserved memory: unable to setup CMA region\n");
-               return err;
-       }
-       /* Architecture specific contiguous memory fixup. */
-       dma_contiguous_early_fixup(rmem->base, rmem->size);
-
-       if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
-               dma_contiguous_set_default(cma);
-
-       rmem->ops = &rmem_cma_ops;
-       rmem->priv = cma;
-
-       pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
-               &rmem->base, (unsigned long)rmem->size / SZ_1M);
-
-       return 0;
-}
-RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
-#endif
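Likewise a relocation, not a removal of functionality. A sketch of the allocator round-trip using the signatures visible above (illustrative; most drivers reach CMA indirectly through dma_alloc_coherent()):

#include <linux/device.h>
#include <linux/dma-contiguous.h>

/* Allocate 16 contiguous pages from the device's CMA area (or the
 * global default), then hand them back. */
static void example_cma_roundtrip(struct device *dev)
{
	size_t count = 16;
	struct page *pages;

	pages = dma_alloc_from_contiguous(dev, count,
					  get_order(count * PAGE_SIZE),
					  GFP_KERNEL);
	if (!pages)
		return;

	/* ... DMA into page_address(pages) ... */

	dma_release_from_contiguous(dev, pages, count);
}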
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
deleted file mode 100644 (file)
index f831a58..0000000
+++ /dev/null
@@ -1,345 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
- *
- * Copyright (c) 2006  SUSE Linux Products GmbH
- * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
- */
-
-#include <linux/acpi.h>
-#include <linux/dma-mapping.h>
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-/*
- * Managed DMA API
- */
-struct dma_devres {
-       size_t          size;
-       void            *vaddr;
-       dma_addr_t      dma_handle;
-       unsigned long   attrs;
-};
-
-static void dmam_release(struct device *dev, void *res)
-{
-       struct dma_devres *this = res;
-
-       dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
-                       this->attrs);
-}
-
-static int dmam_match(struct device *dev, void *res, void *match_data)
-{
-       struct dma_devres *this = res, *match = match_data;
-
-       if (this->vaddr == match->vaddr) {
-               WARN_ON(this->size != match->size ||
-                       this->dma_handle != match->dma_handle);
-               return 1;
-       }
-       return 0;
-}
-
-/**
- * dmam_alloc_coherent - Managed dma_alloc_coherent()
- * @dev: Device to allocate coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- *
- * Managed dma_alloc_coherent().  Memory allocated using this function
- * will be automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t gfp)
-{
-       struct dma_devres *dr;
-       void *vaddr;
-
-       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-       if (!dr)
-               return NULL;
-
-       vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
-       if (!vaddr) {
-               devres_free(dr);
-               return NULL;
-       }
-
-       dr->vaddr = vaddr;
-       dr->dma_handle = *dma_handle;
-       dr->size = size;
-
-       devres_add(dev, dr);
-
-       return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_coherent);
-
-/**
- * dmam_free_coherent - Managed dma_free_coherent()
- * @dev: Device to free coherent memory for
- * @size: Size of allocation
- * @vaddr: Virtual address of the memory to free
- * @dma_handle: DMA handle of the memory to free
- *
- * Managed dma_free_coherent().
- */
-void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
-                       dma_addr_t dma_handle)
-{
-       struct dma_devres match_data = { size, vaddr, dma_handle };
-
-       dma_free_coherent(dev, size, vaddr, dma_handle);
-       WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
-}
-EXPORT_SYMBOL(dmam_free_coherent);
-
-/**
- * dmam_alloc_attrs - Managed dma_alloc_attrs()
- * @dev: Device to allocate non_coherent memory for
- * @size: Size of allocation
- * @dma_handle: Out argument for allocated DMA handle
- * @gfp: Allocation flags
- * @attrs: Flags in the DMA_ATTR_* namespace.
- *
- * Managed dma_alloc_attrs().  Memory allocated using this function will be
- * automatically released on driver detach.
- *
- * RETURNS:
- * Pointer to allocated memory on success, NULL on failure.
- */
-void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
-{
-       struct dma_devres *dr;
-       void *vaddr;
-
-       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
-       if (!dr)
-               return NULL;
-
-       vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
-       if (!vaddr) {
-               devres_free(dr);
-               return NULL;
-       }
-
-       dr->vaddr = vaddr;
-       dr->dma_handle = *dma_handle;
-       dr->size = size;
-       dr->attrs = attrs;
-
-       devres_add(dev, dr);
-
-       return vaddr;
-}
-EXPORT_SYMBOL(dmam_alloc_attrs);
-
-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
-
-static void dmam_coherent_decl_release(struct device *dev, void *res)
-{
-       dma_release_declared_memory(dev);
-}
-
-/**
- * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
- * @dev: Device to declare coherent memory for
- * @phys_addr: Physical address of coherent memory to be declared
- * @device_addr: Device address of coherent memory to be declared
- * @size: Size of coherent memory to be declared
- * @flags: Flags
- *
- * Managed dma_declare_coherent_memory().
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
-                                dma_addr_t device_addr, size_t size, int flags)
-{
-       void *res;
-       int rc;
-
-       res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
-       if (!res)
-               return -ENOMEM;
-
-       rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
-                                        flags);
-       if (!rc)
-               devres_add(dev, res);
-       else
-               devres_free(res);
-
-       return rc;
-}
-EXPORT_SYMBOL(dmam_declare_coherent_memory);
-
-/**
- * dmam_release_declared_memory - Managed dma_release_declared_memory().
- * @dev: Device to release declared coherent memory for
- *
- * Managed dmam_release_declared_memory().
- */
-void dmam_release_declared_memory(struct device *dev)
-{
-       WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
-}
-EXPORT_SYMBOL(dmam_release_declared_memory);
-
-#endif
-
-/*
- * Create scatter-list for the already allocated DMA buffer.
- */
-int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
-                void *cpu_addr, dma_addr_t handle, size_t size)
-{
-       struct page *page = virt_to_page(cpu_addr);
-       int ret;
-
-       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-       if (unlikely(ret))
-               return ret;
-
-       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-       return 0;
-}
-EXPORT_SYMBOL(dma_common_get_sgtable);
-
-/*
- * Create userspace mapping for the DMA-coherent memory.
- */
-int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
-                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
-{
-       int ret = -ENXIO;
-#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
-       unsigned long user_count = vma_pages(vma);
-       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned long off = vma->vm_pgoff;
-
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
-               return ret;
-
-       if (off < count && user_count <= (count - off))
-               ret = remap_pfn_range(vma, vma->vm_start,
-                                     page_to_pfn(virt_to_page(cpu_addr)) + off,
-                                     user_count << PAGE_SHIFT,
-                                     vma->vm_page_prot);
-#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
-
-       return ret;
-}
-EXPORT_SYMBOL(dma_common_mmap);
-
-#ifdef CONFIG_MMU
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-                       size_t size, unsigned long vm_flags, pgprot_t prot,
-                       const void *caller)
-{
-       struct vm_struct *area;
-
-       area = get_vm_area_caller(size, vm_flags, caller);
-       if (!area)
-               return NULL;
-
-       if (map_vm_area(area, prot, pages)) {
-               vunmap(area->addr);
-               return NULL;
-       }
-
-       return area;
-}
-
-/*
- * remaps an array of PAGE_SIZE pages into another vm_area
- * Cannot be used in non-sleeping contexts
- */
-void *dma_common_pages_remap(struct page **pages, size_t size,
-                       unsigned long vm_flags, pgprot_t prot,
-                       const void *caller)
-{
-       struct vm_struct *area;
-
-       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-       if (!area)
-               return NULL;
-
-       area->pages = pages;
-
-       return area->addr;
-}
-
-/*
- * remaps an allocated contiguous region into another vm_area.
- * Cannot be used in non-sleeping contexts
- */
-
-void *dma_common_contiguous_remap(struct page *page, size_t size,
-                       unsigned long vm_flags,
-                       pgprot_t prot, const void *caller)
-{
-       int i;
-       struct page **pages;
-       struct vm_struct *area;
-
-       pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
-       if (!pages)
-               return NULL;
-
-       for (i = 0; i < (size >> PAGE_SHIFT); i++)
-               pages[i] = nth_page(page, i);
-
-       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
-
-       kfree(pages);
-
-       if (!area)
-               return NULL;
-       return area->addr;
-}
-
-/*
- * unmaps a range previously mapped by dma_common_*_remap
- */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
-{
-       struct vm_struct *area = find_vm_area(cpu_addr);
-
-       if (!area || (area->flags & vm_flags) != vm_flags) {
-               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-               return;
-       }
-
-       unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
-       vunmap(cpu_addr);
-}
-#endif
-
-/*
- * enables DMA API use for a device
- */
-int dma_configure(struct device *dev)
-{
-       if (dev->bus->dma_configure)
-               return dev->bus->dma_configure(dev);
-       return 0;
-}
-
-void dma_deconfigure(struct device *dev)
-{
-       of_dma_deconfigure(dev);
-       acpi_dma_deconfigure(dev);
-}
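
Worth noting before this code moves: the removed dma_common_mmap() is a compact reference for the bounds check any coherent-mmap path needs before calling remap_pfn_range(). A minimal sketch of just that check, with a hypothetical helper name (not a kernel API):

        /*
         * The user-requested window (vm_pgoff + vma_pages(vma)) must fit
         * inside the DMA buffer before the pfn range is remapped.
         */
        static bool dma_mmap_range_ok(struct vm_area_struct *vma, size_t size)
        {
                unsigned long user_count = vma_pages(vma);              /* pages requested */
                unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;   /* pages in buffer */
                unsigned long off = vma->vm_pgoff;                      /* offset into buffer */

                return off < count && user_count <= count - off;
        }

Both conditions matter: the offset must land inside the buffer, and the requested length must not run past its end.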
index 4925af5c4cf039e6cc07918967aa6995353e4bd8..9e8484189034b83efb218fb3eb4604a0b2cd8f6d 100644 (file)
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
 }
 
 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
-                                unsigned int index)
+                                unsigned int index, bool power_on)
 {
        struct of_phandle_args pd_args;
        struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
        dev->pm_domain->detach = genpd_dev_pm_detach;
        dev->pm_domain->sync = genpd_dev_pm_sync;
 
-       genpd_lock(pd);
-       ret = genpd_power_on(pd, 0);
-       genpd_unlock(pd);
+       if (power_on) {
+               genpd_lock(pd);
+               ret = genpd_power_on(pd, 0);
+               genpd_unlock(pd);
+       }
 
        if (ret)
                genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
                                       "#power-domain-cells") != 1)
                return 0;
 
-       return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+       return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
        }
 
        /* Try to attach the device to the PM domain at the specified index. */
-       ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+       ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
        if (ret < 1) {
                device_unregister(genpd_dev);
                return ret ? ERR_PTR(ret) : NULL;
        }
 
-       pm_runtime_set_active(genpd_dev);
        pm_runtime_enable(genpd_dev);
+       genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
 
        return genpd_dev;
 }
@@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * power domain corresponding to a DT node's "required-opps" property.
  *
  * @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
  *     the device node itself (if it doesn't have an OPP table) or a node
  *     within the OPP table of a device (if device has an OPP table).
- * @state: Pointer to return performance state.
  *
  * Returns performance state corresponding to the "required-opps" property of
  * a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
  * Returns performance state on success and 0 on failure.
  */
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                                              struct device_node *opp_node)
+                                              struct device_node *np)
 {
        struct generic_pm_domain *genpd;
        struct dev_pm_opp *opp;
@@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
 
        genpd_lock(genpd);
 
-       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+       opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
        if (IS_ERR(opp)) {
                dev_err(dev, "Failed to find required OPP: %ld\n",
                        PTR_ERR(opp));
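
The behavioral change in this genpd hunk is easy to miss: genpd_dev_pm_attach_by_id() no longer powers the domain on unconditionally. Instead the domain is governed by runtime PM of the returned virtual device, and a power-off work item is queued so an unused domain can settle into the off state. A hedged consumer-side sketch (my_dev and the index are illustrative; drivers typically reach this via the dev_pm_domain_attach_by_id() wrapper):

        struct device *pd_dev;

        pd_dev = genpd_dev_pm_attach_by_id(my_dev, 1);
        if (IS_ERR_OR_NULL(pd_dev))
                return pd_dev ? PTR_ERR(pd_dev) : -ENODEV;

        pm_runtime_get_sync(pd_dev);    /* powers the domain on when needed */
        /* ... access hardware inside the domain ... */
        pm_runtime_put(pd_dev);         /* lets genpd power the domain back off */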
index a47e4987ee467ed04578b47499c29def872525f6..d146fedc38bb26535e3960963058a9635e5d7f7b 100644 (file)
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
        _drbd_start_io_acct(device, req);
 
        /* process discards always from our submitter thread */
-       if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
-           (bio_op(bio) & REQ_OP_DISCARD))
+       if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+           bio_op(bio) == REQ_OP_DISCARD)
                goto queue_for_submitter_thread;
 
        if (rw == WRITE && req->private_bio && req->i.size
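
The drbd fix above corrects an operator confusion: bio_op() returns a request op code, a small integer, not a single bit in a mask, so testing it with & matches unrelated ops whose codes share bits. With the values in this kernel generation (REQ_OP_WRITE == 1, REQ_OP_DISCARD == 3, REQ_OP_WRITE_SAME == 7, REQ_OP_WRITE_ZEROES == 9), a plain write satisfied op & REQ_OP_WRITE_ZEROES, routing every write to the submitter thread. A sketch:

        unsigned int op = REQ_OP_WRITE_SAME;    /* 7 == 0b0111 */

        if (op & REQ_OP_DISCARD)                /* 0b0111 & 0b0011 != 0: wrongly true */
                pr_info("misclassified as discard\n");

        if (op == REQ_OP_DISCARD)               /* correct: false for WRITE_SAME */
                pr_info("real discard\n");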
index 1476cb3439f46e53a8f42a9397fb6b19afd8ff95..5e793dd7adfbd096239f4d0994d2f20e24b2b596 100644 (file)
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
                what = COMPLETED_OK;
        }
 
-       bio_put(req->private_bio);
        req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+       bio_put(bio);
 
        /* not req_mod(), we need irqsave here! */
        spin_lock_irqsave(&device->resource->req_lock, flags);
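
The two-line swap above is a use-after-free fix: req->private_bio is the very bio being completed, so the old order dropped the last reference with bio_put() and then read bio->bi_status from potentially freed memory. The rule, on the names at hand:

        /* read everything you still need while the object is alive ... */
        req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
        /* ... then drop the reference that may free it */
        bio_put(bio);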
index d6b6f434fd4bb7652faf597ef9ab6c7b6dd7c362..4cb1d1be3cfbc9c14a6129ecdc2de5368ac668c4 100644 (file)
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
                arg = (unsigned long) compat_ptr(arg);
        case LOOP_SET_FD:
        case LOOP_CHANGE_FD:
+       case LOOP_SET_BLOCK_SIZE:
                err = lo_ioctl(bdev, mode, cmd, arg);
                break;
        default:
index 3b7083b8ecbb3b0ffcad0d2879954780075333ee..74a05561b620a3be51bd30a09f1eeaa1b9168876 100644 (file)
@@ -76,6 +76,7 @@ struct link_dead_args {
 #define NBD_HAS_CONFIG_REF             4
 #define NBD_BOUND                      5
 #define NBD_DESTROY_ON_DISCONNECT      6
+#define NBD_DISCONNECT_ON_CLOSE        7
 
 struct nbd_config {
        u32 flags;
@@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd);
 static void nbd_connect_reply(struct genl_info *info, int index);
 static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
 static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
 
 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
 {
@@ -1305,6 +1307,12 @@ out:
 static void nbd_release(struct gendisk *disk, fmode_t mode)
 {
        struct nbd_device *nbd = disk->private_data;
+       struct block_device *bdev = bdget_disk(disk, 0);
+
+       if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+                       bdev->bd_openers == 0)
+               nbd_disconnect_and_put(nbd);
+
        nbd_config_put(nbd);
        nbd_put(nbd);
 }
@@ -1705,6 +1713,10 @@ again:
                                &config->runtime_flags);
                        put_dev = true;
                }
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                               &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1761,17 @@ out:
        return ret;
 }
 
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+       mutex_lock(&nbd->config_lock);
+       nbd_disconnect(nbd);
+       nbd_clear_sock(nbd);
+       mutex_unlock(&nbd->config_lock);
+       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+                              &nbd->config->runtime_flags))
+               nbd_config_put(nbd);
+}
+
 static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
        struct nbd_device *nbd;
@@ -1781,13 +1804,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
                nbd_put(nbd);
                return 0;
        }
-       mutex_lock(&nbd->config_lock);
-       nbd_disconnect(nbd);
-       nbd_clear_sock(nbd);
-       mutex_unlock(&nbd->config_lock);
-       if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
-                              &nbd->config->runtime_flags))
-               nbd_config_put(nbd);
+       nbd_disconnect_and_put(nbd);
        nbd_config_put(nbd);
        nbd_put(nbd);
        return 0;
@@ -1798,7 +1815,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
        struct nbd_device *nbd = NULL;
        struct nbd_config *config;
        int index;
-       int ret = -EINVAL;
+       int ret = 0;
        bool put_dev = false;
 
        if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1855,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
            !nbd->task_recv) {
                dev_err(nbd_to_dev(nbd),
                        "not configured, cannot reconfigure\n");
+               ret = -EINVAL;
                goto out;
        }
 
@@ -1862,6 +1880,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
                                               &config->runtime_flags))
                                refcount_inc(&nbd->refs);
                }
+
+               if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+                       set_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               } else {
+                       clear_bit(NBD_DISCONNECT_ON_CLOSE,
+                                       &config->runtime_flags);
+               }
        }
 
        if (info->attrs[NBD_ATTR_SOCKETS]) {
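
Both the configure and reconfigure paths now mirror NBD_CFLAG_DISCONNECT_ON_CLOSE from the netlink flags into runtime_flags; reconfigure also clears it so the option can be switched off again. As a side note, the set/clear pair could be written more compactly with assign_bit() from <linux/bitops.h>, assuming this tree has it (it appeared around v4.16):

        assign_bit(NBD_DISCONNECT_ON_CLOSE, &config->runtime_flags,
                   flags & NBD_CFLAG_DISCONNECT_ON_CLOSE);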
index 7948049f6c4321b02e1611383dae1be86a7748f1..042c778e5a4e0bf2009c38a6b1cf37bc5d23ce89 100644 (file)
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
 {
        pr_info("null: rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+       __blk_complete_request(rq);
        return BLK_EH_DONE;
 }
 
index 14d159e2042d5c488c1e23b3247508aab0a2ebff..2dc33e65d2d0c957199f1e3c1bf8028d4e09ca88 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 
index 1cc29629d23807b83bfee9f7d23fec1d5757941d..80d60f43db56123076fed5ea2047da35af88af8f 100644 (file)
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
        const char *name;
        int nr_fck = 0, nr_ick = 0, i, error = 0;
 
-       ddata->clock_roles = devm_kzalloc(ddata->dev,
-                                         sizeof(*ddata->clock_roles) *
+       ddata->clock_roles = devm_kcalloc(ddata->dev,
                                          SYSC_MAX_CLOCKS,
+                                         sizeof(*ddata->clock_roles),
                                          GFP_KERNEL);
        if (!ddata->clock_roles)
                return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
                return -EINVAL;
        }
 
-       ddata->clocks = devm_kzalloc(ddata->dev,
-                                    sizeof(*ddata->clocks) * ddata->nr_clocks,
+       ddata->clocks = devm_kcalloc(ddata->dev,
+                                    ddata->nr_clocks, sizeof(*ddata->clocks),
                                     GFP_KERNEL);
        if (!ddata->clocks)
                return -ENOMEM;
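
These devm_kcalloc() conversions are not cosmetic: devm_kzalloc(dev, n * size, ...) performs an unchecked multiplication, and kcalloc-style allocators exist precisely to catch the overflow before it becomes an undersized buffer. A sketch of the guard such allocators apply (illustrative, not the exact kernel implementation):

        static inline size_t checked_array_size(size_t n, size_t size)
        {
                if (size != 0 && n > SIZE_MAX / size)
                        return SIZE_MAX;        /* saturate so the allocation fails */
                return n * size;
        }

Here the counts are small constants, so the change is mainly about using the pattern that stays safe if the bounds ever become externally controlled.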
index 53fe633df1e8d9c1187e862b6a305a1905bbc2bb..c9bf2c219841846570c6cffefe8e2e4c59583997 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "agp.h"
 
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
        alpha_agp_info *agp = agp_bridge->dev_private_data;
        dma_addr_t dma_addr;
index e50c29c97ca74d20542a176387d3de8ee4780b79..c69e39fdd02b8c5c9a35931271c45383f4b18da3 100644 (file)
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
 
        /* Address to map to */
        pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
-       aperturebase = tmp << 25;
+       aperturebase = (u64)tmp << 25;
        aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
 
        enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
        pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
        nb_order = (nb_order >> 1) & 7;
        pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
-       nb_aper = nb_base << 25;
+       nb_aper = (u64)nb_base << 25;
 
        /* Northbridge seems to contain crap. Try the AGP bridge. */
 
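
Both agp hunks fix the same 32-bit truncation: the aperture base field is a u32 holding address bits above bit 25, and shifting it left by 25 in 32-bit arithmetic discards everything past bit 31 before the widening assignment. Illustrative values:

        u32 tmp = 0x200;                /* base field read from config space */
        u64 wrong = tmp << 25;          /* shift done in 32 bits: result is 0 */
        u64 right = (u64)tmp << 25;     /* 0x4000000000: base above 4 GiB survives */

Any aperture located above 4 GiB came back as garbage with the old code.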
index 91bb98c42a1ca76376ae0db4b43fd7a89fca27b0..aaf9e5afaad435e2342a15fc963aa91367079957 100644 (file)
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
 
 void hwrng_unregister(struct hwrng *rng)
 {
+       int err;
+
        mutex_lock(&rng_mutex);
 
        list_del(&rng->list);
-       if (current_rng == rng)
-               enable_best_rng();
+       if (current_rng == rng) {
+               err = enable_best_rng();
+               if (err) {
+                       drop_current_rng();
+                       cur_rng_set_by_user = 0;
+               }
+       }
 
        if (list_empty(&rng_list)) {
                mutex_unlock(&rng_mutex);
index ad353be871bf005c6c0ca875d663bed49906c302..90ec010bffbd9776c012586b4e01b24cdd0bd2d6 100644 (file)
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
        return 0;
 
 out_err:
-       ipmi_unregister_smi(new_smi->intf);
-       new_smi->intf = NULL;
+       if (new_smi->intf) {
+               ipmi_unregister_smi(new_smi->intf);
+               new_smi->intf = NULL;
+       }
 
        kfree(init_name);
 
index fbfc05e3f3d1756a58455dbdbf73c90162898f4c..bb882ab161fe1bbb4b678cc9bf105b77273296e3 100644 (file)
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
 int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
 {
        unsigned long flags;
-       int ret = 0;
+       int ret = -ENODATA;
        u8 status;
 
        spin_lock_irqsave(&kcs_bmc->lock, flags);
 
-       if (!kcs_bmc->running) {
-               kcs_force_abort(kcs_bmc);
-               ret = -ENODEV;
-               goto out_unlock;
-       }
-
-       status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
-       switch (status) {
-       case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
-               kcs_bmc_handle_cmd(kcs_bmc);
-               break;
-
-       case KCS_STATUS_IBF:
-               kcs_bmc_handle_data(kcs_bmc);
-               break;
+       status = read_status(kcs_bmc);
+       if (status & KCS_STATUS_IBF) {
+               if (!kcs_bmc->running)
+                       kcs_force_abort(kcs_bmc);
+               else if (status & KCS_STATUS_CMD_DAT)
+                       kcs_bmc_handle_cmd(kcs_bmc);
+               else
+                       kcs_bmc_handle_data(kcs_bmc);
 
-       default:
-               ret = -ENODATA;
-               break;
+               ret = 0;
        }
 
-out_unlock:
        spin_unlock_irqrestore(&kcs_bmc->lock, flags);
 
        return ret;
index a8fb0020ba5ccfb9f4b72b689544299815fab60a..cd888d4ee605e0b9cf5f4970d34989d4b72f3b72 100644 (file)
@@ -402,7 +402,8 @@ static struct poolinfo {
 /*
  * Static global variables
  */
-static DECLARE_WAIT_QUEUE_HEAD(random_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 static struct fasync_struct *fasync;
 
 static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ retry:
 
                /* should we wake readers? */
                if (entropy_bits >= random_read_wakeup_bits &&
-                   wq_has_sleeper(&random_wait)) {
-                       wake_up_interruptible_poll(&random_wait, POLLIN);
+                   wq_has_sleeper(&random_read_wait)) {
+                       wake_up_interruptible(&random_read_wait);
                        kill_fasync(&fasync, SIGIO, POLL_IN);
                }
                /* If the input pool is getting full, send some
@@ -1396,7 +1397,7 @@ retry:
        trace_debit_entropy(r->name, 8 * ibytes);
        if (ibytes &&
            (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
-               wake_up_interruptible_poll(&random_wait, POLLOUT);
+               wake_up_interruptible(&random_write_wait);
                kill_fasync(&fasync, SIGIO, POLL_OUT);
        }
 
@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
                if (nonblock)
                        return -EAGAIN;
 
-               wait_event_interruptible(random_wait,
+               wait_event_interruptible(random_read_wait,
                        ENTROPY_BITS(&input_pool) >=
                        random_read_wakeup_bits);
                if (signal_pending(current))
@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
        return ret;
 }
 
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
-       return &random_wait;
-}
-
 static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
 {
-       __poll_t mask = 0;
+       __poll_t mask;
 
+       poll_wait(file, &random_read_wait, wait);
+       poll_wait(file, &random_write_wait, wait);
+       mask = 0;
        if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
                mask |= EPOLLIN | EPOLLRDNORM;
        if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
@@ -1992,8 +1990,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 const struct file_operations random_fops = {
        .read  = random_read,
        .write = random_write,
-       .get_poll_head  = random_get_poll_head,
-       .poll_mask  = random_poll_mask,
+       .poll  = random_poll,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
        .llseek = noop_llseek,
@@ -2326,7 +2323,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
         * We'll be woken up again once below random_write_wakeup_thresh,
         * or when the calling thread is about to terminate.
         */
-       wait_event_interruptible(random_wait, kthread_should_stop() ||
+       wait_event_interruptible(random_write_wait, kthread_should_stop() ||
                        ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
        mix_pool_bytes(poolp, buffer, count);
        credit_entropy_bits(poolp, entropy);
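
This random.c change reverts to the classic ->poll contract after the transitional get_poll_head()/poll_mask() API was dropped: register the waiter on every wait queue whose wakeup can change readiness, then compute the current mask; poll_wait() itself never blocks. The pattern in isolation, on hypothetical foo_* names:

        static __poll_t foo_poll(struct file *file, poll_table *wait)
        {
                __poll_t mask = 0;

                poll_wait(file, &foo_read_wait, wait);
                poll_wait(file, &foo_write_wait, wait);

                if (foo_readable())
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (foo_writable())
                        mask |= EPOLLOUT | EPOLLWRNORM;
                return mask;
        }

Splitting random_wait into read and write queues also lets each wakeup site target only the sleepers that actually care, instead of waking every waiter on a shared queue.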
index ae40cbe770f059d0ca1aded1dc7fc3530b3153df..0bb25dd009d18467c7ae6f5238e1a88c24844c7b 100644 (file)
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD)                       += sprd/
 obj-$(CONFIG_ARCH_STI)                 += st/
 obj-$(CONFIG_ARCH_STRATIX10)           += socfpga/
 obj-$(CONFIG_ARCH_SUNXI)               += sunxi/
-obj-$(CONFIG_ARCH_SUNXI)               += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU)                        += sunxi-ng/
 obj-$(CONFIG_ARCH_TEGRA)               += tegra/
 obj-y                                  += ti/
 obj-$(CONFIG_CLK_UNIPHIER)             += uniphier/
index aae62a5b8734e859e76a16f995b05f43bf5f9b4a..d1bbee19ed0fcf74edfb2019fc4907ba00533a66 100644 (file)
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
 
        usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
        if (IS_ERR(usb1)) {
-               if (PTR_ERR(usb0) == -EPROBE_DEFER)
+               if (PTR_ERR(usb1) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
 
                dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
index 6a42529d31a91aa644d64c8708b45297ec361efe..cc5614567a70d61cf76aa6777caf9c2f39479c5d 100644 (file)
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
 #ifdef CONFIG_ARCH_DAVINCI_DM355
 extern const struct davinci_psc_init_data dm355_psc_init_data;
 #endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
 extern const struct davinci_psc_init_data dm365_psc_init_data;
 #endif
 #ifdef CONFIG_ARCH_DAVINCI_DM644x
index acaa14cfa25ca3922178e865ddb4daefc676ee2a..49454700f2e5c2e469cbdb037c54900f8c74cf79 100644 (file)
@@ -1,24 +1,24 @@
 # SPDX-License-Identifier: GPL-2.0
 # Common objects
-lib-$(CONFIG_SUNXI_CCU)                += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_reset.o
+obj-y                          += ccu_common.o
+obj-y                          += ccu_mmc_timing.o
+obj-y                          += ccu_reset.o
 
 # Base clock types
-lib-$(CONFIG_SUNXI_CCU)                += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_sdm.o
+obj-y                          += ccu_div.o
+obj-y                          += ccu_frac.o
+obj-y                          += ccu_gate.o
+obj-y                          += ccu_mux.o
+obj-y                          += ccu_mult.o
+obj-y                          += ccu_phase.o
+obj-y                          += ccu_sdm.o
 
 # Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU)                += ccu_mp.o
+obj-y                          += ccu_nk.o
+obj-y                          += ccu_nkm.o
+obj-y                          += ccu_nkmp.o
+obj-y                          += ccu_nm.o
+obj-y                          += ccu_mp.o
 
 # SoC support
 obj-$(CONFIG_SUN50I_A64_CCU)   += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU)  += ccu-sun8i-r40.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80-de.o
 obj-$(CONFIG_SUN9I_A80_CCU)    += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU)                += lib.a
index 57cb2f00fc07ce7f5ffb526bd9bb03ed11287626..d8c7f5750cdb025dfd3eae42d691318fc472e29b 100644 (file)
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
                clk->rating = 400;
-               clk->cpumask = cpu_all_mask;
+               clk->cpumask = cpu_possible_mask;
                if (arch_timer_mem_use_virtual) {
                        clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
index e5cdc3af684cbbe2370406c79af63df143776cd3..2717f88c79040a1ec26e2dd0d01f5d2dd25d9734 100644 (file)
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)
 
        to->private_data = kzalloc(sizeof(struct stm32_timer_private),
                                   GFP_KERNEL);
-       if (!to->private_data)
+       if (!to->private_data) {
+               ret = -ENOMEM;
                goto deinit;
+       }
 
        rstc = of_reset_control_get(node, NULL);
        if (!IS_ERR(rstc)) {
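
The stm32 hunk fixes a classic goto-cleanup bug: jumping to a shared error label while ret still holds 0 makes the init path report success after a failed allocation. The shape of the bug, reduced to a sketch with illustrative names:

        static int foo_init(void)
        {
                int ret = foo_setup();          /* ret == 0 on success */
                if (ret)
                        return ret;

                buf = kzalloc(len, GFP_KERNEL);
                if (!buf) {
                        ret = -ENOMEM;          /* without this, "goto err" returned 0 */
                        goto err;
                }
                return 0;
        err:
                foo_teardown();
                return ret;
        }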
index 1de5ec8d5ea3e9995e3ffd413f728df078c25f8f..ece120da33538d2333c9f0082196b3080d364e2a 100644 (file)
@@ -294,6 +294,7 @@ struct pstate_funcs {
 static struct pstate_funcs pstate_funcs __read_mostly;
 
 static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
 static bool per_cpu_limits __read_mostly;
 static bool hwp_boost __read_mostly;
 
@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
        cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
        cpu->pstate.scaling = pstate_funcs.get_scaling();
        cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
-       cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+       if (hwp_active && !hwp_mode_bdw) {
+               unsigned int phy_max, current_max;
+
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       }
 
        if (pstate_funcs.get_aperf_mperf_shift)
                cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2467,28 +2476,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
 static inline void intel_pstate_request_control_from_smm(void) {}
 #endif /* CONFIG_ACPI */
 
+#define INTEL_PSTATE_HWP_BROADWELL     0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
-       { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+       ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+       ICPU_HWP(X86_MODEL_ANY, 0),
        {}
 };
 
 static int __init intel_pstate_init(void)
 {
+       const struct x86_cpu_id *id;
        int rc;
 
        if (no_load)
                return -ENODEV;
 
-       if (x86_match_cpu(hwp_support_ids)) {
+       id = x86_match_cpu(hwp_support_ids);
+       if (id) {
                copy_cpu_funcs(&core_funcs);
                if (!no_hwp) {
                        hwp_active++;
+                       hwp_mode_bdw = id->driver_data;
                        intel_pstate.attr = hwp_cpufreq_attrs;
                        goto hwp_cpu_matched;
                }
        } else {
-               const struct x86_cpu_id *id;
-
                id = x86_match_cpu(intel_pstate_cpu_ids);
                if (!id)
                        return -ENODEV;
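
The intel_pstate change rides on the driver_data member of struct x86_cpu_id: the first matching table entry wins, so listing the Broadwell models before the X86_MODEL_ANY catch-all lets a single x86_match_cpu() call answer both "is HWP present" and "does the Broadwell quirk apply". The generic shape of the pattern (foo_* names are illustrative):

        static const struct x86_cpu_id foo_ids[] __initconst = {
                { X86_VENDOR_INTEL, 6, INTEL_FAM6_BROADWELL_X, X86_FEATURE_HWP, FOO_QUIRK_BDW },
                { X86_VENDOR_INTEL, 6, X86_MODEL_ANY,          X86_FEATURE_HWP, 0 },
                {}
        };

        const struct x86_cpu_id *id = x86_match_cpu(foo_ids);
        if (id)
                foo_quirk = id->driver_data;    /* 0 unless a specific model hit first */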
index d049fe4b80c48e00d169f3835bb7b70b8022a879..29389accf3e97df7a5700b49e980c88de0974583 100644 (file)
@@ -42,6 +42,8 @@ enum _msm8996_version {
        NUM_OF_MSM8996_VERSIONS,
 };
 
+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
 static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
 static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 {
        struct opp_table *opp_tables[NR_CPUS] = {0};
-       struct platform_device *cpufreq_dt_pdev;
        enum _msm8996_version msm8996_version;
        struct nvmem_cell *speedbin_nvmem;
        struct device_node *np;
@@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        int ret;
 
        cpu_dev = get_cpu_device(0);
-       if (NULL == cpu_dev)
-               ret = -ENODEV;
+       if (!cpu_dev)
+               return -ENODEV;
 
        msm8996_version = qcom_cpufreq_kryo_get_msm_id();
        if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
        }
 
        np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
-       if (IS_ERR(np))
-               return PTR_ERR(np);
+       if (!np)
+               return -ENOENT;
 
        ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
        if (!ret) {
@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
 
        speedbin = nvmem_cell_read(speedbin_nvmem, &len);
        nvmem_cell_put(speedbin_nvmem);
+       if (IS_ERR(speedbin))
+               return PTR_ERR(speedbin);
 
        switch (msm8996_version) {
        case MSM8996_V3:
@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
                BUG();
                break;
        }
+       kfree(speedbin);
 
        for_each_possible_cpu(cpu) {
                cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@ free_opp:
        return ret;
 }
 
+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+       platform_device_unregister(cpufreq_dt_pdev);
+       return 0;
+}
+
 static struct platform_driver qcom_cpufreq_kryo_driver = {
        .probe = qcom_cpufreq_kryo_probe,
+       .remove = qcom_cpufreq_kryo_remove,
        .driver = {
                .name = "qcom-cpufreq-kryo",
        },
@@ -198,8 +209,9 @@ static int __init qcom_cpufreq_kryo_init(void)
        if (unlikely(ret < 0))
                return ret;
 
-       ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
-               "qcom-cpufreq-kryo", -1, NULL, 0));
+       kryo_cpufreq_pdev = platform_device_register_simple(
+               "qcom-cpufreq-kryo", -1, NULL, 0);
+       ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
        if (0 == ret)
                return 0;
 
@@ -208,5 +220,12 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
+static void __init qcom_cpufreq_kryo_exit(void)
+{
+       platform_device_unregister(kryo_cpufreq_pdev);
+       platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
 MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
 MODULE_LICENSE("GPL v2");
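
Before this change the kryo driver registered a platform driver and then a platform device at init, but provided no module_exit(), so neither was ever unregistered and the module could not be removed cleanly. The fix keeps handles to both and tears them down in reverse order. The symmetric pattern, with illustrative names:

        static struct platform_device *foo_pdev;

        static int __init foo_init(void)
        {
                int ret = platform_driver_register(&foo_driver);
                if (ret)
                        return ret;

                foo_pdev = platform_device_register_simple("foo", -1, NULL, 0);
                if (IS_ERR(foo_pdev)) {
                        platform_driver_unregister(&foo_driver);
                        return PTR_ERR(foo_pdev);
                }
                return 0;
        }

        static void __exit foo_exit(void)
        {
                platform_device_unregister(foo_pdev);
                platform_driver_unregister(&foo_driver);
        }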
index 00c7aab8e7d0f5861e778dc4d26affe5c1234603..afebbd87c4aa1d22ca179f558552cb2f410fcc0a 100644 (file)
@@ -1548,15 +1548,14 @@ skip_copy:
                        tp->urg_data = 0;
 
                if ((avail + offset) >= skb->len) {
-                       if (likely(skb))
-                               chtls_free_skb(sk, skb);
-                       buffers_freed++;
                        if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
                                tp->copied_seq += skb->len;
                                hws->rcvpld = skb->hdr_len;
                        } else {
                                tp->copied_seq += hws->rcvpld;
                        }
+                       chtls_free_skb(sk, skb);
+                       buffers_freed++;
                        hws->copied_seq = 0;
                        if (copied >= target &&
                            !skb_peek(&sk->sk_receive_queue))
index de2f8297a210bb4ea3998815353e72b7c11598da..108c37fca78279c06e896afcc411220996872760 100644 (file)
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
        /* prevent private mappings from being established */
        if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
-               dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, attempted private mapping\n",
                                current->comm, func);
                return -EINVAL;
        }
 
        mask = dax_region->align - 1;
        if (vma->vm_start & mask || vma->vm_end & mask) {
-               dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
                                current->comm, func, vma->vm_start, vma->vm_end,
                                mask);
                return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
 
        if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
                        && (vma->vm_flags & VM_DONTCOPY) == 0) {
-               dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, dax range requires MADV_DONTFORK\n",
                                current->comm, func);
                return -EINVAL;
        }
 
        if (!vma_is_dax(vma)) {
-               dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+               dev_info_ratelimited(dev,
+                               "%s: %s: fail, vma is not DAX capable\n",
                                current->comm, func);
                return -EINVAL;
        }
index 903d9c473749c24d636f573aba798a9680df2909..45276abf03aa2bd52aa9af56b8cbd45a4b1e5135 100644 (file)
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
        struct dax_device *dax_dev;
        bool dax_enabled = false;
+       struct request_queue *q;
        pgoff_t pgoff;
        int err, id;
        void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
                return false;
        }
 
+       q = bdev_get_queue(bdev);
+       if (!q || !blk_queue_dax(q)) {
+               pr_debug("%s: error: request queue doesn't support dax\n",
+                               bdevname(bdev, buf));
+               return false;
+       }
+
        err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
        if (err) {
                pr_debug("%s: error: unaligned partition for dax\n",
index fa31cccbe04faf5fa6a8adb07abf6b48a4a6cdd2..6bfa217ed6d0de81d1ef51b37accdba9ae4cd04b 100644 (file)
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
        struct k3_dma_dev *d = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];
 
-       if (request > d->dma_requests)
+       if (request >= d->dma_requests)
                return NULL;
 
        return dma_get_slave_channel(&(d->chans[request].vc.chan));
index defcdde4d358b19cc5430de95fb5e9f16ec538ca..de0957fe966821beb79ee1b75470a8834509ef44 100644 (file)
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
                         1 : PL330_MAX_BURST);
 
index 9b5ca8691f27dcf6561fbd98051b697ab2da6011..a4a931ddf6f695fa21a25a359a94ee0c57f92beb 100644 (file)
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
        od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
-       od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+       if (__dma_omap15xx(od->plat->dma_attr))
+               od->ddev.residue_granularity =
+                               DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+       else
+               od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
        od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
index 951b6c79f166a7d2b4ec14e12096559df9aed684..624a11cb07e23b775d097d930997cc6ebd171b06 100644 (file)
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name,               0444, DMI_PRODUCT_NAME);
 DEFINE_DMI_ATTR_WITH_SHOW(product_version,     0444, DMI_PRODUCT_VERSION);
 DEFINE_DMI_ATTR_WITH_SHOW(product_serial,      0400, DMI_PRODUCT_SERIAL);
 DEFINE_DMI_ATTR_WITH_SHOW(product_uuid,                0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku,         0444, DMI_PRODUCT_SKU);
 DEFINE_DMI_ATTR_WITH_SHOW(product_family,      0444, DMI_PRODUCT_FAMILY);
 DEFINE_DMI_ATTR_WITH_SHOW(board_vendor,                0444, DMI_BOARD_VENDOR);
 DEFINE_DMI_ATTR_WITH_SHOW(board_name,          0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
        ADD_DMI_ATTR(product_serial,    DMI_PRODUCT_SERIAL);
        ADD_DMI_ATTR(product_uuid,      DMI_PRODUCT_UUID);
        ADD_DMI_ATTR(product_family,    DMI_PRODUCT_FAMILY);
+       ADD_DMI_ATTR(product_sku,       DMI_PRODUCT_SKU);
        ADD_DMI_ATTR(board_vendor,      DMI_BOARD_VENDOR);
        ADD_DMI_ATTR(board_name,        DMI_BOARD_NAME);
        ADD_DMI_ATTR(board_version,     DMI_BOARD_VERSION);
index 54e66adef2525179e49ecfe9fc04e253ecc18e51..f2483548cde92d692f748d6a9c7da0cbf98274a3 100644 (file)
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
                dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
                dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
                dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+               dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
                dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
                break;
        case 2:         /* Base Board Information */
index caa37a6dd9d4eca506e3a0c2fed3c636fbd05d2b..a90b0b8fc69a18abb62d10c3f046a88a7300fd5b 100644 (file)
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
        efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
        efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
        efi_status_t status;
-       efi_physical_addr_t log_location, log_last_entry;
+       efi_physical_addr_t log_location = 0, log_last_entry = 0;
        struct linux_efi_tpm_eventlog *log_tbl = NULL;
        unsigned long first_entry_addr, last_entry_addr;
        size_t log_size, last_entry_size;
index dd4edd8f22ceebb67c4f6e1ab486c4ac9e9a1106..7fa793672a7a969239329ef1fccc5a2391c764c8 100644 (file)
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
 
        mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
                              &altera_cvp_ops, conf);
-       if (!mgr)
-               return -ENOMEM;
+       if (!mgr) {
+               ret = -ENOMEM;
+               goto err_unmap;
+       }
 
        pci_set_drvdata(pdev, mgr);
 
index a59c07590ceec2f314768066cbed28126ed68a6d..7dcbac8af9a7a8fb9c1cf46890d661be1ec34592 100644 (file)
@@ -190,6 +190,7 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
 
 enum amdgpu_cp_irq {
        AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch {
 /*
  * ACPI
  */
-struct amdgpu_atif_notification_cfg {
-       bool enabled;
-       int command_code;
-};
-
-struct amdgpu_atif_notifications {
-       bool display_switch;
-       bool expansion_mode_change;
-       bool thermal_state;
-       bool forced_power_state;
-       bool system_power_state;
-       bool display_conf_change;
-       bool px_gfx_switch;
-       bool brightness_change;
-       bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
-       bool system_params;
-       bool sbios_requests;
-       bool select_active_disp;
-       bool lid_state;
-       bool get_tv_standard;
-       bool set_tv_standard;
-       bool get_panel_expansion_mode;
-       bool set_panel_expansion_mode;
-       bool temperature_change;
-       bool graphics_device_types;
-};
-
-struct amdgpu_atif {
-       struct amdgpu_atif_notifications notifications;
-       struct amdgpu_atif_functions functions;
-       struct amdgpu_atif_notification_cfg notification_cfg;
-       struct amdgpu_encoder *encoder_for_bl;
-};
-
 struct amdgpu_atcs_functions {
        bool get_ext_state;
        bool pcie_perf_req;
@@ -1466,7 +1430,7 @@ struct amdgpu_device {
 #if defined(CONFIG_DEBUG_FS)
        struct dentry                   *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
 #endif
-       struct amdgpu_atif              atif;
+       struct amdgpu_atif              *atif;
        struct amdgpu_atcs              atcs;
        struct mutex                    srbm_mutex;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
 static inline bool amdgpu_has_atpx(void) { return false; }
 #endif
 
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
 /*
  * KMS
  */
index 8fa850a070e0fe8ea7a67823008ff7e543d8d1dc..0d8c3fc6eacefcfd71788e4442cc541a031a5416 100644 (file)
 #include "amd_acpi.h"
 #include "atom.h"
 
+struct amdgpu_atif_notification_cfg {
+       bool enabled;
+       int command_code;
+};
+
+struct amdgpu_atif_notifications {
+       bool display_switch;
+       bool expansion_mode_change;
+       bool thermal_state;
+       bool forced_power_state;
+       bool system_power_state;
+       bool display_conf_change;
+       bool px_gfx_switch;
+       bool brightness_change;
+       bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+       bool system_params;
+       bool sbios_requests;
+       bool select_active_disp;
+       bool lid_state;
+       bool get_tv_standard;
+       bool set_tv_standard;
+       bool get_panel_expansion_mode;
+       bool set_panel_expansion_mode;
+       bool temperature_change;
+       bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+       acpi_handle handle;
+
+       struct amdgpu_atif_notifications notifications;
+       struct amdgpu_atif_functions functions;
+       struct amdgpu_atif_notification_cfg notification_cfg;
+       struct amdgpu_encoder *encoder_for_bl;
+};
+
 /* Call the ATIF method
  */
 /**
@@ -46,8 +85,9 @@
  * Executes the requested ATIF function (all asics).
  * Returns a pointer to the acpi output buffer.
  */
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
-               struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+                                          int function,
+                                          struct acpi_buffer *params)
 {
        acpi_status status;
        union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
                atif_arg_elements[1].integer.value = 0;
        }
 
-       status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+       status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+                                     &buffer);
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
  * (all asics).
  * returns 0 on success, error on failure.
  */
-static int amdgpu_atif_verify_interface(acpi_handle handle,
-               struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
 {
        union acpi_object *info;
        struct atif_verify_interface output;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
        if (!info)
                return -EIO;
 
@@ -176,6 +216,35 @@ out:
        return err;
 }
 
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+       acpi_handle handle = NULL;
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+       acpi_status status;
+
+       /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on
+        * dGPU-only systems, ATIF is in the dGPU's namespace.
+        */
+       status = acpi_get_handle(dhandle, "ATIF", &handle);
+       if (ACPI_SUCCESS(status))
+               goto out;
+
+       if (amdgpu_has_atpx()) {
+               status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+                                        &handle);
+               if (ACPI_SUCCESS(status))
+                       goto out;
+       }
+
+       DRM_DEBUG_DRIVER("No ATIF handle found\n");
+       return NULL;
+out:
+       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+       DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+       return handle;
+}
+
 /**
  * amdgpu_atif_get_notification_params - determine notify configuration
  *
@@ -188,15 +257,16 @@ out:
  * where n is specified in the result if a notifier is used.
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
-               struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
 {
        union acpi_object *info;
+       struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
        struct atif_system_params params;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+                               NULL);
        if (!info) {
                err = -EIO;
                goto out;
@@ -250,14 +320,15 @@ out:
  * (all asics).
  * Returns 0 on success, error on failure.
  */
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
-               struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+                                         struct atif_sbios_requests *req)
 {
        union acpi_object *info;
        size_t size;
        int count = 0;
 
-       info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+       info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+                               NULL);
        if (!info)
                return -EIO;
 
@@ -290,11 +361,10 @@ out:
  * Returns NOTIFY code
  */
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
-                       struct acpi_bus_event *event)
+                              struct acpi_bus_event *event)
 {
-       struct amdgpu_atif *atif = &adev->atif;
+       struct amdgpu_atif *atif = adev->atif;
        struct atif_sbios_requests req;
-       acpi_handle handle;
        int count;
 
        DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
        if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
                return NOTIFY_DONE;
 
-       if (!atif->notification_cfg.enabled ||
+       if (!atif ||
+           !atif->notification_cfg.enabled ||
            event->type != atif->notification_cfg.command_code)
                /* Not our event */
                return NOTIFY_DONE;
 
        /* Check pending SBIOS requests */
-       handle = ACPI_HANDLE(&adev->pdev->dev);
-       count = amdgpu_atif_get_sbios_requests(handle, &req);
+       count = amdgpu_atif_get_sbios_requests(atif, &req);
 
        if (count <= 0)
                return NOTIFY_DONE;
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
  */
 int amdgpu_acpi_init(struct amdgpu_device *adev)
 {
-       acpi_handle handle;
-       struct amdgpu_atif *atif = &adev->atif;
+       acpi_handle handle, atif_handle;
+       struct amdgpu_atif *atif;
        struct amdgpu_atcs *atcs = &adev->atcs;
        int ret;
 
@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
                DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
        }
 
+       /* Probe for ATIF, and initialize it if found */
+       atif_handle = amdgpu_atif_probe_handle(handle);
+       if (!atif_handle)
+               goto out;
+
+       atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+       if (!atif) {
+               DRM_WARN("Not enough memory to initialize ATIF\n");
+               goto out;
+       }
+       atif->handle = atif_handle;
+
        /* Call the ATIF method */
-       ret = amdgpu_atif_verify_interface(handle, atif);
+       ret = amdgpu_atif_verify_interface(atif);
        if (ret) {
                DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+               kfree(atif);
                goto out;
        }
+       adev->atif = atif;
 
        if (atif->notifications.brightness_change) {
                struct drm_encoder *tmp;
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
        }
 
        if (atif->functions.system_params) {
-               ret = amdgpu_atif_get_notification_params(handle,
-                               &atif->notification_cfg);
+               ret = amdgpu_atif_get_notification_params(atif);
                if (ret) {
                        DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
                                        ret);
@@ -720,4 +803,6 @@ out:
 void amdgpu_acpi_fini(struct amdgpu_device *adev)
 {
        unregister_acpi_notifier(&adev->acpi_nb);
+       if (adev->atif)
+               kfree(adev->atif);
 }
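
Taken together, the amdgpu_acpi_init() changes switch ATIF from an always-embedded struct to a heap object that exists only when an ATIF handle is actually found. The ownership rule in the error handling is worth spelling out: the allocation is published to adev->atif only after verification succeeds, so every earlier failure path frees locally. Reduced to its shape (illustrative names):

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                goto out;               /* feature stays off; not fatal */

        obj->handle = handle;
        if (verify(obj)) {
                kfree(obj);             /* never published, freed here */
                goto out;
        }
        adev->atif = obj;               /* published: amdgpu_acpi_fini() frees it */

(Incidentally, kfree(NULL) is a no-op, so the if (adev->atif) guard in amdgpu_acpi_fini() is harmless but redundant.)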
index daa06e7c5bb73e2d4073fad2177bf50eee0be006..9ab89371d9e8dd5da264b6dc5f7df3d669a49358 100644 (file)
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
        return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
 }
 
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+       return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
index 82312a7bc6ad5b5a01e232b350f23ac889f47ed7..9c85a90be29375a9bedadd04ee841bd0ae1c5e33 100644 (file)
@@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                r = amdgpu_bo_vm_update_pte(p);
                if (r)
                        return r;
+
+               r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+               if (r)
+                       return r;
        }
 
        return amdgpu_cs_sync_rings(p);
index 3317d1536f4fc352247756e3c650d72c9236916b..6e5284e6c028d7624a60cd630419d1e1507fbc14 100644 (file)
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
        switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
        case CHIP_BONAIRE:
-       case CHIP_HAWAII:
        case CHIP_KAVERI:
        case CHIP_KABINI:
        case CHIP_MULLINS:
+               /*
+                * We have systems in the wild with these ASICs that require
+                * LVDS and VGA support which is not supported with DC.
+                *
+                * Fallback to the non-DC driver here by default so as not to
+                * cause regressions.
+                */
+               return amdgpu_dc > 0;
+       case CHIP_HAWAII:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_POLARIS10:
index 39ec6b8890a1bf200053900b7998e5f33d703a32..e74d620d9699f8a54c273b8b233c81f3efa40f9c 100644 (file)
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
        struct amdgpu_device *adev = ring->adev;
        uint64_t index;
 
-       if (ring != &adev->uvd.inst[ring->me].ring) {
+       if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
                ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
                ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
        } else {
index f70eeed9ed76fa893dabe2218c4c85c4b4aec104..7aaa263ad8c7e0873d46f46cfc81b37b544a9d40 100644 (file)
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
                fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
 
+       /* wrap the last IB with fence */
+       if (job && job->uf_addr) {
+               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+                                      fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+       }
+
        r = amdgpu_fence_emit(ring, f, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (ring->funcs->insert_end)
                ring->funcs->insert_end(ring);
 
-       /* wrap the last IB with fence */
-       if (job && job->uf_addr) {
-               amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
-                                      fence_flags | AMDGPU_FENCE_FLAG_64BIT);
-       }
-
        if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
                amdgpu_ring_patch_cond_exec(ring, patch_offset);
 
index 5e4e1bd9038379fe62666e44318162adfc544fea..3526efa8960e3de2042f944db341dc36bc5bddb2 100644 (file)
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
        domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                adev->vram_pin_size += amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size += amdgpu_bo_size(bo);
+               adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                adev->gart_pin_size += amdgpu_bo_size(bo);
        }
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
-       for (i = 0; i < bo->placement.num_placement; i++) {
-               bo->placements[i].lpfn = 0;
-               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-       }
-       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-       if (unlikely(r)) {
-               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-               goto error;
-       }
 
        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                adev->vram_pin_size -= amdgpu_bo_size(bo);
-               if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-                       adev->invisible_pin_size -= amdgpu_bo_size(bo);
+               adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
        } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                adev->gart_pin_size -= amdgpu_bo_size(bo);
        }
 
-error:
+       for (i = 0; i < bo->placement.num_placement; i++) {
+               bo->placements[i].lpfn = 0;
+               bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+       }
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (unlikely(r))
+               dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+
        return r;
 }
 
index b455da4877829e57b76178ed2300959c4dade7f4..fc818b4d849cd70a05948b457a115e09ddfe42d0 100644 (file)
@@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_get_active_displays(adev);
-                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+                       adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
                        adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
                        adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
                        /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
index e969c879d87e66c686c0345839da07b39391e3e2..e5da4654b630dd7030704496ee5260e7676324ec 100644 (file)
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
 
index bcf68f80bbf058b9cfb8f7a1239f82259f95774b..3ff08e326838f381a91d542e3e2ee7484b46c5b3 100644 (file)
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        unsigned version_major, version_minor, family_id;
        int i, j, r;
 
-       INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+       INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
        switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
        void *ptr;
        int i, j;
 
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
                if (adev->uvd.inst[j].vcpu_bo == NULL)
                        continue;
 
-               cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
                /* only valid for physical mode */
                if (adev->asic_type < CHIP_POLARIS10) {
                        for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
        struct amdgpu_device *adev =
-               container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+               container_of(work, struct amdgpu_device, uvd.idle_work.work);
        unsigned fences = 0, i, j;
 
        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
                                                               AMD_CG_STATE_GATE);
                }
        } else {
-               schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
        }
 }
 
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
        if (amdgpu_sriov_vf(adev))
                return;
 
-       set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+       set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
        if (set_clocks) {
                if (adev->pm.dpm_enabled) {
                        amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
        if (!amdgpu_sriov_vf(ring->adev))
-               schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+               schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
index b1579fba134c189777d59242d4f9e2dcd97a8378..8b23a1b00c76c95cf2b12c8aff1440415b059560 100644 (file)
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
        void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
-       struct delayed_work     idle_work;
        struct amdgpu_ring      ring;
        struct amdgpu_ring      ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
        struct amdgpu_irq_src   irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
        bool                    address_64_bit;
        bool                    use_ctx_buf;
        struct amdgpu_uvd_inst          inst[AMDGPU_MAX_UVD_INSTANCES];
+       struct delayed_work     idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
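The hunks above move the UVD idle handler from one delayed work per instance to a single device-level work item, so suspend cancels exactly one work regardless of instance count. A hedged kernel-style sketch of that pattern (the "my_dev" naming and timeout handling are illustrative, not the amdgpu API):

    #include <linux/workqueue.h>

    struct my_dev {
            struct delayed_work idle_work;   /* one per device, not per instance */
    };

    static void my_idle_handler(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, idle_work.work);
            /* ... power-gate the block, or re-arm if fences are still pending ... */
            (void)dev;
    }

    static void my_init(struct my_dev *dev)
    {
            INIT_DELAYED_WORK(&dev->idle_work, my_idle_handler);
    }

    static void my_suspend(struct my_dev *dev)
    {
            /* a single synchronous cancel now covers every instance */
            cancel_delayed_work_sync(&dev->idle_work);
    }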
index 127e87b470ff4da368c8c1feb0576f0b6cb0c62c..1b4ad9b2a7550189d45f4ac86b7e73bc7cc5966b 100644 (file)
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
        unsigned long bo_size;
        const char *fw_name;
        const struct common_firmware_header *hdr;
-       unsigned version_major, version_minor, family_id;
+       unsigned char fw_check;
        int r;
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 
        hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
        adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
-       family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
-       version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
-       version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
-       DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
-               version_major, version_minor, family_id);
 
+       /* Bits 20-23 hold the encode major version and are non-zero in the new
+        * naming convention. In the old naming convention this field is part of
+        * the version minor and DRM_DISABLED_FLAG. Since the latest version
+        * minor is 0x5B and DRM_DISABLED_FLAG is zero in the old convention,
+        * this field has always been zero so far. These four bits are used to
+        * tell which naming convention is present.
+        */
+       fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+       if (fw_check) {
+               unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+               fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+               enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+               enc_major = fw_check;
+               dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+               vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+               DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+                       enc_major, enc_minor, dec_ver, vep, fw_rev);
+       } else {
+               unsigned int version_major, version_minor, family_id;
+
+               family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+               version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+               version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+               DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+                       version_major, version_minor, family_id);
+       }
 
        bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
                  +  AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
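A minimal userspace sketch of the decode logic introduced above, assuming the bit layout described in the comment; the sample ucode_version value is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t v = 0x12345678;          /* hypothetical ucode_version */
            uint32_t fw_check = (v >> 20) & 0xf;

            if (fw_check) {                   /* new naming convention */
                    unsigned enc_major = fw_check;
                    unsigned enc_minor = (v >> 12) & 0xff;
                    unsigned fw_rev    = v & 0xfff;
                    unsigned dec_ver   = (v >> 24) & 0xf;
                    unsigned vep       = (v >> 28) & 0xf;
                    printf("ENC %u.%u DEC %u VEP %u rev %u\n",
                           enc_major, enc_minor, dec_ver, vep, fw_rev);
            } else {                          /* old naming convention */
                    printf("v%u.%u family 0x%02x\n",
                           (v >> 24) & 0xff, (v >> 8) & 0xff, v & 0xff);
            }
            return 0;
    }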
index b0eb2f537392d192d84d3884bd685f7084e9e220..fdcb498f6d194b42d386940604557cfc90d2680c 100644 (file)
@@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                return;
        list_add_tail(&base->bo_list, &bo->va);
 
+       if (bo->tbo.type == ttm_bo_type_kernel)
+               list_move(&base->vm_status, &vm->relocated);
+
        if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                return;
 
@@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
                        pt->parent = amdgpu_bo_ref(parent->base.bo);
 
                        amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-                       list_move(&entry->base.vm_status, &vm->relocated);
                }
 
                if (level < AMDGPU_VM_PTB) {
@@ -1463,7 +1465,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                        uint64_t count;
 
                        max_entries = min(max_entries, 16ull * 1024ull);
-                       for (count = 1; count < max_entries; ++count) {
+                       for (count = 1;
+                            count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+                            ++count) {
                                uint64_t idx = pfn + count;
 
                                if (pages_addr[idx] !=
@@ -1476,7 +1480,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                dma_addr = pages_addr;
                        } else {
                                addr = pages_addr[pfn];
-                               max_entries = count;
+                               max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                        }
 
                } else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1495,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               pfn += last - start + 1;
+               pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
                if (nodes && nodes->size == pfn) {
                        pfn = 0;
                        ++nodes;
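The hunks above convert between pages_addr[] entries (CPU pages) and GPU PTEs. With 4K CPU pages the ratio is 1 and behaviour is unchanged; on a 64K-page kernel each pages_addr[] entry backs 16 GPU pages, which is what the new divisor accounts for. A small sketch of the arithmetic (constants illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE            65536u   /* e.g. a 64K-page CPU */
    #define AMDGPU_GPU_PAGE_SIZE 4096u    /* GPU PTEs always cover 4K */

    int main(void)
    {
            /* one CPU page backs this many GPU pages */
            uint32_t ratio = PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE;   /* 16 */

            uint64_t start = 0, last = 255;   /* GPU pages just mapped */
            uint64_t pfn = 0;

            /* advance through pages_addr[] in CPU-page steps, as the fix does */
            pfn += (last - start + 1) / ratio;   /* 256 GPU pages -> 16 entries */
            printf("consumed %llu pages_addr entries\n",
                   (unsigned long long)pfn);
            return 0;
    }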
index 9aca653bec07714874297e327eb950225f5ac555..b6333f92ba4565e9b5949f48e643f2d47daaa2a0 100644 (file)
@@ -96,6 +96,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
                adev->gmc.visible_vram_size : end) - start;
 }
 
+/**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_mem_reg *mem = &bo->tbo.mem;
+       struct drm_mm_node *nodes = mem->mm_node;
+       unsigned pages = mem->num_pages;
+       u64 usage = 0;
+
+       if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+               return 0;
+
+       if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+               return amdgpu_bo_size(bo);
+
+       while (nodes && pages) {
+               usage += nodes->size << PAGE_SHIFT;
+               usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+               pages -= nodes->size;
+               ++nodes;
+       }
+
+       return usage;
+}
+
 /**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
                num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
        }
 
-       nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+       nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+                              GFP_KERNEL | __GFP_ZERO);
        if (!nodes)
                return -ENOMEM;
 
@@ -190,7 +223,7 @@ error:
                drm_mm_remove_node(&nodes[i]);
        spin_unlock(&mgr->lock);
 
-       kfree(nodes);
+       kvfree(nodes);
        return r == -ENOSPC ? 0 : r;
 }
 
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
        atomic64_sub(usage, &mgr->usage);
        atomic64_sub(vis_usage, &mgr->vis_usage);
 
-       kfree(mem->mm_node);
+       kvfree(mem->mm_node);
        mem->mm_node = NULL;
 }
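A toy model of the accounting in amdgpu_vram_mgr_bo_invisible_size() above: sum each drm_mm node's size and subtract its CPU-visible portion. The node layout and visible-window size here are invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct node { uint64_t start, size; };          /* in 4K pages */

    static uint64_t vis_size(uint64_t vis_pages, const struct node *n)
    {
            uint64_t end = n->start + n->size;
            if (n->start >= vis_pages)
                    return 0;
            return ((end > vis_pages) ? vis_pages : end) - n->start;
    }

    int main(void)
    {
            const uint64_t vis_pages = 256 << 8;    /* 256 MiB visible window */
            struct node nodes[] = { { 100, 50 }, { vis_pages - 10, 40 } };
            uint64_t invisible = 0;

            for (unsigned i = 0; i < 2; i++)
                    invisible += (nodes[i].size - vis_size(vis_pages, &nodes[i]))
                                 * 4096ull;
            printf("invisible bytes: %llu\n", (unsigned long long)invisible);
            return 0;
    }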
 
index 0999c843f623ca37c0504a2a4c3151374c7fb854..a71b97519cc05ac9fc7f4e1884c3f4d61fa65a69 100644 (file)
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
        .emit_frame_size =
                4 + /* vce_v3_0_emit_pipeline_sync */
                6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
-       .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+       .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
        .emit_ib = amdgpu_vce_ring_emit_ib,
        .emit_fence = amdgpu_vce_ring_emit_fence,
        .test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
                6 + /* vce_v3_0_emit_vm_flush */
                4 + /* vce_v3_0_emit_pipeline_sync */
                6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
-       .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+       .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
        .emit_ib = vce_v3_0_ring_emit_ib,
        .emit_vm_flush = vce_v3_0_emit_vm_flush,
        .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
index f9add85157e7355432aab9d0d14728f8906774f9..770c6b24be0b6b15d607e5008b864f5b4ca2bf6c 100644 (file)
@@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
        return color_space;
 }
 
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+               return;
+
+       timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+                                               const struct drm_display_info *info)
+{
+       int normalized_clk;
+       if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+               return;
+       do {
+               normalized_clk = timing_out->pix_clk_khz;
+               /* YCbCr 4:2:0 requires the pixel clock to be halved */
+               if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+                       normalized_clk /= 2;
+               /* Adjust the pixel clock per the HDMI spec for the chosen colour depth */
+               switch (timing_out->display_color_depth) {
+               case COLOR_DEPTH_101010:
+                       normalized_clk = (normalized_clk * 30) / 24;
+                       break;
+               case COLOR_DEPTH_121212:
+                       normalized_clk = (normalized_clk * 36) / 24;
+                       break;
+               case COLOR_DEPTH_161616:
+                       normalized_clk = (normalized_clk * 48) / 24;
+                       break;
+               default:
+                       return;
+               }
+               if (normalized_clk <= info->max_tmds_clock)
+                       return;
+               reduce_mode_colour_depth(timing_out);
+
+       } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
 /*****************************************************************************/
 
 static void
@@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
                                             const struct drm_connector *connector)
 {
        struct dc_crtc_timing *timing_out = &stream->timing;
+       const struct drm_display_info *info = &connector->display_info;
 
        memset(timing_out, 0, sizeof(struct dc_crtc_timing));
 
@@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
        timing_out->v_border_top = 0;
        timing_out->v_border_bottom = 0;
        /* TODO: un-hardcode */
-
-       if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+       if (drm_mode_is_420_only(info, mode_in)
+                       && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+       else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
                        && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
                timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
        else
@@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
 
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+       if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+               adjust_colour_depth_from_display_info(timing_out, info);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -3928,10 +3973,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
        if (acrtc->base.state->event)
                prepare_flip_isr(acrtc);
 
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
        surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
        surface_updates->flip_addr = &addr;
 
-
        dc_commit_updates_for_stream(adev->dm.dc,
                                             surface_updates,
                                             1,
@@ -3944,9 +3990,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
                         __func__,
                         addr.address.grph.addr.high_part,
                         addr.address.grph.addr.low_part);
-
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 }
 
 /*
@@ -4206,6 +4249,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct drm_connector *connector;
        struct drm_connector_state *old_con_state, *new_con_state;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+       int crtc_disable_count = 0;
 
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
 
@@ -4410,6 +4454,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
                bool modeset_needed;
 
+               if (old_crtc_state->active && !new_crtc_state->active)
+                       crtc_disable_count++;
+
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
                modeset_needed = modeset_required(
@@ -4463,11 +4510,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
         * so we can put the GPU into runtime suspend if we're not driving any
         * displays anymore
         */
+       for (i = 0; i < crtc_disable_count; i++)
+               pm_runtime_put_autosuspend(dev->dev);
        pm_runtime_mark_last_busy(dev->dev);
-       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
-               if (old_crtc_state->active && !new_crtc_state->active)
-                       pm_runtime_put_autosuspend(dev->dev);
-       }
 }
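A standalone sketch of the loop in adjust_colour_depth_from_display_info() above: scale the pixel clock for the candidate depth and step the depth down until the result fits under the sink's max TMDS clock. The numbers are illustrative and the 4:2:0 halving is omitted:

    #include <stdio.h>

    int main(void)
    {
            int pix_clk_khz = 297000;       /* e.g. 4K@30 */
            int max_tmds_khz = 300000;      /* sink limit */
            int bpc = 12;                   /* start at 12 bpc */

            while (bpc > 8) {
                    long clk = (long)pix_clk_khz * (bpc * 3) / 24;
                    if (clk <= max_tmds_khz)
                            break;
                    bpc -= 2;               /* 12 -> 10 -> 8 */
            }
            printf("chosen depth: %d bpc\n", bpc);   /* prints 8 here */
            return 0;
    }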
 
 
index 4304d9e408b88d180eabac07327497fdda353b25..ace9ad578ca08f85aeccf49ac4d744b071444274 100644 (file)
@@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
        enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
                I2C_MOT_TRUE : I2C_MOT_FALSE;
        enum ddc_result res;
-       uint32_t read_bytes = msg->size;
+       ssize_t read_bytes;
 
        if (WARN_ON(msg->size > 16))
                return -E2BIG;
 
        switch (msg->request & ~DP_AUX_I2C_MOT) {
        case DP_AUX_NATIVE_READ:
-               res = dal_ddc_service_read_dpcd_data(
+               read_bytes = dal_ddc_service_read_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
                                false,
                                I2C_MOT_UNDEF,
                                msg->address,
                                msg->buffer,
-                               msg->size,
-                               &read_bytes);
-               break;
+                               msg->size);
+               return read_bytes;
        case DP_AUX_NATIVE_WRITE:
                res = dal_ddc_service_write_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
@@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                                msg->size);
                break;
        case DP_AUX_I2C_READ:
-               res = dal_ddc_service_read_dpcd_data(
+               read_bytes = dal_ddc_service_read_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
                                true,
                                mot,
                                msg->address,
                                msg->buffer,
-                               msg->size,
-                               &read_bytes);
-               break;
+                               msg->size);
+               return read_bytes;
        case DP_AUX_I2C_WRITE:
                res = dal_ddc_service_write_dpcd_data(
                                TO_DM_AUX(aux)->ddc_service,
@@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
                 r == DDC_RESULT_SUCESSFULL);
 #endif
 
-       if (res != DDC_RESULT_SUCESSFULL)
-               return -EIO;
-       return read_bytes;
+       return msg->size;
 }
 
 static enum drm_connector_status
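The refactor above and the matching dal_ddc_service_read_dpcd_data() change further down replace an enum result plus out-parameter with the usual ssize_t convention: byte count on success, negative errno on failure. A minimal sketch of that calling convention (my_read_dpcd() is a stand-in, not the DC API):

    #include <stdio.h>
    #include <sys/types.h>
    #include <errno.h>

    static ssize_t my_read_dpcd(unsigned address, unsigned char *buf, size_t len)
    {
            if (len > 16)
                    return -EINVAL;   /* was DDC_RESULT_FAILED_INVALID_OPERATION */
            /* ... perform the AUX transaction ... */
            return (ssize_t)len;      /* bytes actually read */
    }

    int main(void)
    {
            unsigned char buf[16];
            ssize_t n = my_read_dpcd(0x600, buf, sizeof(buf));

            if (n < 0)
                    printf("error %zd\n", n);
            else
                    printf("read %zd bytes\n", n);
            return 0;
    }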
index 5a3346124a0177da27c6d205559a2f363f5aa40d..5a2e952c5bead295df49350289416d4674c4dcef 100644 (file)
@@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency(
                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 
        for (i = 0; i < clk_level_info->num_levels; i++) {
-               DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
-               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+               DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+               /* translate 10kHz to kHz */
+               clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
                clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
        }
 }
index ae48d603ebd6ca73c289c71f50795f5d3bd6f65e..49c2face1e7a869e07e94da881e7ed33acbe483e 100644 (file)
@@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
        return ret;
 }
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
        struct ddc_service *ddc,
        bool i2c,
        enum i2c_mot_mode mot,
        uint32_t address,
        uint8_t *data,
-       uint32_t len,
-       uint32_t *read)
+       uint32_t len)
 {
        struct aux_payload read_payload = {
                .i2c_over_aux = i2c,
@@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
                .mot = mot
        };
 
-       *read = 0;
-
        if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
                BREAK_TO_DEBUGGER();
                return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
                ddc->ctx->i2caux,
                ddc->ddc_pin,
                &command)) {
-               *read = command.payloads->length;
-               return DDC_RESULT_SUCESSFULL;
+               return (ssize_t)command.payloads->length;
        }
 
        return DDC_RESULT_FAILED_OPERATION;
index b235a75355b855e03cfd679126e72a9e30dd7d00..bae752332a9f7f2baea5d59f9c367d5b5b61a56e 100644 (file)
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
        .mem_input_is_flip_pending = dce_mi_is_flip_pending
 };
 
+static struct mem_input_funcs dce112_mi_funcs = {
+       .mem_input_program_display_marks = dce112_mi_program_display_marks,
+       .allocate_mem_input = dce_mi_allocate_dmif,
+       .free_mem_input = dce_mi_free_dmif,
+       .mem_input_program_surface_flip_and_addr =
+                       dce_mi_program_surface_flip_and_addr,
+       .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+       .mem_input_program_surface_config =
+                       dce_mi_program_surface_config,
+       .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static struct mem_input_funcs dce120_mi_funcs = {
+       .mem_input_program_display_marks = dce120_mi_program_display_marks,
+       .allocate_mem_input = dce_mi_allocate_dmif,
+       .free_mem_input = dce_mi_free_dmif,
+       .mem_input_program_surface_flip_and_addr =
+                       dce_mi_program_surface_flip_and_addr,
+       .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+       .mem_input_program_surface_config =
+                       dce_mi_program_surface_config,
+       .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
 
 void dce_mem_input_construct(
        struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
        const struct dce_mem_input_mask *mi_mask)
 {
        dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-       dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+       dce_mi->base.funcs = &dce112_mi_funcs;
 }
 
 void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
        const struct dce_mem_input_mask *mi_mask)
 {
        dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-       dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+       dce_mi->base.funcs = &dce120_mi_funcs;
 }
index 38ec0d609297f832362d45397a2080918e4937c0..344dd2e69e7ceb5177aa2d0762d63862531af31f 100644 (file)
@@ -678,9 +678,22 @@ bool dce100_validate_bandwidth(
        struct dc  *dc,
        struct dc_state *context)
 {
-       /* TODO implement when needed but for now hardcode max value*/
-       context->bw.dce.dispclk_khz = 681000;
-       context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       int i;
+       bool at_least_one_pipe = false;
+
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               if (context->res_ctx.pipe_ctx[i].stream)
+                       at_least_one_pipe = true;
+       }
+
+       if (at_least_one_pipe) {
+               /* TODO: implement when needed, but for now hardcode the max value */
+               context->bw.dce.dispclk_khz = 681000;
+               context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+       } else {
+               context->bw.dce.dispclk_khz = 0;
+               context->bw.dce.yclk_khz = 0;
+       }
 
        return true;
 }
index 30b3a08b91be27dade29620cfc5dbf857c89bad1..090b7a8dd67bde2bdfdaf243c04d175abf82112c 100644 (file)
@@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
                uint8_t *read_buf,
                uint32_t read_size);
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
                struct ddc_service *ddc,
                bool i2c,
                enum i2c_mot_mode mot,
                uint32_t address,
                uint8_t *data,
-               uint32_t len,
-               uint32_t *read);
+               uint32_t len);
 
 enum ddc_result dal_ddc_service_write_dpcd_data(
                struct ddc_service *ddc,
index 092d800b703a7627a2b98fdda7be54b5b6f7ff11..33b4de4ad66eb561f15c04cac555c2ea76b03d2b 100644 (file)
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
        uint8_t  acggfxclkspreadpercent;
        uint16_t acggfxclkspreadfreq;
 
-       uint32_t boardreserved[10];
+       uint8_t Vr2_I2C_address;
+       uint8_t padding_vr2[3];
+
+       uint32_t boardreserved[9];
 };
 
 /* 
index 5325661fedffb9480b26fb7b94269a30fb32334a..d27c1c9df2868696887157845aa13eca5b7348d1 100644 (file)
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
        return 0;
 }
 
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+                       struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+                       struct atom_firmware_info_v3_2 *fw_info)
+{
+       uint32_t frequency = 0;
+
+       boot_values->ulRevision = fw_info->firmware_revision;
+       boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+       boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+       boot_values->usVddc     = fw_info->bootup_vddc_mv;
+       boot_values->usVddci    = fw_info->bootup_vddci_mv;
+       boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+       boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+       boot_values->ucCoolingID = fw_info->coolingsolution_id;
+       boot_values->ulSocClk   = 0;
+       boot_values->ulDCEFClk   = 0;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+               boot_values->ulSocClk   = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+               boot_values->ulDCEFClk  = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+               boot_values->ulEClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+               boot_values->ulVClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+               boot_values->ulDClk     = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+                       struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+                       struct atom_firmware_info_v3_1 *fw_info)
+{
+       uint32_t frequency = 0;
+
+       boot_values->ulRevision = fw_info->firmware_revision;
+       boot_values->ulGfxClk   = fw_info->bootup_sclk_in10khz;
+       boot_values->ulUClk     = fw_info->bootup_mclk_in10khz;
+       boot_values->usVddc     = fw_info->bootup_vddc_mv;
+       boot_values->usVddci    = fw_info->bootup_vddci_mv;
+       boot_values->usMvddc    = fw_info->bootup_mvddc_mv;
+       boot_values->usVddGfx   = fw_info->bootup_vddgfx_mv;
+       boot_values->ucCoolingID = fw_info->coolingsolution_id;
+       boot_values->ulSocClk   = 0;
+       boot_values->ulDCEFClk   = 0;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+               boot_values->ulSocClk   = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+               boot_values->ulDCEFClk  = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+               boot_values->ulEClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+               boot_values->ulVClk     = frequency;
+
+       if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+               boot_values->ulDClk     = frequency;
+}
+
 int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
                        struct pp_atomfwctrl_bios_boot_up_values *boot_values)
 {
-       struct atom_firmware_info_v3_1 *info = NULL;
+       struct atom_firmware_info_v3_2 *fwinfo_3_2;
+       struct atom_firmware_info_v3_1 *fwinfo_3_1;
+       struct atom_common_table_header *info = NULL;
        uint16_t ix;
 
        ix = GetIndexIntoMasterDataTable(firmwareinfo);
-       info = (struct atom_firmware_info_v3_1 *)
+       info = (struct atom_common_table_header *)
                smu_atom_get_data_table(hwmgr->adev,
                                ix, NULL, NULL, NULL);
 
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
                return -EINVAL;
        }
 
-       boot_values->ulRevision = info->firmware_revision;
-       boot_values->ulGfxClk   = info->bootup_sclk_in10khz;
-       boot_values->ulUClk     = info->bootup_mclk_in10khz;
-       boot_values->usVddc     = info->bootup_vddc_mv;
-       boot_values->usVddci    = info->bootup_vddci_mv;
-       boot_values->usMvddc    = info->bootup_mvddc_mv;
-       boot_values->usVddGfx   = info->bootup_vddgfx_mv;
-       boot_values->ucCoolingID = info->coolingsolution_id;
-       boot_values->ulSocClk   = 0;
-       boot_values->ulDCEFClk   = 0;
+       if ((info->format_revision == 3) && (info->content_revision == 2)) {
+               fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+               pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+                               boot_values, fwinfo_3_2);
+       } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+               fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+               pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+                               boot_values, fwinfo_3_1);
+       } else {
+               pr_info("Fw info table revision does not match!\n");
+               return -EINVAL;
+       }
 
        return 0;
 }
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
        param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
        param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
 
+       param->Vr2_I2C_address = info->Vr2_I2C_address;
+
        return 0;
 }
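A sketch of the version-dispatch pattern introduced above: read the common table header first, then cast to the matching v3.1/v3.2 layout. The struct contents here are abbreviated stand-ins for the real atom tables:

    #include <stdint.h>
    #include <stdio.h>

    struct common_hdr { uint8_t format_revision, content_revision; };
    struct fwinfo_v3_1 { struct common_hdr hdr; uint32_t bootup_sclk_in10khz; };
    struct fwinfo_v3_2 { struct common_hdr hdr; uint32_t bootup_sclk_in10khz;
                         uint32_t extra_v3_2_field; };

    static int parse(const struct common_hdr *info)
    {
            if (info->format_revision == 3 && info->content_revision == 2) {
                    const struct fwinfo_v3_2 *fw = (const void *)info;
                    printf("v3.2 sclk: %u x10kHz\n", fw->bootup_sclk_in10khz);
            } else if (info->format_revision == 3 && info->content_revision == 1) {
                    const struct fwinfo_v3_1 *fw = (const void *)info;
                    printf("v3.1 sclk: %u x10kHz\n", fw->bootup_sclk_in10khz);
            } else {
                    return -1;          /* unknown revision */
            }
            return 0;
    }

    int main(void)
    {
            struct fwinfo_v3_2 fw = { { 3, 2 }, 115000, 0 };
            return parse(&fw.hdr);
    }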
index fe10aa4db5e64f721fbd462ffa6127b0e06851fc..22e21668c93a429239688fd7509328fc0d870406 100644 (file)
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
        uint32_t   ulUClk;
        uint32_t   ulSocClk;
        uint32_t   ulDCEFClk;
+       uint32_t   ulEClk;
+       uint32_t   ulVClk;
+       uint32_t   ulDClk;
        uint16_t   usVddc;
        uint16_t   usVddci;
        uint16_t   usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
        uint8_t  acggfxclkspreadenabled;
        uint8_t  acggfxclkspreadpercent;
        uint16_t acggfxclkspreadfreq;
+
+       uint8_t Vr2_I2C_address;
 };
 
 int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
index dbe4b1f66784961ea028b3fcfee564e830617f80..22364875a943e5e32e7e13d5fe2ef79d579f2824 100644 (file)
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
        struct amdgpu_device *adev = hwmgr->adev;
-       int result;
+       int result = 0;
        uint32_t num_se = 0;
        uint32_t count, data;
 
index 782e2098824df6225e2c044bf060626d143a0919..c98e5de777cd1bc18abbc4a5560430543f23ddd6 100644 (file)
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
 
        data->registry_data.disallowed_features = 0x0;
        data->registry_data.od_state_in_dc_support = 0;
+       data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
 
        data->registry_data.log_avfs_param = 0;
@@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
                data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
                data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
                data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+               data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+               data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+               data->vbios_boot_state.vclock = boot_up_values.ulVClk;
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                PPSMC_MSG_SetMinDeepSleepDcefclk,
                        (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
index e81ded1ec1982d55f2b1a5657ee0fc163b3d7c96..49b38df8c7f2702553b7aa49e27e77069d55adef 100644 (file)
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
        uint32_t    mem_clock;
        uint32_t    soc_clock;
        uint32_t    dcef_clock;
+       uint32_t    eclock;
+       uint32_t    dclock;
+       uint32_t    vclock;
 };
 
 #define DPMTABLE_OD_UPDATE_SCLK     0x00000001
index 888ddca902d894216acee566879f239a47009468..29914700ee82f5d8d09de71ff8fadb7b28bfaca3 100644 (file)
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
                ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
        }
 
+       ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
        return 0;
 }
 
index 2f8a3b983cce0c9444cac949ed7ce79a420b06ba..b08526fd161908d29bda5ab87dc7bd1c1f1401f1 100644 (file)
@@ -499,7 +499,10 @@ typedef struct {
        uint8_t      AcgGfxclkSpreadPercent;
        uint16_t     AcgGfxclkSpreadFreq;
 
-       uint32_t     BoardReserved[10];
+  uint8_t      Vr2_I2C_address;
+  uint8_t      padding_vr2[3];
+
+  uint32_t     BoardReserved[9];
 
 
   uint32_t     MmHubPadding[7];
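The hunks above carve Vr2_I2C_address plus three padding bytes out of one reserved dword, so the overall firmware table size is unchanged. A toy check of that invariant, with the layouts abbreviated to the affected fields:

    #include <stdint.h>

    struct old_layout { uint32_t boardreserved[10]; };
    struct new_layout {
            uint8_t  vr2_i2c_address;
            uint8_t  padding_vr2[3];
            uint32_t boardreserved[9];
    };

    /* 1 + 3 + 9*4 == 10*4: the table the SMU sees stays the same size */
    _Static_assert(sizeof(struct old_layout) == sizeof(struct new_layout),
                   "firmware table size must not change");

    int main(void) { return 0; }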
index d644a9bb9078d081639aa09a72af9f92e3612fc2..9f407c48d4f0d4775c6c3dd8943591c19e22331e 100644 (file)
@@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
        uint32_t fw_to_load;
        int result = 0;
        struct SMU_DRAMData_TOC *toc;
+       uint32_t num_entries = 0;
 
        if (!hwmgr->reload_fw) {
                pr_info("skip reloading...\n");
@@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
        }
 
        toc = (struct SMU_DRAMData_TOC *)smu_data->header;
-       toc->num_entries = 0;
        toc->structure_version = 1;
 
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_RLC_G, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_CE, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_ME, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_SDMA0, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_SDMA1, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
        if (!hwmgr->not_vf)
                PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-                               UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+                               UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
                                "Failed to Get Firmware Entry.", return -EINVAL);
 
+       toc->num_entries = num_entries;
        smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
        smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
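The fix above accumulates the entry count in a CPU-local variable and publishes it with a single store, instead of incrementing toc->num_entries in place through the shared header buffer. A minimal sketch of that pattern, with the types simplified:

    #include <stdint.h>
    #include <stdio.h>

    struct toc { uint32_t structure_version; uint32_t num_entries; };

    int main(void)
    {
            struct toc toc = { .structure_version = 1 };
            uint32_t num_entries = 0;

            /* ... populate entry[num_entries++] for each firmware image ... */
            num_entries++;                  /* e.g. RLC_G */
            num_entries++;                  /* e.g. CP_CE */

            toc.num_entries = num_entries;  /* single write to shared memory */
            printf("%u entries\n", toc.num_entries);
            return 0;
    }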
 
index 8d20faa198cf199ff65d604ca9f4f1effd633c86..0a788d76ed5f02f6aed755447783874280b5cf7a 100644 (file)
@@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm)
 
 static void malidp_fini(struct drm_device *drm)
 {
-       drm_atomic_helper_shutdown(drm);
        drm_mode_config_cleanup(drm);
 }
 
@@ -646,6 +645,7 @@ vblank_fail:
        malidp_de_irq_fini(drm);
        drm->irq_enabled = false;
 irq_init_fail:
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
 bind_fail:
        of_node_put(malidp->crtc.port);
@@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev)
        malidp_se_irq_fini(drm);
        malidp_de_irq_fini(drm);
        drm->irq_enabled = false;
+       drm_atomic_helper_shutdown(drm);
        component_unbind_all(dev, drm);
        of_node_put(malidp->crtc.port);
        malidp->crtc.port = NULL;
index d789b46dc817335dd2d509ee456d850897762b88..069783e715f1777829b3d992b2add45b8fb693da 100644 (file)
@@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
                                .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
                        },
                        .se_irq_map = {
-                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+                               .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+                                           MALIDP500_SE_IRQ_GLOBAL,
                                .vsync_irq = 0,
                        },
                        .dc_irq_map = {
index 7a44897c50fea784bf516db7f17866ee31f413fa..29409a65d864760e674f787cb5279cdbff5b91a7 100644 (file)
@@ -23,6 +23,7 @@
 
 /* Layer specific register offsets */
 #define MALIDP_LAYER_FORMAT            0x000
+#define   LAYER_FORMAT_MASK            0x3f
 #define MALIDP_LAYER_CONTROL           0x004
 #define   LAYER_ENABLE                 (1 << 0)
 #define   LAYER_FLOWCFG_MASK           7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
 
-               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
-                                                    state->crtc_w,
+               val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
+                                                    state->crtc_h,
                                                     fb->format->format);
                if (val < 0)
                        return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
        dest_w = plane->state->crtc_w;
        dest_h = plane->state->crtc_h;
 
-       malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
+       val = malidp_hw_read(mp->hwdev, mp->layer->base);
+       val = (val & ~LAYER_FORMAT_MASK) | ms->format;
+       malidp_hw_write(mp->hwdev, val, mp->layer->base);
 
        for (i = 0; i < ms->n_planes; i++) {
                /* calculate the offset for the layer's plane registers */
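A sketch of the read-modify-write the fix introduces: update only the format field and leave the other register bits intact. reg_read()/reg_write() below are stand-ins for the driver's MMIO accessors:

    #include <stdint.h>
    #include <stdio.h>

    #define LAYER_FORMAT_MASK 0x3f

    static uint32_t reg;                      /* fake hardware register */
    static uint32_t reg_read(void)            { return reg; }
    static void     reg_write(uint32_t v)     { reg = v; }

    int main(void)
    {
            reg = 0xabc0 | 0x12;              /* other bits set + old format */

            uint32_t val = reg_read();
            val = (val & ~LAYER_FORMAT_MASK) | 0x05;   /* new format code */
            reg_write(val);

            printf("reg = 0x%x\n", reg);      /* 0xabc5: upper bits preserved */
            return 0;
    }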
index 03eeee11dd5bd2f4ed000b2f86ca5eda6253f9ca..42a40daff13265c968f442b4dafd2ec081bbafd7 100644 (file)
@@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
        u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
 
        /*
-        * This is rediculous - rather than writing bits to clear, we
-        * have to set the actual status register value.  This is racy.
+        * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+        * is set.  Writing has some other effect to acknowledge the IRQ -
+        * without this, we only get a single IRQ.
         */
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
@@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
 static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
        return 0;
 }
 
 static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+       unsigned long flags;
 
+       spin_lock_irqsave(&dcrtc->irq_lock, flags);
        armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+       spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
 }
 
 static const struct drm_crtc_funcs armada_crtc_funcs = {
@@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
                       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
        writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
        writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+       readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
        writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
        ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
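The vblank hunks above take the same spinlock the IRQ handler uses, with interrupts disabled, before touching the interrupt-enable mask. A hedged kernel-style sketch of that locking (the "my_crtc" naming is illustrative; spin_lock_init() is omitted):

    #include <linux/spinlock.h>

    struct my_crtc {
            spinlock_t irq_lock;
            u32 irq_ena;
    };

    static void my_enable_vblank(struct my_crtc *c, u32 mask)
    {
            unsigned long flags;

            spin_lock_irqsave(&c->irq_lock, flags);
            c->irq_ena |= mask;       /* plus a register write in the driver */
            spin_unlock_irqrestore(&c->irq_lock, flags);
    }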
index 27319a8335e258cf12cb093c5c598523fbe307f3..345dc4d0851ef43bd73069679c3ec7fef8bfa073 100644 (file)
@@ -160,6 +160,7 @@ enum {
        CFG_ALPHAM_GRA          = 0x1 << 16,
        CFG_ALPHAM_CFG          = 0x2 << 16,
        CFG_ALPHA_MASK          = 0xff << 8,
+#define CFG_ALPHA(x)           ((x) << 8)
        CFG_PIXCMD_MASK         = 0xff,
 };
 
index c391955009d6051a6bb67b8a9a7bcebee38a8983..afa7ded3ae31df68a406da0bba045a6d87f0bce6 100644 (file)
@@ -28,6 +28,7 @@ struct armada_ovl_plane_properties {
        uint16_t contrast;
        uint16_t saturation;
        uint32_t colorkey_mode;
+       uint32_t colorkey_enable;
 };
 
 struct armada_ovl_plane {
@@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
        writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
 
        spin_lock_irq(&dcrtc->irq_lock);
-       armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
-                    CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
-                    dcrtc->base + LCD_SPU_DMA_CTRL1);
-
-       armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+       armada_updatel(prop->colorkey_mode,
+                      CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+                      dcrtc->base + LCD_SPU_DMA_CTRL1);
+       if (dcrtc->variant->has_spu_adv_reg)
+               armada_updatel(prop->colorkey_enable,
+                              ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+                              dcrtc->base + LCD_SPU_ADV_REG);
        spin_unlock_irq(&dcrtc->irq_lock);
 }
 
@@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
                dplane->prop.colorkey_vb |= K2B(val);
                update_attr = true;
        } else if (property == priv->colorkey_mode_prop) {
-               dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
-               dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+               if (val == CKMODE_DISABLE) {
+                       dplane->prop.colorkey_mode =
+                               CFG_CKMODE(CKMODE_DISABLE) |
+                               CFG_ALPHAM_CFG | CFG_ALPHA(255);
+                       dplane->prop.colorkey_enable = 0;
+               } else {
+                       dplane->prop.colorkey_mode =
+                               CFG_CKMODE(val) |
+                               CFG_ALPHAM_GRA | CFG_ALPHA(0);
+                       dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+               }
                update_attr = true;
        } else if (property == priv->brightness_prop) {
                dplane->prop.brightness = val - 256;
@@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
        dplane->prop.colorkey_yr = 0xfefefe00;
        dplane->prop.colorkey_ug = 0x01010100;
        dplane->prop.colorkey_vb = 0x01010100;
-       dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+       dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+                                    CFG_ALPHAM_GRA | CFG_ALPHA(0);
+       dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
        dplane->prop.brightness = 0;
        dplane->prop.contrast = 0x4000;
        dplane->prop.saturation = 0x4000;
index 73c875db45f4346afd5a25408e9264c466401138..47e0992f39083161d46c5d1759f0c06f5cf2c0de 100644 (file)
@@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
                        return ret;
        }
 
-       if (desc->layout.xstride && desc->layout.pstride) {
+       if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
                int ret;
 
                ret = drm_plane_create_rotation_property(&plane->base,
index 7ab36042a822cf6cfec2fc440ef3fbe6e018fa3d..a6e8f4591e636241c6f1e8515fea33dc9147a7f3 100644 (file)
@@ -14,6 +14,7 @@
 #include <drm/bridge/mhl.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
 
 #include <linux/clk.h>
 #include <linux/delay.h>
 
 #define SII8620_BURST_BUF_LEN 288
 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
 
 enum sii8620_mode {
        CM_DISCONNECTED,
@@ -69,9 +73,7 @@ struct sii8620 {
        struct regulator_bulk_data supplies[2];
        struct mutex lock; /* context lock, protects fields below */
        int error;
-       int pixel_clock;
        unsigned int use_packed_pixel:1;
-       int video_code;
        enum sii8620_mode mode;
        enum sii8620_sink_type sink_type;
        u8 cbus_status;
@@ -79,7 +81,9 @@ struct sii8620 {
        u8 xstat[MHL_XDS_SIZE];
        u8 devcap[MHL_DCAP_SIZE];
        u8 xdevcap[MHL_XDC_SIZE];
-       u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+       bool feature_complete;
+       bool devcap_read;
+       bool sink_detected;
        struct edid *edid;
        unsigned int gen2_write_burst:1;
        enum sii8620_mt_state mt_state;
@@ -476,7 +480,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
        }
 }
 
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
 {
        static const char * const sink_str[] = {
                [SINK_NONE] = "NONE",
@@ -487,7 +491,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
        char sink_name[20];
        struct device *dev = ctx->dev;
 
-       if (ret < 0)
+       if (!ctx->sink_detected || !ctx->devcap_read)
                return;
 
        sii8620_fetch_edid(ctx);
@@ -496,6 +500,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                sii8620_mhl_disconnected(ctx);
                return;
        }
+       sii8620_set_upstream_edid(ctx);
 
        if (drm_detect_hdmi_monitor(ctx->edid))
                ctx->sink_type = SINK_HDMI;
@@ -508,53 +513,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
                 sink_str[ctx->sink_type], sink_name);
 }
 
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
-       if (!sii8620_is_mhl3(ctx))
-               return;
-
-       sii8620_write(ctx, REG_FCGC,
-               BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
-       sii8620_setbits(ctx, REG_HRXCTRL3,
-               BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
-       sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
-       sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
-       sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
-       sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
-       sii8620_write_seq_static(ctx,
-               REG_TDMLLCTL, 0,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
-                       BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
-               REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
-               REG_HRXINTL, 0xff,
-               REG_HRXINTH, 0xff,
-               REG_TTXINTL, 0xff,
-               REG_TTXINTH, 0xff,
-               REG_TRXINTL, 0xff,
-               REG_TRXINTH, 0xff,
-               REG_HTXINTL, 0xff,
-               REG_HTXINTH, 0xff,
-               REG_FCINTR0, 0xff,
-               REG_FCINTR1, 0xff,
-               REG_FCINTR2, 0xff,
-               REG_FCINTR3, 0xff,
-               REG_FCINTR4, 0xff,
-               REG_FCINTR5, 0xff,
-               REG_FCINTR6, 0xff,
-               REG_FCINTR7, 0xff
-       );
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
-       if (ret < 0)
-               return;
-
-       sii8620_set_upstream_edid(ctx);
-       sii8620_hsic_init(ctx);
-       sii8620_enable_hpd(ctx);
-}
-
 static void sii8620_mr_devcap(struct sii8620 *ctx)
 {
        u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +528,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
                 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
                 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
        sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+       ctx->devcap_read = true;
+       sii8620_identify_sink(ctx);
 }
 
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +767,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
        u8 lm_ddc, ddc_cmd, int3, cbus;
+       unsigned long timeout;
        int fetched, i;
        int edid_len = EDID_LENGTH;
        u8 *edid;
@@ -856,23 +817,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
                        REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
                );
 
-               do {
-                       int3 = sii8620_readb(ctx, REG_INTR3);
+               int3 = 0;
+               timeout = jiffies + msecs_to_jiffies(200);
+               for (;;) {
                        cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
-                       if (int3 & BIT_DDC_CMD_DONE)
-                               break;
-
-                       if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+                       if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
+                               kfree(edid);
+                               edid = NULL;
+                               goto end;
+                       }
+                       if (int3 & BIT_DDC_CMD_DONE) {
+                               if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+                                   >= FETCH_SIZE)
+                                       break;
+                       } else {
+                               int3 = sii8620_readb(ctx, REG_INTR3);
+                       }
+                       if (time_is_before_jiffies(timeout)) {
+                               ctx->error = -ETIMEDOUT;
+                               dev_err(ctx->dev, "timeout during EDID read\n");
                                kfree(edid);
                                edid = NULL;
                                goto end;
                        }
-               } while (1);
-
-               sii8620_readb(ctx, REG_DDC_STATUS);
-               while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
                        usleep_range(10, 20);
+               }
 
                sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
                if (fetched + FETCH_SIZE == EDID_LENGTH) {
@@ -971,8 +940,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
        ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
        if (ret)
                return ret;
+
        usleep_range(10000, 20000);
-       return clk_prepare_enable(ctx->clk_xtal);
+       ret = clk_prepare_enable(ctx->clk_xtal);
+       if (ret)
+               return ret;
+
+       msleep(100);
+       gpiod_set_value(ctx->gpio_reset, 0);
+       msleep(100);
+
+       return 0;
 }
 
 static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +960,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
        return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 }
 
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       usleep_range(5000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 1);
-       usleep_range(10000, 20000);
-       gpiod_set_value(ctx->gpio_reset, 0);
-       msleep(300);
-}
-
 static void sii8620_cbus_reset(struct sii8620 *ctx)
 {
        sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1055,23 +1022,23 @@ static void sii8620_set_format(struct sii8620 *ctx)
                                BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
                                ctx->use_packed_pixel ? ~0 : 0);
        } else {
-               if (ctx->use_packed_pixel)
+               if (ctx->use_packed_pixel) {
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, BIT_VID_MODE_M1080P,
                                REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
                                REG_MHLTX_CTL6, 0x60
                        );
-               else
+               } else {
                        sii8620_write_seq_static(ctx,
                                REG_VID_MODE, 0,
                                REG_MHL_TOP_CTL, 1,
                                REG_MHLTX_CTL6, 0xa0
                        );
+               }
        }
 
        if (ctx->use_packed_pixel)
-               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
-                       BIT_TPI_OUTPUT_CSCMODE709;
+               out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
        else
                out_fmt = VAL_TPI_FORMAT(RGB, FULL);
 
@@ -1128,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
        return frm_len;
 }
 
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+                                  struct drm_display_mode *mode)
 {
        struct mhl3_infoframe mhl_frm;
        union hdmi_infoframe frm;
        u8 buf[31];
        int ret;
 
+       ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+                                                      mode,
+                                                      true);
+       if (ctx->use_packed_pixel)
+               frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+       if (!ret)
+               ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+       if (ret > 0)
+               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
        if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
                sii8620_write(ctx, REG_TPI_SC,
                        BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
-               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
-                       ARRAY_SIZE(ctx->avif) - 3);
                sii8620_write(ctx, REG_PKT_FILTER_0,
                        BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
                        BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1148,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
                return;
        }
 
-       ret = hdmi_avi_infoframe_init(&frm.avi);
-       frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
-       frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
-       frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
-       frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
-       frm.avi.video_code = ctx->video_code;
-       if (!ret)
-               ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
-       if (ret > 0)
-               sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
        sii8620_write(ctx, REG_PKT_FILTER_0,
                BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
                BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1177,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
 
 static void sii8620_start_video(struct sii8620 *ctx)
 {
+       struct drm_display_mode *mode =
+               &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
        if (!sii8620_is_mhl3(ctx))
                sii8620_stop_video(ctx);
 
@@ -1195,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
        sii8620_set_format(ctx);
 
        if (!sii8620_is_mhl3(ctx)) {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                       MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+               u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+               if (ctx->use_packed_pixel)
+                       link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+               else
+                       link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
                sii8620_set_auto_zone(ctx);
        } else {
                static const struct {
@@ -1213,10 +1189,10 @@ static void sii8620_start_video(struct sii8620 *ctx)
                          MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
                };
                u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
-               int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+               int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
                int i;
 
-               for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+               for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
                        if (clk < clk_spec[i].max_clk)
                                break;
 
@@ -1242,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
                        clk_spec[i].link_rate);
        }
 
-       sii8620_set_infoframes(ctx);
+       sii8620_set_infoframes(ctx, mode);
 }
 
 static void sii8620_disable_hpd(struct sii8620 *ctx)
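
Two things change in the MHL3 rate selection above: the link clock is now derived from the live adjusted mode (mode->clock in kHz, times 2 bytes per pixel when packed, 3 for 24-bit RGB), and the scan stops one entry early so the last clk_spec row acts as an unconditional fallback instead of letting i run past the end of the array. The select-or-fallback idiom in isolation (a sketch; the table values are illustrative):

    struct clk_entry {
            unsigned int max_clk;   /* kHz */
            u8 link_rate;
    };

    static u8 pick_link_rate(const struct clk_entry *tbl, size_t n,
                             unsigned int link_clk)
    {
            size_t i;

            /* Scan all but the last entry; the last one is the catch-all. */
            for (i = 0; i < n - 1; i++)
                    if (link_clk < tbl[i].max_clk)
                            break;

            return tbl[i].link_rate;
    }
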
@@ -1534,6 +1510,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
        );
 }
 
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+       sii8620_disable_hpd(ctx);
+       ctx->sink_type = SINK_NONE;
+       ctx->sink_detected = false;
+       ctx->feature_complete = false;
+       kfree(ctx->edid);
+       ctx->edid = NULL;
+}
+
 static void sii8620_disconnect(struct sii8620 *ctx)
 {
        sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1547,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
                REG_MHL_DP_CTL6, 0x2A,
                REG_MHL_DP_CTL7, 0x03
        );
-       sii8620_disable_hpd(ctx);
+       sii8620_hpd_unplugged(ctx);
        sii8620_write_seq_static(ctx,
                REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
                REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1595,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
        memset(ctx->xstat, 0, sizeof(ctx->xstat));
        memset(ctx->devcap, 0, sizeof(ctx->devcap));
        memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+       ctx->devcap_read = false;
        ctx->cbus_status = 0;
-       ctx->sink_type = SINK_NONE;
-       kfree(ctx->edid);
-       ctx->edid = NULL;
        sii8620_mt_cleanup(ctx);
 }
 
@@ -1699,17 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
 
 static void sii8620_status_changed_path(struct sii8620 *ctx)
 {
-       if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                                     MHL_DST_LM_CLK_MODE_NORMAL
-                                     | MHL_DST_LM_PATH_ENABLED);
-               if (!sii8620_is_mhl3(ctx))
-                       sii8620_mt_read_devcap(ctx, false);
-               sii8620_mt_set_cont(ctx, sii8620_sink_detected);
-       } else {
-               sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-                                     MHL_DST_LM_CLK_MODE_NORMAL);
-       }
+       u8 link_mode;
+
+       if (ctx->use_packed_pixel)
+               link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+       else
+               link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+       if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+               link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+       sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+                             link_mode);
 }
 
 static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -1722,9 +1707,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
        sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
        sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-       if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+       if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+           MHL_DST_CONN_DCAP_RDY) {
                sii8620_status_dcap_ready(ctx);
 
+               if (!sii8620_is_mhl3(ctx))
+                       sii8620_mt_read_devcap(ctx, false);
+       }
+
        if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
                sii8620_status_changed_path(ctx);
 }
@@ -1808,8 +1798,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
        }
        if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
                sii8620_send_features(ctx);
-       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
-               sii8620_edid_read(ctx, 0);
+       if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+               ctx->feature_complete = true;
+               if (ctx->edid)
+                       sii8620_enable_hpd(ctx);
+       }
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1877,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
        if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
                sii8620_msc_mr_write_stat(ctx);
 
+       if (stat & BIT_CBUS_HPD_CHG) {
+               if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+                       ctx->sink_detected = true;
+                       sii8620_identify_sink(ctx);
+               } else {
+                       sii8620_hpd_unplugged(ctx);
+               }
+       }
+
        if (stat & BIT_CBUS_MSC_MR_SET_INT)
                sii8620_msc_mr_set_int(ctx);
 
@@ -1931,14 +1933,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
                ctx->mt_state = MT_STATE_DONE;
 }
 
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
-       sii8620_write_seq_static(ctx,
-               REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
-               REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
-       );
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
        u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1940,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
        if (stat & BIT_INTR_SCDT_CHANGE) {
                u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
 
-               if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
-                       if (ctx->sink_type == SINK_HDMI)
-                               /* enable infoframe interrupt */
-                               sii8620_scdt_high(ctx);
-                       else
-                               sii8620_start_video(ctx);
-               }
+               if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+                       sii8620_start_video(ctx);
        }
 
        sii8620_write(ctx, REG_INTR5, stat);
 }
 
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
-       u8 vsif[11];
-
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2,
-                     VAL_RX_HDMI_CTRL2_DEFVAL |
-                     BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
-                        ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
-       sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
-       sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
-                        ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
-       u8 stat = sii8620_readb(ctx, REG_INTR8)
-               & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
-       sii8620_write(ctx, REG_INTR8, stat);
-
-       if (stat & BIT_CEA_NEW_VSI)
-               sii8620_new_vsi(ctx);
-
-       if (stat & BIT_CEA_NEW_AVI)
-               sii8620_new_avi(ctx);
-
-       if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
-               sii8620_start_video(ctx);
-}
-
 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
 {
        if (ret < 0)
@@ -2043,11 +1997,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
 
        if (stat & BIT_DDC_CMD_DONE) {
                sii8620_write(ctx, REG_INTR3_MASK, 0);
-               if (sii8620_is_mhl3(ctx))
+               if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
                        sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
                                           MHL_INT_RC_FEAT_REQ);
                else
-                       sii8620_edid_read(ctx, 0);
+                       sii8620_enable_hpd(ctx);
        }
        sii8620_write(ctx, REG_INTR3, stat);
 }
@@ -2074,7 +2028,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
                { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
                { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
                { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
-               { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
        };
        struct sii8620 *ctx = data;
        u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2065,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
                dev_err(dev, "Error powering on, %d.\n", ret);
                return;
        }
-       sii8620_hw_reset(ctx);
 
        sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
        ret = sii8620_clear_error(ctx);
@@ -2268,17 +2220,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
        rc_unregister_device(ctx->rc_dev);
 }
 
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+                                      const struct drm_display_mode *mode)
+{
+       int max_pclk, max_pclk_pp_mode;
+
+       if (sii8620_is_mhl3(ctx)) {
+               max_pclk = MHL3_MAX_PCLK;
+               max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+       } else {
+               max_pclk = MHL1_MAX_PCLK;
+               max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+       }
+
+       if (mode->clock < max_pclk)
+               return 0;
+       else if (mode->clock < max_pclk_pp_mode)
+               return 1;
+       else
+               return -1;
+}
+
 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
                                         const struct drm_display_mode *mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
+       int pack_required = sii8620_is_packing_required(ctx, mode);
        bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
                        MHL_DCAP_VID_LINK_PPIXEL;
-       unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
-                                                      MHL1_MAX_LCLK;
-       max_pclk /= can_pack ? 2 : 3;
 
-       return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+       switch (pack_required) {
+       case 0:
+               return MODE_OK;
+       case 1:
+               return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+       default:
+               return MODE_CLOCK_HIGH;
+       }
 }
 
 static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2264,14 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
                               struct drm_display_mode *adjusted_mode)
 {
        struct sii8620 *ctx = bridge_to_sii8620(bridge);
-       int max_lclk;
-       bool ret = true;
 
        mutex_lock(&ctx->lock);
 
-       max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
-       if (max_lclk > 3 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 0;
-               goto end;
-       }
-       if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
-           max_lclk > 2 * adjusted_mode->clock) {
-               ctx->use_packed_pixel = 1;
-               goto end;
-       }
-       ret = false;
-end:
-       if (ret) {
-               u8 vic = drm_match_cea_mode(adjusted_mode);
-
-               if (!vic) {
-                       union hdmi_infoframe frm;
-                       u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
-                       /* FIXME: We need the connector here */
-                       drm_hdmi_vendor_infoframe_from_display_mode(
-                               &frm.vendor.hdmi, NULL, adjusted_mode);
-                       vic = frm.vendor.hdmi.vic;
-                       if (vic >= ARRAY_SIZE(mhl_vic))
-                               vic = 0;
-                       vic = mhl_vic[vic];
-               }
-               ctx->video_code = vic;
-               ctx->pixel_clock = adjusted_mode->clock;
-       }
+       ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+
        mutex_unlock(&ctx->lock);
-       return ret;
+
+       return true;
 }
 
 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
index b553a6f2ff0eb27dec7ad0aaeeb891992fe19ab8..7af748ed1c58dddfae7cb578760be3344273901c 100644 (file)
@@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
  */
 void drm_dev_unplug(struct drm_device *dev)
 {
-       drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
-
        /*
         * After synchronizing any critical read section is guaranteed to see
         * the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
         */
        dev->unplugged = true;
        synchronize_srcu(&drm_unplug_srcu);
+
+       drm_dev_unregister(dev);
+
+       mutex_lock(&drm_global_mutex);
+       if (dev->open_count == 0)
+               drm_dev_put(dev);
+       mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
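
Flipping ->unplugged and synchronizing the SRCU domain before unregistering guarantees that every reader that entered its critical section before the unplug finishes against a fully registered device, and every later reader observes the flag. Readers are expected to use the existing guard pair, as in this sketch:

    int idx;

    if (!drm_dev_enter(dev, &idx))
            return -ENODEV;         /* device already unplugged */

    /* ... safely touch hardware or device-private state ... */

    drm_dev_exit(idx);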
 
index 1f8031e30f5397bf97054b3fcffc11408c35759e..cdb10f885a4febea85fc5272e22f1378d770da8b 100644 (file)
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
 
        drm_mode_object_unregister(blob->dev, &blob->base);
 
-       kfree(blob);
+       kvfree(blob);
 }
 
 /**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
                return ERR_PTR(-EINVAL);
 
-       blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+       blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
        if (!blob)
                return ERR_PTR(-ENOMEM);
 
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
        ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
                                    true, drm_property_free_blob);
        if (ret) {
-               kfree(blob);
+               kvfree(blob);
                return ERR_PTR(-EINVAL);
        }
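
Blob property payloads are user-sized and may be far too large for the page allocator, so the allocation moves to the kvmalloc family, which tries kmalloc first and transparently falls back to vmalloc. The one rule is symmetry: memory from kv*alloc must be released with kvfree(), which handles both backings. A minimal sketch (struct hdr and payload_len are hypothetical):

    #include <linux/mm.h>

    void *blob = kvzalloc(sizeof(struct hdr) + payload_len, GFP_KERNEL);

    if (!blob)
            return -ENOMEM;

    /* ... fill and publish the blob ... */

    kvfree(blob);   /* correct whether kmalloc- or vmalloc-backed */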
 
index e5013a9991477eda57913a80f7c978a199727a62..540b59fb41038fcbed3f16528edf9f85e3976071 100644 (file)
@@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = {
        },
 };
 
+static struct platform_device *etnaviv_drm;
+
 static int __init etnaviv_init(void)
 {
+       struct platform_device *pdev;
        int ret;
        struct device_node *np;
 
@@ -644,7 +647,7 @@ static int __init etnaviv_init(void)
 
        ret = platform_driver_register(&etnaviv_platform_driver);
        if (ret != 0)
-               platform_driver_unregister(&etnaviv_gpu_driver);
+               goto unregister_gpu_driver;
 
        /*
         * If the DT contains at least one available GPU device, instantiate
@@ -653,20 +656,33 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-
-               platform_device_register_simple("etnaviv", -1, NULL, 0);
+               pdev = platform_device_register_simple("etnaviv", -1,
+                                                      NULL, 0);
+               if (IS_ERR(pdev)) {
+                       ret = PTR_ERR(pdev);
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               etnaviv_drm = pdev;
                of_node_put(np);
                break;
        }
 
+       return 0;
+
+unregister_platform_driver:
+       platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+       platform_driver_unregister(&etnaviv_gpu_driver);
        return ret;
 }
 module_init(etnaviv_init);
 
 static void __exit etnaviv_exit(void)
 {
-       platform_driver_unregister(&etnaviv_gpu_driver);
+       platform_device_unregister(etnaviv_drm);
        platform_driver_unregister(&etnaviv_platform_driver);
+       platform_driver_unregister(&etnaviv_gpu_driver);
 }
 module_exit(etnaviv_exit);
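
The init path now follows the conventional goto-based unwind: each registration that succeeds gains a label that later failures jump back through, and module exit tears everything down in reverse order. The shape, reduced to a sketch with placeholder names:

    static int __init example_init(void)
    {
            int ret;

            ret = register_a();
            if (ret)
                    return ret;

            ret = register_b();
            if (ret)
                    goto err_unregister_a;

            ret = register_c();
            if (ret)
                    goto err_unregister_b;

            return 0;

    err_unregister_b:
            unregister_b();
    err_unregister_a:
            unregister_a();
            return ret;
    }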
 
index dd430f0f8ff5158975e21f26ad97fc3aa5bae2cb..90f17ff7888e7042b85f38e5ef114f77fa42dccd 100644 (file)
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
        struct work_struct sync_point_work;
        int sync_point_event;
 
+       /* hang detection */
+       u32 hangcheck_dma_addr;
+
        void __iomem *mmio;
        int irq;
 
index a74eb57af15bc65ba2ff4a2ed3906da29afe959b..50d6b88cb7aab3f99025829e1f544aae957d6d7f 100644 (file)
@@ -10,6 +10,7 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_sched.h"
+#include "state.xml.h"
 
 static int etnaviv_job_hang_limit = 0;
 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 {
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
+       u32 dma_addr;
+       int change;
+
+       /*
+        * If the GPU managed to complete this jobs fence, the timout is
+        * spurious. Bail out.
+        */
+       if (fence_completed(gpu, submit->out_fence->seqno))
+               return;
+
+       /*
+        * If the GPU is still making forward progress on the front-end (which
+        * should never loop) we shift out the timeout to give it a chance to
+        * finish the job.
+        */
+       dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+       change = dma_addr - gpu->hangcheck_dma_addr;
+       if (change < 0 || change > 16) {
+               gpu->hangcheck_dma_addr = dma_addr;
+               schedule_delayed_work(&sched_job->work_tdr,
+                                     sched_job->sched->timeout);
+               return;
+       }
 
        /* block scheduler */
        kthread_park(gpu->sched.thread);
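
The timeout handler now distinguishes a wedged GPU from a merely slow one: if the front-end DMA address moved since the last check, the job is assumed alive and the timer is re-armed instead of triggering recovery. The progress gate in isolation (a sketch; the 16-byte window mirrors the driver's assumption that the FE never loops within one fetch):

    #include <linux/workqueue.h>

    /* Returns true if recovery should proceed, false if re-armed. */
    static bool watchdog_should_fire(u32 cur_addr, u32 *last_addr,
                                     struct delayed_work *tdr,
                                     unsigned long timeout)
    {
            int change = cur_addr - *last_addr;

            if (change < 0 || change > 16) {
                    *last_addr = cur_addr;                  /* progress seen */
                    schedule_delayed_work(tdr, timeout);    /* check again later */
                    return false;
            }
            return true;                                    /* truly stuck */
    }
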
index 82c95c34447fe19d34018b15a9b6a92e43ca917a..e868773ea5097cf3afd0592c2bf51ef66adf84cc 100644 (file)
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
        unsigned long val;
 
        val = readl(ctx->addr + DECON_WINCONx(win));
-       val &= ~WINCONx_BPPMODE_MASK;
+       val &= WINCONx_ENWIN_F;
 
        switch (fb->format->format) {
        case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
                writel(val, ctx->addr + DECON_VIDOSDxB(win));
        }
 
-       val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
-               VIDOSD_Wx_ALPHA_B_F(0x0);
+       val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+               VIDOSD_Wx_ALPHA_B_F(0xff);
        writel(val, ctx->addr + DECON_VIDOSDxC(win));
 
        val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
index a81b4a5e24a77397e4748a914357424cca60642d..ed3cc2989f93f2fe17f086e7ee8413702bd9d944 100644 (file)
@@ -420,7 +420,7 @@ err_mode_config_cleanup:
 err_free_private:
        kfree(private);
 err_free_drm:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        return ret;
 }
@@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev)
        drm->dev_private = NULL;
        dev_set_drvdata(dev, NULL);
 
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 }
 
 static const struct component_master_ops exynos_drm_ops = {
index 7fcc1a7ab1a079fe63bfa6d45687bae146ac2920..27b7d34d776cb23fdaa1cd35064236cd313f7edf 100644 (file)
@@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
 err:
        while (i--)
-               drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+               drm_gem_object_put_unlocked(&exynos_gem[i]->base);
 
        return ERR_PTR(ret);
 }
index 6127ef25acd60ec5ec6db92655d364220963fc0b..e8d0670bb5f8d280a9e17a1e4270c0464b1e4abc 100644 (file)
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
 static void fimc_set_window(struct fimc_context *ctx,
                            struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg, h1, h2, v1, v2;
 
        /* cropped image */
        h1 = buf->rect.x;
-       h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+       h2 = real_width - buf->rect.w - buf->rect.x;
        v1 = buf->rect.y;
        v2 = buf->buf.height - buf->rect.h - buf->rect.y;
 
        DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
                buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
-               buf->buf.width, buf->buf.height);
+               real_width, buf->buf.height);
        DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
 
        /*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
 static void fimc_src_set_size(struct fimc_context *ctx,
                              struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg;
 
-       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
 
        /* original size */
-       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+       cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
                EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
 
        fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
         * for now, we support only ITU601 8 bit mode
         */
        cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
-               EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+               EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
                EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
        fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
 
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
 static void fimc_dst_set_size(struct fimc_context *ctx,
                             struct exynos_drm_ipp_buffer *buf)
 {
+       unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
        u32 cfg, cfg_ext;
 
-       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+       DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
 
        /* original size */
-       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+       cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
                EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
 
        fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
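
All three FIMC size registers are now programmed with the line width implied by the buffer pitch rather than the user-visible width, since the engine walks whole scanlines of the underlying allocation. The relation, using the real drm_format_info() helper (values illustrative):

    #include <drm/drm_fourcc.h>

    const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB8888);
    unsigned int pitch = 4096;      /* bytes per scanline of the allocation */
    unsigned int real_width = pitch / info->cpp[0];   /* 4096 / 4 = 1024 px */
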
index 6e1494fa71b40d70a59b768a1ac2caf6d9799dfe..bdf5a7655228b69ad799ffdce859876cf176563c 100644 (file)
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
 
        /* drop reference from allocate - handle holds it now. */
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
@@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
 
        exynos_gem = to_exynos_gem(obj);
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return exynos_gem->size;
 }
@@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                return;
        }
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 }
 
 static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
        args->flags = exynos_gem->flags;
        args->size = exynos_gem->size;
 
-       drm_gem_object_unreference_unlocked(obj);
+       drm_gem_object_put_unlocked(obj);
 
        return 0;
 }
index 35ac66730563944e83dcb1a6be7ec39999ead086..7ba414b52faa940595a028db7fa3e959bc7a58cd 100644 (file)
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
                        GSC_IN_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+               break;
        case DRM_FORMAT_NV61:
-               cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
-                       GSC_IN_YUV420_2P);
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
                break;
        case DRM_FORMAT_YUV422:
                cfg |= GSC_IN_YUV422_3P;
                break;
        case DRM_FORMAT_YUV420:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+               break;
        case DRM_FORMAT_YVU420:
-               cfg |= GSC_IN_YUV420_3P;
+               cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+               break;
        case DRM_FORMAT_NV16:
-               cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
-                       GSC_IN_YUV420_2P);
+               cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
                break;
        }
 
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
 
        switch (degree) {
        case DRM_MODE_ROTATE_0:
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg |= GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg |= GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_90:
                cfg |= GSC_IN_ROT_90;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg |= GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg |= GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg |= GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_180:
                cfg |= GSC_IN_ROT_180;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg &= ~GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg &= ~GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg &= ~GSC_IN_ROT_YFLIP;
                break;
        case DRM_MODE_ROTATE_270:
                cfg |= GSC_IN_ROT_270;
-               if (rotation & DRM_MODE_REFLECT_Y)
-                       cfg &= ~GSC_IN_ROT_XFLIP;
                if (rotation & DRM_MODE_REFLECT_X)
+                       cfg &= ~GSC_IN_ROT_XFLIP;
+               if (rotation & DRM_MODE_REFLECT_Y)
                        cfg &= ~GSC_IN_ROT_YFLIP;
                break;
        }
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
        cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
                GSC_SRCIMG_WIDTH_MASK);
 
-       cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+       cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
                GSC_SRCIMG_HEIGHT(buf->buf.height));
 
        gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
                        GSC_OUT_CHROMA_ORDER_CRCB);
                break;
        case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV61:
                cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
                break;
+       case DRM_FORMAT_NV61:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+               break;
        case DRM_FORMAT_YUV422:
+               cfg |= GSC_OUT_YUV422_3P;
+               break;
        case DRM_FORMAT_YUV420:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+               break;
        case DRM_FORMAT_YVU420:
-               cfg |= GSC_OUT_YUV420_3P;
+               cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
                break;
        case DRM_FORMAT_NV12:
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+               break;
        case DRM_FORMAT_NV16:
-               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
-                       GSC_OUT_YUV420_2P);
+               cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
                break;
        }
 
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
        /* original size */
        cfg = gsc_read(GSC_DSTIMG_SIZE);
        cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
-       cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+       cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
               GSC_DSTIMG_HEIGHT(buf->buf.height);
        gsc_write(cfg, GSC_DSTIMG_SIZE);
 
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
 };
 
 static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
-       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+       { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
        { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
        { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
        { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
index 26374e58c5578dc326356e19520b522e1d45412c..b435db8fc91677927c6e735d78d99232d7a91e0e 100644 (file)
@@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
        int ret = 0;
        int i;
 
-       /* basic checks */
-       if (buf->buf.width == 0 || buf->buf.height == 0)
-               return -EINVAL;
-       buf->format = drm_format_info(buf->buf.fourcc);
-       for (i = 0; i < buf->format->num_planes; i++) {
-               unsigned int width = (i == 0) ? buf->buf.width :
-                            DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
-               if (buf->buf.pitch[i] == 0)
-                       buf->buf.pitch[i] = width * buf->format->cpp[i];
-               if (buf->buf.pitch[i] < width * buf->format->cpp[i])
-                       return -EINVAL;
-               if (!buf->buf.gem_id[i])
-                       return -ENOENT;
-       }
-
-       /* pitch for additional planes must match */
-       if (buf->format->num_planes > 2 &&
-           buf->buf.pitch[1] != buf->buf.pitch[2])
-               return -EINVAL;
-
        /* get GEM buffers and check their size */
        for (i = 0; i < buf->format->num_planes; i++) {
                unsigned int height = (i == 0) ? buf->buf.height :
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
        IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
 };
 
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
        [IPP_LIMIT_BUFFER]  = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
        [IPP_LIMIT_AREA]    = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
                                DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
        enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
        struct drm_ipp_limit l;
        struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+       int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
 
        if (!limits)
                return 0;
 
        __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
-       if (!__size_limit_check(buf->buf.width, &l.h) ||
+       if (!__size_limit_check(real_width, &l.h) ||
            !__size_limit_check(buf->buf.height, &l.v))
                return -EINVAL;
 
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
        return 0;
 }
 
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+                                      struct exynos_drm_ipp_buffer *buf,
+                                      struct exynos_drm_ipp_buffer *src,
+                                      struct exynos_drm_ipp_buffer *dst,
+                                      bool rotate, bool swap)
+{
+       const struct exynos_drm_ipp_formats *fmt;
+       int ret, i;
+
+       fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+                              buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+                                           DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+       if (!fmt) {
+               DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+                                buf == src ? "src" : "dst");
+               return -EINVAL;
+       }
+
+       /* basic checks */
+       if (buf->buf.width == 0 || buf->buf.height == 0)
+               return -EINVAL;
+
+       buf->format = drm_format_info(buf->buf.fourcc);
+       for (i = 0; i < buf->format->num_planes; i++) {
+               unsigned int width = (i == 0) ? buf->buf.width :
+                            DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+               if (buf->buf.pitch[i] == 0)
+                       buf->buf.pitch[i] = width * buf->format->cpp[i];
+               if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+                       return -EINVAL;
+               if (!buf->buf.gem_id[i])
+                       return -ENOENT;
+       }
+
+       /* pitch for additional planes must match */
+       if (buf->format->num_planes > 2 &&
+           buf->buf.pitch[1] != buf->buf.pitch[2])
+               return -EINVAL;
+
+       /* check driver limits */
+       ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+                                              fmt->num_limits,
+                                              rotate,
+                                              buf == dst ? swap : false);
+       if (ret)
+               return ret;
+       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+                                               fmt->limits,
+                                               fmt->num_limits, swap);
+       return ret;
+}
+
 static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
 {
        struct exynos_drm_ipp *ipp = task->ipp;
-       const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
        struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
        unsigned int rotation = task->transform.rotation;
        int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
                return -EINVAL;
        }
 
-       src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
-                                  DRM_EXYNOS_IPP_FORMAT_SOURCE);
-       if (!src_fmt) {
-               DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
-               return -EINVAL;
-       }
-       ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
-                                              src_fmt->num_limits,
-                                              rotate, false);
-       if (ret)
-               return ret;
-       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
-                                               src_fmt->limits,
-                                               src_fmt->num_limits, swap);
+       ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
        if (ret)
                return ret;
 
-       dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
-                                  DRM_EXYNOS_IPP_FORMAT_DESTINATION);
-       if (!dst_fmt) {
-               DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
-               return -EINVAL;
-       }
-       ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
-                                              dst_fmt->num_limits,
-                                              false, swap);
-       if (ret)
-               return ret;
-       ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
-                                               dst_fmt->limits,
-                                               dst_fmt->num_limits, swap);
+       ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
        if (ret)
                return ret;
 
index 38a2a7f1204be7a9ef416556db031cf8eaa07770..7098c6d35266bd1116e3be83feabc836bad30cb8 100644 (file)
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
        if (plane->state) {
                exynos_state = to_exynos_plane_state(plane->state);
                if (exynos_state->base.fb)
-                       drm_framebuffer_unreference(exynos_state->base.fb);
+                       drm_framebuffer_put(exynos_state->base.fb);
                kfree(exynos_state);
                plane->state = NULL;
        }
index 1a76dd3d52e1dc5b63d81bd19b49e4120e23891d..a820a68429b9a8f56be132d251b2f4ea81f6c1b4 100644 (file)
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
        val &= ~ROT_CONTROL_FLIP_MASK;
 
        if (rotation & DRM_MODE_REFLECT_X)
-               val |= ROT_CONTROL_FLIP_HORIZONTAL;
-       if (rotation & DRM_MODE_REFLECT_Y)
                val |= ROT_CONTROL_FLIP_VERTICAL;
+       if (rotation & DRM_MODE_REFLECT_Y)
+               val |= ROT_CONTROL_FLIP_HORIZONTAL;
 
        val &= ~ROT_CONTROL_ROT_MASK;
 
index 91d4382343d080abd4607fd78a58729878eda844..0ddb6eec7b113ea306fea4bde563e8ecb9945495 100644 (file)
@@ -30,6 +30,7 @@
 #define scaler_write(cfg, offset)      writel(cfg, scaler->regs + (offset))
 #define SCALER_MAX_CLK                 4
 #define SCALER_AUTOSUSPEND_DELAY       2000
+#define SCALER_RESET_WAIT_RETRIES      100
 
 struct scaler_data {
        const char      *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
 static u32 scaler_get_format(u32 drm_fmt)
 {
        switch (drm_fmt) {
-       case DRM_FORMAT_NV21:
-               return SCALER_YUV420_2P_UV;
        case DRM_FORMAT_NV12:
+               return SCALER_YUV420_2P_UV;
+       case DRM_FORMAT_NV21:
                return SCALER_YUV420_2P_VU;
        case DRM_FORMAT_YUV420:
                return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
                return SCALER_YUV422_1P_UYVY;
        case DRM_FORMAT_YVYU:
                return SCALER_YUV422_1P_YVYU;
-       case DRM_FORMAT_NV61:
-               return SCALER_YUV422_2P_UV;
        case DRM_FORMAT_NV16:
+               return SCALER_YUV422_2P_UV;
+       case DRM_FORMAT_NV61:
                return SCALER_YUV422_2P_VU;
        case DRM_FORMAT_YUV422:
                return SCALER_YUV422_3P;
-       case DRM_FORMAT_NV42:
-               return SCALER_YUV444_2P_UV;
        case DRM_FORMAT_NV24:
+               return SCALER_YUV444_2P_UV;
+       case DRM_FORMAT_NV42:
                return SCALER_YUV444_2P_VU;
        case DRM_FORMAT_YUV444:
                return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
        return 0;
 }
 
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+       int retry = SCALER_RESET_WAIT_RETRIES;
+
+       scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+       do {
+               cpu_relax();
+       } while (--retry > 1 &&
+                scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+       do {
+               cpu_relax();
+               scaler_write(1, SCALER_INT_EN);
+       } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+       return retry ? 0 : -EIO;
+}
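
An equivalent bounded wait could also be written with the kernel's iopoll helpers, which package the poll/sleep/timeout bookkeeping (a sketch; scaler_regs stands in for the driver's mapped register base):

    #include <linux/iopoll.h>

    u32 val;
    int ret;

    /* Poll up to 1 ms, in 10 us steps, for the soft-reset bit to clear. */
    ret = readl_poll_timeout(scaler_regs + SCALER_CFG, val,
                             !(val & SCALER_CFG_SOFT_RESET), 10, 1000);
    if (ret)
            return -EIO;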
+
 static inline void scaler_enable_int(struct scaler_context *scaler)
 {
        u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
        u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
        struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
 
-       scaler->task = task;
-
        pm_runtime_get_sync(scaler->dev);
+       if (scaler_reset(scaler)) {
+               pm_runtime_put(scaler->dev);
+               return -EIO;
+       }
+
+       scaler->task = task;
 
        scaler_set_src_fmt(scaler, src_fmt);
        scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
 
 static inline u32 scaler_get_int_status(struct scaler_context *scaler)
 {
-       return scaler_read(SCALER_INT_STATUS);
+       u32 val = scaler_read(SCALER_INT_STATUS);
+
+       scaler_write(val, SCALER_INT_STATUS);
+
+       return val;
 }
 
 static inline int scaler_task_done(u32 val)
index 4704a993cbb7f003a51b901ae9b291a9adc9fe09..16b39734115c93855d82a5de94e16cc5e2570940 100644 (file)
 #define GSC_OUT_YUV420_3P              (3 << 4)
 #define GSC_OUT_YUV422_1P              (4 << 4)
 #define GSC_OUT_YUV422_2P              (5 << 4)
+#define GSC_OUT_YUV422_3P              (6 << 4)
 #define GSC_OUT_YUV444                 (7 << 4)
 #define GSC_OUT_TILE_TYPE_MASK         (1 << 2)
 #define GSC_OUT_TILE_C_16x8            (0 << 2)
index b51c05d03f14a1790ba43e70065af6acd74887ef..7f562410f9cf8aab7c47462fcbe7fd07fea68c73 100644 (file)
@@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
        struct intel_vgpu *vgpu = s->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
+       u32 ctx_sr_ctl;
 
        if (offset + 4 > gvt->device_info.mmio_size) {
                gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
                patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
        }
 
+       /* TODO
+        * Right now we only scan the LRI command on KBL, and only in the
+        * inhibit context. That is good enough to support initializing MMIO
+        * by LRI command in a vGPU inhibit context on KBL.
+        */
+       if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+                       intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+                       !strncmp(cmd, "lri", 3)) {
+               intel_gvt_hypervisor_read_gpa(s->vgpu,
+                       s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+               /* check inhibit context */
+               if (ctx_sr_ctl & 1) {
+                       u32 data = cmd_val(s, index + 1);
+
+                       if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+                               intel_vgpu_mask_mmio_write(vgpu,
+                                                       offset, &data, 4);
+                       else
+                               vgpu_vreg(vgpu, offset) = data;
+               }
+       }
+
        /* TODO: Update the global mask if this MMIO is a masked-MMIO */
        intel_gvt_mmio_set_cmd_accessed(gvt, offset);
        return 0;
index 6d8180e8d1e21a71916e8b6cad404dab8d8c3257..4b072ade8c389372bcf1e161f6257305dec043bd 100644 (file)
@@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_B << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_C << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
                        ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
                        TRANS_DDI_PORT_MASK);
                vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+                       (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
                        (PORT_D << TRANS_DDI_PORT_SHIFT) |
                        TRANS_DDI_FUNC_ENABLE);
                if (IS_BROADWELL(dev_priv)) {
index 23296547da95e8634c3bbaa401225c2d415498d5..4efec8fa6c1d30aa9c7853131ad299fb8bbef169 100644 (file)
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }
+       mm->ggtt_mm.last_partial_off = -1UL;
 
        return mm;
 }
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
+               mm->ggtt_mm.last_partial_off = -1UL;
        }
 
        vgpu_free_mm(mm);
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
 
+       /* If the ggtt entry size is 8 bytes and the write is split into
+        * two 4-byte writes, we assume those writes are consecutive.
+        * Otherwise, we abort and report an error.
+        */
+       if (bytes < info->gtt_entry_size) {
+               if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+                       /* the first partial part*/
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+                       return 0;
+               } else if ((g_gtt_index ==
+                               (ggtt_mm->ggtt_mm.last_partial_off >>
+                               info->gtt_entry_size_shift)) &&
+                       (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+                       /* the second partial part */
+
+                       int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+                               (info->gtt_entry_size - 1);
+
+                       memcpy((void *)&e.val64 + last_off,
+                               (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+                               last_off, bytes);
+
+                       ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+               } else {
+                       int last_offset;
+
+                       gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+                                       ggtt_mm->ggtt_mm.last_partial_off, off,
+                                       bytes, info->gtt_entry_size);
+
+                       /* set host ggtt entry to scratch page and clear
+                        * virtual ggtt entry as not present for the last
+                        * partially written offset
+                        */
+                       last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+                                       (~(info->gtt_entry_size - 1));
+
+                       ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate_pte(vgpu, &m);
+                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+                       ops->clear_present(&m);
+                       ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+                       ggtt_invalidate(gvt->dev_priv);
+
+                       ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+                       ops->clear_present(&e);
+                       ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+                       ggtt_mm->ggtt_mm.last_partial_off = off;
+                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+                       return 0;
+               }
+       }
+
        if (ops->test_present(&e)) {
                gfn = ops->get_pfn(&e);
                m = e;
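
The new bookkeeping caches the first 4-byte guest write in last_partial_off/last_partial_data and completes the 8-byte GGTT entry only when the sibling half arrives; any other sequence resets the state and scrubs the half-written entry. The merge itself is byte-offset surgery on the 64-bit value (a sketch with made-up values; little-endian, as on the hardware GVT-g supports):

    u64 entry = 0;
    u32 lo = 0x12345678, hi = 0x9abcdef0;

    memcpy((void *)&entry + 0, &lo, 4);     /* first partial write, offset 0 */
    memcpy((void *)&entry + 4, &hi, 4);     /* second partial write, offset 4 */
    /* entry == 0x9abcdef012345678: now ready to validate and commit */
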
index 3792f2b7f4ff0686832458efcf533248c4aa356d..97e62647418a0a48fa1d1da6ace5a800f7d0de23 100644 (file)
@@ -150,6 +150,8 @@ struct intel_vgpu_mm {
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
+                       unsigned long last_partial_off;
+                       u64 last_partial_data;
                } ggtt_mm;
        };
 };
index 05d15a095310d41b75d6ab64aa04162799f393a1..858967daf04b21792be2f578c9b27a6e75ba8277 100644 (file)
@@ -268,6 +268,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED (1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN      (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX       (1 << 7)
 
        struct gvt_mmio_block *mmio_block;
        unsigned int num_mmio_block;
@@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
        return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if an MMIO has an in-context mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has an in-context mask, false otherwise.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+{
+       return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark an MMIO as saved/restored in the logical context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+                       struct intel_gvt *gvt, unsigned int offset)
+{
+       gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
index bcbc47a88a7006a06107005b0faad5c02820c215..8f1caacdc78a4037efb56aeb1fa1cc488a5103f3 100644 (file)
@@ -3045,6 +3045,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
+/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+               void *p_data, unsigned int bytes)
+{
+       u32 mask, old_vreg;
+
+       old_vreg = vgpu_vreg(vgpu, offset);
+       write_vreg(vgpu, offset, p_data, bytes);
+       mask = vgpu_vreg(vgpu, offset) >> 16;
+       vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+                               (vgpu_vreg(vgpu, offset) & mask);
+
+       return 0;
+}
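
Masked MMIO registers on i915 carry a write-enable mask in bits 31:16 and the payload in bits 15:0, so a write only affects bits whose mask bit is set. A worked example of the blend performed above:

    u32 old  = 0x0000f0f0;                   /* current vreg value        */
    u32 wr   = 0x00ff0055;                   /* mask 0x00ff, data 0x0055  */
    u32 mask = wr >> 16;                     /* 0x00ff                    */
    u32 val  = (old & ~mask) | (wr & mask);  /* 0x0000f055                */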
+
 /**
  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
  * force-nopriv register
index 71b6208759439d8ca74d3dbee4ea4a95059e779d..dac8c6401e26a010f5ed36bd441b9f429526c24e 100644 (file)
@@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
                           void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+                                 void *p_data, unsigned int bytes);
 #endif
index 0f949554d118c22e1313cfecc2948d0fcc4672ef..5ca9caf7552a6145b0ccb91a3f18a0c3d4764841 100644 (file)
@@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 
        for (mmio = gvt->engine_mmio_list.mmio;
             i915_mmio_reg_valid(mmio->reg); mmio++) {
-               if (mmio->in_context)
+               if (mmio->in_context) {
                        gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+                       intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+               }
        }
 }
index 34c125e2d90c094c98759e127d2f93780cd65788..52f3b91d14fd00f7751c874600a4763eadaee8bf 100644 (file)
@@ -340,14 +340,21 @@ struct drm_i915_file_private {
 
        unsigned int bsd_engine;
 
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments the per-client ban score, and hangs in
+ * short succession also increment it. Once the ban threshold is reached,
+ * the client is considered banned and submitting more work will fail.
+ * This is a stop-gap measure to limit a badly behaving client's access
+ * to the GPU. Note that unbannable contexts never increment the client
+ * ban score.
  */
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
-       atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST    1
+#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN   3
+#define I915_CLIENT_SCORE_BANNED       9
+       /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+       atomic_t ban_score;
+       unsigned long hang_timestamp;
 };
 
 /* Interface history:
@@ -2238,9 +2245,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
  **/
 static inline struct scatterlist *__sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        return sg_is_last(sg) ? NULL : ____sg_next(sg);
 }
 
index 3704f4c0c2c970c31b0c6031050b20126e7ae2a9..17c5097721e8f27795dae3487a21fb59d9ec75a9 100644 (file)
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
        bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
        struct i915_vma *vma;
        pgoff_t page_offset;
-       unsigned int flags;
        int ret;
 
        /* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
                goto err_unlock;
        }
 
-       /* If the object is smaller than a couple of partial vma, it is
-        * not worth only creating a single partial vma - we may as well
-        * clear enough space for the full object.
-        */
-       flags = PIN_MAPPABLE;
-       if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
-               flags |= PIN_NONBLOCK | PIN_NONFAULT;
 
        /* Now pin it into the GTT as needed */
-       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+                                      PIN_MAPPABLE |
+                                      PIN_NONBLOCK |
+                                      PIN_NONFAULT);
        if (IS_ERR(vma)) {
                /* Use a partial view if it is bigger than available space */
                struct i915_ggtt_view view =
                        compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+               unsigned int flags;
 
-               /* Userspace is now writing through an untracked VMA, abandon
+               flags = PIN_MAPPABLE;
+               if (view.type == I915_GGTT_VIEW_NORMAL)
+                       flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+               /*
+                * Userspace is now writing through an untracked VMA, abandon
                 * all hope that the hardware is able to track future writes.
                 */
                obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
 
-               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+               vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+               if (IS_ERR(vma) && !view.type) {
+                       flags = PIN_MAPPABLE;
+                       view.type = I915_GGTT_VIEW_PARTIAL;
+                       vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+               }
        }
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
@@ -2933,32 +2939,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
        return 0;
 }
 
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+                                       const struct i915_gem_context *ctx)
+{
+       unsigned int score;
+       unsigned long prev_hang;
+
+       if (i915_gem_context_is_banned(ctx))
+               score = I915_CLIENT_SCORE_CONTEXT_BAN;
+       else
+               score = 0;
+
+       prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+       if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+               score += I915_CLIENT_SCORE_HANG_FAST;
+
+       if (score) {
+               atomic_add(score, &file_priv->ban_score);
+
+               DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+                                ctx->name, score,
+                                atomic_read(&file_priv->ban_score));
+       }
+}
+
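
The fast-hang test relies on xchg() to publish the new hang timestamp and fetch the previous one in a single atomic step, so two concurrent hang reports cannot both read the same old stamp. A lock-free sketch of the idiom with C11 atomics (illustrative, not the kernel helpers; wraparound is handled by the unsigned subtraction):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* True if the previous event happened less than @window ticks ago. */
static bool event_is_fast(_Atomic unsigned long *stamp,
			  unsigned long now, unsigned long window)
{
	/* swap in the new timestamp, get the old one back atomically */
	unsigned long prev = atomic_exchange(stamp, now);

	return now - prev < window;
}

int main(void)
{
	_Atomic unsigned long stamp = 100;

	printf("%d\n", event_is_fast(&stamp, 130, 60)); /* 1: 30 < 60   */
	printf("%d\n", event_is_fast(&stamp, 300, 60)); /* 0: 170 >= 60 */
	return 0;
}
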
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-       bool banned;
+       unsigned int score;
+       bool banned, bannable;
 
        atomic_inc(&ctx->guilty_count);
 
-       banned = false;
-       if (i915_gem_context_is_bannable(ctx)) {
-               unsigned int score;
+       bannable = i915_gem_context_is_bannable(ctx);
+       score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+       banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
 
-               score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-                                         &ctx->ban_score);
-               banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+       DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+                        ctx->name, atomic_read(&ctx->guilty_count),
+                        score, yesno(banned && bannable));
 
-               DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-                                ctx->name, score, yesno(banned));
-       }
-       if (!banned)
+       /* Cool contexts don't accumulate client ban score */
+       if (!bannable)
                return;
 
-       i915_gem_context_set_banned(ctx);
-       if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-               atomic_inc(&ctx->file_priv->context_bans);
-               DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-                                ctx->name, atomic_read(&ctx->file_priv->context_bans));
-       }
+       if (banned)
+               i915_gem_context_set_banned(ctx);
+
+       if (!IS_ERR_OR_NULL(ctx->file_priv))
+               i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5764,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
        file_priv->bsd_engine = -1;
+       file_priv->hang_timestamp = jiffies;
 
        ret = i915_gem_context_open(i915, file);
        if (ret)
index 33f8a4b3c98170f2857e15255e4fc23ae8bbb49e..060335d3d9e0b44d19c9b3147e2e1496d1e8e571 100644 (file)
@@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-       return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+       return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
index f627a8c47c58a36f6ff92f17a4d6d672b28bc00b..22df17c8ca9b0fc75dc29189c6b7117002c5ad17 100644 (file)
@@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
 }
 
 static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+          unsigned int i, unsigned batch_idx,
+          struct i915_vma *vma)
 {
        struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
        int err;
@@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
        eb->flags[i] = entry->flags;
        vma->exec_flags = &eb->flags[i];
 
+       /*
+        * SNA is doing fancy tricks with compressing batch buffers, which leads
+        * to negative relocation deltas. Usually that works out ok since the
+        * relocate address is still positive, except when the batch is placed
+        * very low in the GTT. Ensure this doesn't happen.
+        *
+        * Note that actual hangs have only been observed on gen7, but for
+        * paranoia do it everywhere.
+        */
+       if (i == batch_idx) {
+               if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+                       eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+               if (eb->reloc_cache.has_fence)
+                       eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+               eb->batch = vma;
+       }
+
        err = 0;
        if (eb_pin_vma(eb, entry, vma)) {
                if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
        struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
        struct drm_i915_gem_object *obj;
-       unsigned int i;
+       unsigned int i, batch;
        int err;
 
        if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
        INIT_LIST_HEAD(&eb->relocs);
        INIT_LIST_HEAD(&eb->unbound);
 
+       batch = eb_batch_index(eb);
+
        for (i = 0; i < eb->buffer_count; i++) {
                u32 handle = eb->exec[i].handle;
                struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
                lut->handle = handle;
 
 add_vma:
-               err = eb_add_vma(eb, i, vma);
+               err = eb_add_vma(eb, i, batch, vma);
                if (unlikely(err))
                        goto err_vma;
 
                GEM_BUG_ON(vma != eb->vma[i]);
                GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+               GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+                          eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
        }
 
-       /* take note of the batch buffer before we might reorder the lists */
-       i = eb_batch_index(eb);
-       eb->batch = eb->vma[i];
-       GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
-       /*
-        * SNA is doing fancy tricks with compressing batch buffers, which leads
-        * to negative relocation deltas. Usually that works out ok since the
-        * relocate address is still positive, except when the batch is placed
-        * very low in the GTT. Ensure this doesn't happen.
-        *
-        * Note that actual hangs have only been observed on gen7, but for
-        * paranoia do it everywhere.
-        */
-       if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
-               eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
-       if (eb->reloc_cache.has_fence)
-               eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
        eb->args->flags |= __EXEC_VALIDATED;
        return eb_reserve(eb);
 
index f9bc3aaa90d0f5de110e893415be0a6ee1c40448..c16cb025755e46edb6038e15790732c5fda74b71 100644 (file)
@@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
 
                /*
                 * Clear the PIPE*STAT regs before the IIR
+                *
+                * Toggle the enable bits to make sure we get an
+                * edge in the ISR pipe event bit if we don't clear
+                * all the enabled status bits. Otherwise the edge
+                * triggered IIR on i965/g4x wouldn't notice that
+                * an interrupt is still pending.
                 */
-               if (pipe_stats[pipe])
-                       I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+               if (pipe_stats[pipe]) {
+                       I915_WRITE(reg, pipe_stats[pipe]);
+                       I915_WRITE(reg, enable_mask);
+               }
        }
        spin_unlock(&dev_priv->irq_lock);
 }
@@ -1990,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-       u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+       u32 hotplug_status = 0, hotplug_status_mask;
+       int i;
 
-       if (hotplug_status)
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+                       DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+       else
+               hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+       /*
+        * We absolutely have to clear all the pending interrupt
+        * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+        * interrupt bit won't have an edge, and the i965/g4x
+        * edge triggered IIR will not notice that an interrupt
+        * is still pending. We can't use PORT_HOTPLUG_EN to
+        * guarantee the edge as the act of toggling the enable
+        * bits can itself generate a new hotplug interrupt :(
+        */
+       for (i = 0; i < 10; i++) {
+               u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+               if (tmp == 0)
+                       return hotplug_status;
+
+               hotplug_status |= tmp;
                I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+       }
+
+       WARN_ONCE(1,
+                 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+                 I915_READ(PORT_HOTPLUG_STAT));
 
        return hotplug_status;
 }
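
Since acking PORT_HOTPLUG_STAT can itself raise a fresh status bit, the loop above keeps re-reading and re-acking until the register reads back clear, and gives up with a warning after ten rounds instead of spinning forever. A userspace model of that bounded ack loop (illustrative only; the fake device retriggers twice):

#include <stdint.h>
#include <stdio.h>

#define MAX_ACK_TRIES 10

static uint32_t fake_stat = 0x5;
static int glitches = 2;

static uint32_t read_stat(void) { return fake_stat; }

static void ack_stat(uint32_t bits)
{
	fake_stat &= ~bits;
	if (glitches-- > 0)
		fake_stat |= 0x2;	/* the ack raises a new event */
}

int main(void)
{
	uint32_t seen = 0;
	int i;

	for (i = 0; i < MAX_ACK_TRIES; i++) {
		uint32_t tmp = read_stat();

		if (!tmp)
			break;
		seen |= tmp;
		ack_stat(tmp);
	}
	printf("status 0x%x clear after %d pass(es)\n", seen, i); /* 0x7, 3 */
	return 0;
}
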
index f11bb213ec0784e4c50db5bd0ea0647a5419e0e1..7720569f20244114e027dca6b810b182771cfcee 100644 (file)
@@ -2425,12 +2425,17 @@ enum i915_power_well_id {
 #define _3D_CHICKEN    _MMIO(0x2084)
 #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB     (1 << 10)
 #define _3D_CHICKEN2   _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN       _MMIO(0x2088)
+#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX      (1 << 1)
+
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED                        (1 << 14)
 #define _3D_CHICKEN3   _MMIO(0x2090)
+#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX           (1 << 12)
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL            (1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE       (1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL         (1 << 5)
index 9324d476e0a7c356b39cb02374e904a2b0a95262..0531c01c3604663c9166e593d2c26fd45cbd0361 100644 (file)
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
-                       GEM_BUG_ON(vma->size >= obj->base.size);
+                       GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
index de0e22322c76ed649c2f36266e65247ed9d02c28..072b326d5ee0a77868de83818a334c68077c762e 100644 (file)
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
        int max_dotclk = dev_priv->max_dotclk_freq;
        int max_clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock < 25000)
                return MODE_CLOCK_LOW;
 
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
                                   struct intel_crtc_state *pipe_config,
                                   struct drm_connector_state *conn_state)
 {
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = true;
 
        return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
                                   struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
+
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
 
        pipe_config->has_pch_encoder = true;
 
index dee3a8e659f1d6c9dbe2040abd6e2ba42020070a..2cc6faa1daa8d464d5fa2c7a063ef7d6cb3965cc 100644 (file)
@@ -14469,12 +14469,22 @@ static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
                 const struct drm_display_mode *mode)
 {
+       /*
+        * Can't reject DBLSCAN here because Xorg ddxen can add piles
+        * of DBLSCAN modes to the output's mode list when they detect
+        * the scaling mode property on the connector. And they don't
+        * ask the kernel to validate those modes in any way until
+        * modeset time at which point the client gets a protocol error.
+        * So in order to not upset those clients we silently ignore the
+        * DBLSCAN flag on such connectors. For other connectors we will
+        * reject modes with the DBLSCAN flag in encoder->compute_config().
+        * And we always reject DBLSCAN modes in connector->mode_valid()
+        * as we never want such modes on the connector's mode list.
+        */
+
        if (mode->vscan > 1)
                return MODE_NO_VSCAN;
 
-       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-               return MODE_NO_DBLESCAN;
-
        if (mode->flags & DRM_MODE_FLAG_HSKEW)
                return MODE_H_ILLEGAL;
 
index 8320f0e8e3bef8587b94a908bf87f3eecdf63fc5..16faea30114ac01f2744b69150d7a26d7ca578c3 100644 (file)
@@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
        int max_rate, mode_rate, max_lanes, max_link_clock;
        int max_dotclk;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
        if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
-       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       if (HAS_GMCH_DISPLAY(dev_priv) &&
            adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return false;
 
@@ -2782,16 +2788,6 @@ static void intel_disable_dp(struct intel_encoder *encoder,
 static void g4x_disable_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *old_crtc_state,
                           const struct drm_connector_state *old_conn_state)
-{
-       intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
-       /* disable the port before the pipe on g4x */
-       intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
-                          const struct intel_crtc_state *old_crtc_state,
-                          const struct drm_connector_state *old_conn_state)
 {
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
@@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
        intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
                                const struct intel_crtc_state *old_crtc_state,
                                const struct drm_connector_state *old_conn_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
 
+       /*
+        * Bspec does not list a specific disable sequence for g4x DP.
+        * Follow the ilk+ sequence (disable pipe before the port) for
+        * g4x DP as it does not suffer from underruns like the normal
+        * g4x modeset sequence (disable pipe after the port).
+        */
        intel_dp_link_down(encoder, old_crtc_state);
 
        /* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
        drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+       if (!HAS_GMCH_DISPLAY(dev_priv))
                connector->interlace_allowed = true;
        connector->doublescan_allowed = 0;
 
@@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
                intel_encoder->enable = vlv_enable_dp;
                intel_encoder->disable = vlv_disable_dp;
                intel_encoder->post_disable = vlv_post_disable_dp;
-       } else if (INTEL_GEN(dev_priv) >= 5) {
-               intel_encoder->pre_enable = g4x_pre_enable_dp;
-               intel_encoder->enable = g4x_enable_dp;
-               intel_encoder->disable = ilk_disable_dp;
-               intel_encoder->post_disable = ilk_post_disable_dp;
        } else {
                intel_encoder->pre_enable = g4x_pre_enable_dp;
                intel_encoder->enable = g4x_enable_dp;
                intel_encoder->disable = g4x_disable_dp;
+               intel_encoder->post_disable = g4x_post_disable_dp;
        }
 
        intel_dig_port->dp.output_reg = output_reg;
index 9e6956c0868835a9bcdf156c45d151ee2479b99a..5890500a3a8b6640e587070e89d805b06c99baf7 100644 (file)
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
                                           DP_DPCD_QUIRK_LIMITED_M_N);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_pch_encoder = false;
        bpp = 24;
        if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        if (!intel_dp)
                return MODE_ERROR;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
index cf39ca90d887872ddb2de5e011041f785fda3996..f349b39201993c88f633c1154c9fc9054c90449c 100644 (file)
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
                                                conn_state->scaling_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
 
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_KMS("\n");
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;
index a70d767313aa10e198338e2e7472592827dfa347..61d908e0df0e2d75175f6f6300d0114b66f3833d 100644 (file)
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
        int target_clock = mode->clock;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        /* XXX: Validate clock range */
 
        if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
        if (fixed_mode)
                intel_fixed_panel_mode(fixed_mode, adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        return true;
 }
 
index ee929f31f7db712d0f1b306571de962480758314..d8cb53ef435134b67e2f08fc5c8470f6697436d0 100644 (file)
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
        bool force_dvi =
                READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        clock = mode->clock;
 
        if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        int desired_bpp;
        bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
 
        if (pipe_config->has_hdmi_sink)
index 15434cad543001317be875f1a266e3fef6636042..7c4c8fb1dae465bdaba562ed536e68918b7eb6b8 100644 (file)
@@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
        batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+       *batch++ = MI_LOAD_REGISTER_IMM(3);
+
        /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-       *batch++ = MI_LOAD_REGISTER_IMM(1);
        *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
        *batch++ = _MASKED_BIT_DISABLE(
                        GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+       /* BSpec: 11391 */
+       *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+       *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+       /* BSpec: 11299 */
+       *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+       *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
        *batch++ = MI_NOOP;
 
        /* WaClearSlmSpaceAtContextSwitch:kbl */
@@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
        context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
        ctx_obj = i915_gem_object_create(ctx->i915, context_size);
-       if (IS_ERR(ctx_obj)) {
-               ret = PTR_ERR(ctx_obj);
-               goto error_deref_obj;
-       }
+       if (IS_ERR(ctx_obj))
+               return PTR_ERR(ctx_obj);
 
        vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
index d278f24ba6ae58bf4a704dc57610a53e9b042d25..48f618dc9abbb9de03ddf594cddf67d154a872e1 100644 (file)
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
        if (mode->hdisplay > fixed_mode->hdisplay)
                return MODE_PANEL;
        if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
        intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                               adjusted_mode);
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        if (HAS_PCH_SPLIT(dev_priv)) {
                pipe_config->has_pch_encoder = true;
 
index 25005023c243cb0526baf01f9c90d90dd3df4da2..26975df4e593b9a899c060cee398ffbdb7b5052b 100644 (file)
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
                                                           adjusted_mode);
        }
 
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
        /*
         * Make the CRTC code factor in the SDVO pixel multiplier.  The
         * SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (intel_sdvo->pixel_clock_min > mode->clock)
                return MODE_CLOCK_LOW;
 
index 885fc3809f7f904e8bc82e0eeee9cddd3dcef3f4..b55b5c157e384158d6c55dab6a693c2e9e5fea21 100644 (file)
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
        const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
        int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+       if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return MODE_NO_DBLESCAN;
+
        if (mode->clock > max_dotclk)
                return MODE_CLOCK_HIGH;
 
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
                        struct drm_connector_state *conn_state)
 {
        const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+       struct drm_display_mode *adjusted_mode =
+               &pipe_config->base.adjusted_mode;
 
        if (!tv_mode)
                return false;
 
-       pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+       if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+               return false;
+
+       adjusted_mode->crtc_clock = tv_mode->clock;
        DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
        pipe_config->pipe_bpp = 8*3;
 
        /* TV has its own notion of sync and other mode flags, so clear them. */
-       pipe_config->base.adjusted_mode.flags = 0;
+       adjusted_mode->flags = 0;
 
        /*
         * FIXME: We don't check whether the input mode is actually what we want
index 32b1a6cdecfc05133147e6ff85c959f4668362f1..d3443125e66164a863fb41bfdb435a1ce13340b6 100644 (file)
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        priv->io_base = regs;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
-       if (!res)
-               return -EINVAL;
+       if (!res) {
+               ret = -EINVAL;
+               goto free_drm;
+       }
        /* Simply ioremap since it may be a shared register zone */
        regs = devm_ioremap(dev, res->start, resource_size(res));
        if (!regs) {
index 291c08117ab65337f7a9d8567cec08207cd555db..397143b639c64ba6dbd7c1144c0314aae954339f 100644 (file)
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
 
        nvif_object_map(&wndw->wimm.base.user, NULL, 0);
        wndw->immd = func;
-       wndw->ctxdma.parent = &disp->core->chan.base.user;
+       wndw->ctxdma.parent = NULL;
        return 0;
 }
 
index 224963b533a69163b39bed8cbf175e637befd591..c5a9bc1af5af79038d938de1f8cb3a85dc35dac8 100644 (file)
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
        if (ret)
                return ret;
 
-       ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-       if (IS_ERR(ctxdma)) {
-               nouveau_bo_unpin(fb->nvbo);
-               return PTR_ERR(ctxdma);
+       if (wndw->ctxdma.parent) {
+               ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+               if (IS_ERR(ctxdma)) {
+                       nouveau_bo_unpin(fb->nvbo);
+                       return PTR_ERR(ctxdma);
+               }
+
+               asyw->image.handle[0] = ctxdma->object.handle;
        }
 
        asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-       asyw->image.handle[0] = ctxdma->object.handle;
        asyw->image.offset[0] = fb->nvbo->bo.offset;
 
        if (wndw->func->prepare) {
index b8cda94492412c820c2d44ec0f40afa298f163b0..768207fbbae3d8d23e287428ccca34bcdfedbda2 100644 (file)
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        struct qxl_cursor_cmd *cmd;
        struct qxl_cursor *cursor;
        struct drm_gem_object *obj;
-       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+       struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
        int ret;
        void *user_ptr;
        int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
                                                           cursor_bo, 0);
                cmd->type = QXL_CURSOR_SET;
 
-               qxl_bo_unref(&qcrtc->cursor_bo);
+               old_cursor_bo = qcrtc->cursor_bo;
                qcrtc->cursor_bo = cursor_bo;
                cursor_bo = NULL;
        } else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
        qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
        qxl_release_fence_buffer_objects(release);
 
+       if (old_cursor_bo)
+               qxl_bo_unref(&old_cursor_bo);
+
        qxl_bo_unref(&cursor_bo);
 
        return;
index 2589f4acd5ae22ab6255e3a7baa35db2ac38c073..9c81301d0eedabd120c2cb26e1e530ce15b71205 100644 (file)
@@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I)              += sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)                += sun4i_tv.o
 obj-$(CONFIG_DRM_SUN4I)                += sun6i_drc.o
 
-obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND)        += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I)                += sun4i-frontend.o
+endif
 obj-$(CONFIG_DRM_SUN4I_HDMI)   += sun4i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN6I_DSI)    += sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)        += sun8i-drm-hdmi.o
index 08747fc3ee713d6ba796b946103334b302a48758..8232b39e16ca700d17ebfae9ab207a0a3c4d5c78 100644 (file)
@@ -17,7 +17,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
-#include <drm/drm_panel.h>
 
 #include <uapi/drm/drm_mode.h>
 
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
                                     const struct drm_display_mode *mode)
 {
-       struct drm_panel *panel = tcon->panel;
-       struct drm_connector *connector = panel->connector;
-       struct drm_display_info display_info = connector->display_info;
        unsigned int bp, hsync, vsync;
        u8 clk_delay;
        u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
        if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
 
-       /*
-        * On A20 and similar SoCs, the only way to achieve Positive Edge
-        * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-        * By default TCON works in Negative Edge(Falling Edge),
-        * this is why phase is set to 0 in that case.
-        * Unfortunately there's no way to logically invert dclk through
-        * IO_POL register.
-        * The only acceptable way to work, triple checked with scope,
-        * is using clock phase set to 0° for Negative Edge and set to 240°
-        * for Positive Edge.
-        * On A33 and similar SoCs there would be a 90° phase option,
-        * but it divides also dclk by 2.
-        * Following code is a way to avoid quirks all around TCON
-        * and DOTCLOCK drivers.
-        */
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-               clk_set_phase(tcon->dclk, 240);
-
-       if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-               clk_set_phase(tcon->dclk, 0);
-
        regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
                           SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
                           val);
index 776c1513e582827aae0764e52e376df92e884ab6..a2bd5876c633515950f23be6cdff6f0f49306312 100644 (file)
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
                 * unaligned offset is malformed and causes command stream
                 * corruption on the buffer address relocation.
                 */
-               if (offset & 3 || offset >= obj->gem.size) {
+               if (offset & 3 || offset > obj->gem.size) {
                        err = -EINVAL;
                        goto fail;
                }
index 2ebdc6d5a76e60a33d6a271ff158258a61b7908c..d5583190f3e44de77560d76c2d5a5a4cf7db62de 100644 (file)
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 
        if (cmd > (char *) urb->transfer_buffer) {
                /* Send partial buffer remaining before exiting */
-               int len = cmd - (char *) urb->transfer_buffer;
+               int len;
+               if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+                       *cmd++ = 0xAF;
+               len = cmd - (char *) urb->transfer_buffer;
                ret = udl_submit_urb(dev, urb, len);
                bytes_sent += len;
        } else
index 0c87b1ac6b68f0d41cfd01851a14b9a092455f4f..b992644c17e6b565b414351b1e262d4b61c9f38d 100644 (file)
@@ -153,11 +153,11 @@ static void udl_compress_hline16(
                raw_pixels_count_byte = cmd++; /*  we'll know this later */
                raw_pixel_start = pixel;
 
-               cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
-                       min((int)(pixel_end - pixel) / bpp,
-                           (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+               cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
+                                       (unsigned long)(pixel_end - pixel) / bpp,
+                                       (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
 
-               prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+               prefetch_range((void *) pixel, cmd_pixel_end - pixel);
                pixel_val16 = get_pixel_val16(pixel, bpp);
 
                while (pixel < cmd_pixel_end) {
@@ -193,6 +193,9 @@ static void udl_compress_hline16(
                if (pixel > raw_pixel_start) {
                        /* finalize last RAW span */
                        *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+               } else {
+                       /* undo unused byte */
+                       cmd--;
                }
 
                *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
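
The min3() clamp sizes one RLE command by whichever limit is hit first: the 256-pixel command maximum, the pixels left on the scanline, or the command buffer space, where the extra "- 1" reserves one byte so the trailing flush byte always fits. Worked through in plain C (illustrative values):

#include <stdio.h>

static unsigned long min3ul(unsigned long a, unsigned long b,
			    unsigned long c)
{
	unsigned long m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned long max_cmd_pixels = 256;  /* MAX_CMD_PIXELS + 1       */
	unsigned long bpp = 2;               /* 16-bit pixels            */
	unsigned long line_bytes = 1000;     /* pixel_end - pixel        */
	unsigned long buf_bytes = 121;       /* cmd_buffer_end - 1 - cmd */

	/* 256 vs 1000/2=500 vs 121/2=60 pixels -> 60 pixels */
	unsigned long n = min3ul(max_cmd_pixels, line_bytes / bpp,
				 buf_bytes / 2);

	printf("%lu pixels (%lu raw bytes)\n", n, n * bpp); /* 60, 120 */
	return 0;
}
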
index f1d5f76e9c33d8f31fa4e6f79f37e27e4f43c35c..d88073e7d22dddd94b33e2210fb20181dea6d245 100644 (file)
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
                return err;
        }
 
+       if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+               goto skip_iommu;
+
        host->group = iommu_group_get(&pdev->dev);
        if (host->group) {
                struct iommu_domain_geometry *geometry;
index e2f4a4d93d2012f3d21ea59271732c84a80189a0..527a1cddb14fd5d2a23fa96559ed0e33c54bd690 100644 (file)
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
 
-               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+               if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+                   unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
index f858cc72011d183fa11892fb152e0d9b705c3059..3942ee61bd1c17e57867a7d8e5f521b5f8eae9b8 100644 (file)
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev)
        }
        hdev->io_started = false;
 
+       clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
        if (!hdev->driver) {
                id = hid_match_device(hdev, hdrv);
                if (id == NULL) {
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
        struct hid_device *hdev = to_hid_device(dev);
 
        if (hdev->driver == hdrv &&
-           !hdrv->match(hdev, hid_ignore_special_drivers))
+           !hdrv->match(hdev, hid_ignore_special_drivers) &&
+           !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
                return device_reprobe(dev);
 
        return 0;
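
test_and_set_bit() turns the reprobe into a one-shot per device: the first caller finds the bit clear and reprobes, later callers find it set and skip, and the probe path clears it again so a future rebind can reprobe once more. A sketch of the pattern with C11 atomics (illustrative; the real kernel bitops take a bit number, this model takes a mask):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define STAT_REPROBED (1u << 0)

/* Sets the bits in @mask; returns true if any were already set. */
static bool test_and_set(atomic_uint *status, unsigned int mask)
{
	return atomic_fetch_or(status, mask) & mask;
}

int main(void)
{
	atomic_uint status = 0;

	if (!test_and_set(&status, STAT_REPROBED))
		printf("first caller: reprobe\n");
	if (!test_and_set(&status, STAT_REPROBED))
		printf("second caller: reprobe\n");	/* not reached */

	atomic_fetch_and(&status, ~STAT_REPROBED);	/* probe clears it */
	return 0;
}
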
index 8469b6964ff64e45f7807641ef8eda8197f8f81f..b48100236df890cdd1bbffa0daac97257357a38d 100644 (file)
@@ -1154,6 +1154,8 @@ copy_rest:
                        goto out;
                if (list->tail > list->head) {
                        len = list->tail - list->head;
+                       if (len > count)
+                               len = count;
 
                        if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
                                ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
                        list->head += len;
                } else {
                        len = HID_DEBUG_BUFSIZE - list->head;
+                       if (len > count)
+                               len = count;
 
                        if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
                                ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
                        }
                        list->head = 0;
                        ret += len;
-                       goto copy_rest;
+                       count -= len;
+                       if (count > 0)
+                               goto copy_rest;
                }
 
        }
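
Both branches now clamp the segment length to the caller's remaining count, so a read can no longer overrun the user buffer when the ring has wrapped. A minimal model of the two-segment, count-clamped ring read (plain C, illustrative only):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8

/* Copy up to @count bytes out of a ring buffer, in at most two
 * segments (head to end of array, then start of array to tail),
 * clamping each segment to the remaining count.
 */
static size_t ring_read(const char *ring, size_t *head, size_t tail,
			char *out, size_t count)
{
	size_t done = 0;

	while (count && *head != tail) {
		size_t seg = tail > *head ? tail - *head
					  : RING_SIZE - *head;

		if (seg > count)
			seg = count;	/* the clamp added above */
		memcpy(out + done, ring + *head, seg);
		*head = (*head + seg) % RING_SIZE;
		done += seg;
		count -= seg;
	}
	return done;
}

int main(void)
{
	/* wrapped ring: head = 6, tail = 2, holding "AB" then "CD" */
	char ring[RING_SIZE] = { 'C', 'D', 0, 0, 0, 0, 'A', 'B' };
	char out[RING_SIZE + 1] = { 0 };
	size_t head = 6;

	size_t n = ring_read(ring, &head, 2, out, 3);
	printf("read %zu: %s\n", n, out);	/* read 3: ABC */
	return 0;
}
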
index 7b8e17b03cb864a7bc0ab0cbd114594b01f28c5c..6bf4da7ad63a51f3b9aa6713552c96be6042bba2 100644 (file)
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
        { }
 };
 MODULE_DEVICE_TABLE(hid, hammer_devices);
index a85634fe033f01bd6f9a2b41c67d027c0b55ccc1..c7981ddd8776377faa9a238b8e58d6162054b9c6 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE        0x5028
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index cb86cc834201c89f660daa3509722f6fa72cb98c..0422ec2b13d208d98acdf22c5eb97b6393c5f530 100644 (file)
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
 
 static int steam_client_ll_parse(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_parse_report(hdev, steam->hdev->dev_rdesc,
                        steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
 
 static int steam_client_ll_open(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
        int ret;
 
        ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
 
 static void steam_client_ll_close(struct hid_device *hdev)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        mutex_lock(&steam->mutex);
        steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
                                size_t count, unsigned char report_type,
                                int reqtype)
 {
-       struct steam_device *steam = hid_get_drvdata(hdev);
+       struct steam_device *steam = hdev->driver_data;
 
        return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
                        report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
                ret = PTR_ERR(steam->client_hdev);
                goto client_hdev_fail;
        }
-       hid_set_drvdata(steam->client_hdev, steam);
+       steam->client_hdev->driver_data = steam;
 
        /*
         * With the real steam controller interface, do not connect hidraw.
index c1652bb7bd156e298514bcc63ce818ea49e6d7c6..eae0cb3ddec668e8d2f82b1d571bb826b0fb1dd5 100644 (file)
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
                return;
        }
 
-       if ((ret_size > size) || (ret_size <= 2)) {
+       if ((ret_size > size) || (ret_size < 2)) {
                dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
                        __func__, size, ret_size);
                return;
index 582e449be9feeeebd5924fea4a28aab4a5c2e8a2..a2c53ea3b5edfce82eeb2ef4b4e2392f5c7fb98f 100644 (file)
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
        kfree(ishtp_dev);
 }
 
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
 
 /* 50ms to get resume response */
 #define WAIT_FOR_RESUME_ACK_MS         50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
  * in that case a simple resume message is enough, others we need
  * a reset sequence.
  */
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
 {
        struct pci_dev *pdev = to_pci_dev(ish_resume_device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
  *
  * Return: 0 to the pm core
  */
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
        return 0;
 }
 
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
 /**
  * ish_resume() - ISH resume callback
  * @device:    device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
  *
  * Return: 0 to the pm core
  */
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
        return 0;
 }
 
-static const struct dev_pm_ops ish_pm_ops = {
-       .suspend = ish_suspend,
-       .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS       (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS       NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
 
 static struct pci_driver ish_driver = {
        .name = KBUILD_MODNAME,
        .id_table = ish_pci_tbl,
        .probe = ish_probe,
        .remove = ish_remove,
-       .driver.pm = ISHTP_ISH_PM_OPS,
+       .driver.pm = &ish_pm_ops,
 };
 
 module_pci_driver(ish_driver);
index e3ce233f8bdcc5bdcae97ffa217f65e022938b0e..23872d08308cdb5857d53b5bcdf907e20d74c345 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/hiddev.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/nospec.h>
 #include "usbhid.h"
 
 #ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
                if (uref->field_index >= report->maxfield)
                        goto inval;
+               uref->field_index = array_index_nospec(uref->field_index,
+                                                      report->maxfield);
 
                field = report->field[uref->field_index];
                if (uref->usage_index >= field->maxusage)
                        goto inval;
+               uref->usage_index = array_index_nospec(uref->usage_index,
+                                                      field->maxusage);
 
                uref->usage_code = field->usage[uref->usage_index].hid;
 
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
                        if (uref->field_index >= report->maxfield)
                                goto inval;
+                       uref->field_index = array_index_nospec(uref->field_index,
+                                                              report->maxfield);
 
                        field = report->field[uref->field_index];
 
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                if (finfo.field_index >= report->maxfield)
                        break;
+               finfo.field_index = array_index_nospec(finfo.field_index,
+                                                      report->maxfield);
 
                field = report->field[finfo.field_index];
                memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
                if (cinfo.index >= hid->maxcollection)
                        break;
+               cinfo.index = array_index_nospec(cinfo.index,
+                                                hid->maxcollection);
 
                cinfo.type = hid->collection[cinfo.index].type;
                cinfo.usage = hid->collection[cinfo.index].usage;
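
Each of these hunks applies the same Spectre-v1 recipe: bounds-check the user-supplied index, then re-clamp it with array_index_nospec() so a speculatively executed out-of-bounds load cannot leak memory, then use it. A userspace sketch of the check-then-clamp shape (the branchless mask below is a simplified stand-in for the kernel helper, which uses carefully chosen asm):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in: idx when idx < size, else 0, computed without
 * a forward branch the CPU could speculate past.
 */
static size_t index_nospec(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size); /* all ones or 0 */

	return idx & mask;
}

int main(void)
{
	int table[4] = { 10, 20, 30, 40 };
	size_t idx = 2;	/* imagine this came from userspace */

	if (idx >= 4)			/* architectural bounds check */
		return 1;
	idx = index_nospec(idx, 4);	/* clamp for speculation too */
	printf("%d\n", table[idx]);	/* 30 */
	return 0;
}
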
index c101369b51de88b927fdf2295f3bb664ed415899..d6797535fff97217b477cf7c2009397dcce2d1ec 100644 (file)
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
                }
        }
 
+       /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+       if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+           hdev->product == 0x0358 &&
+           WACOM_PEN_FIELD(field) &&
+           wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+               field->logical_maximum = 43200;
+       }
+
        switch (usage->hid) {
        case HID_GD_X:
                features->x_max = field->logical_maximum;
index 0bb44d0088edb5f8bd8da44a23c57095db14bb68..ad7afa74d3657d902cf655ef6f1467854625b98b 100644 (file)
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
                        if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
                                features->device_type |= WACOM_DEVICETYPE_PAD;
 
-                       features->x_max = 4096;
-                       features->y_max = 4096;
+                       if (features->type == INTUOSHT2) {
+                               features->x_max = features->x_max / 10;
+                               features->y_max = features->y_max / 10;
+                       }
+                       else {
+                               features->x_max = 4096;
+                               features->y_max = 4096;
+                       }
                }
                else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
                        features->device_type |= WACOM_DEVICETYPE_PAD;
index bf3bb7e1adab8579cf7647a24cdba12c9c828050..9d3ef879dc51e1aa08848649cfaec435a6f882fa 100644 (file)
@@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
                },
        },
+       {
+               .ident = "Dell XPS13 9333",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+               },
+       },
        { }
 };
 
index 155d4d1d1585af4aa7debc37163072af4979bd02..f9d1349c328698aa510e57d10c6b7b475999d16f 100644 (file)
@@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev)
         * The temperature is already monitored if the respective bit in <mask>
         * is set.
         */
-       for (i = 0; i < 32; i++) {
+       for (i = 0; i < 31; i++) {
                if (!(data->temp_mask & BIT(i + 1)))
                        continue;
                if (!reg_temp_alternate[i])
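
The loop bound matters because BIT(i + 1) at i = 31 would evaluate 1 << 32, which is undefined behaviour on a 32-bit type; stopping at i = 30 (i < 31) keeps the shift within bits 1..31 of the 32-bit temp_mask. A compact demonstration of the boundary (plain C):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (UINT32_C(1) << (n))

int main(void)
{
	uint32_t mask = 0x80000002;	/* bits 1 and 31 set */
	int i, hits = 0;

	for (i = 0; i < 31; i++)	/* i = 31 would shift by 32: UB */
		if (mask & BIT(i + 1))
			hits++;

	printf("%d monitored sources\n", hits);	/* 2 */
	return 0;
}
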
index 4a34f311e1ff4df2cd6cdfcff7ea1662c95a06eb..6ec65adaba49569ab7b9775f856859a0fcfbd967 100644 (file)
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
        if (bit_adap->getscl == NULL)
                adap->quirks = &i2c_bit_quirk_no_clk_stretch;
 
-       /* Bring bus to a known state. Looks like STOP if bus is not free yet */
-       setscl(bit_adap, 1);
-       udelay(bit_adap->udelay);
-       setsda(bit_adap, 1);
+       /*
+        * We tried forcing SCL/SDA to an initial state here. But that caused a
+        * regression, sadly. Check Bugzilla #200045 for details.
+        */
 
        ret = add_adapter(adap);
        if (ret < 0)
index 44cffad43701f4839096bbde5c5937ee22cce135..c4d176f5ed793c76c78c412d081c21bc8dff2327 100644 (file)
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
        .name                   = "cht_wc_ext_chrg_irq_chip",
 };
 
-static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+static const char * const bq24190_suppliers[] = {
+       "tcpm-source-psy-i2c-fusb302" };
 
 static const struct property_entry bq24190_props[] = {
        PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
index 005e6e0330c278276a0d602fcfebdc3429218cfd..66f85bbf35917161cc36e4ffb308d78b8401c0cb 100644 (file)
@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
         * required for an I2C bus.
         */
        if (pdata->scl_is_open_drain)
-               gflags = GPIOD_OUT_LOW;
+               gflags = GPIOD_OUT_HIGH;
        else
-               gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+               gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
        priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
        if (IS_ERR(priv->scl))
                return PTR_ERR(priv->scl);
index e866c481bfc325d3c42e733faa88d133b3388f0d..fce52bdab2b715a7123e34b153e2206662c67bf9 100644 (file)
@@ -127,7 +127,7 @@ enum stu300_error {
 
 /*
  * The number of address send attempts tried before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
  */
 #define NUM_ADDR_RESEND_ATTEMPTS 12
 
index 5fccd1f1bca85d28bcc249fa6b76f4297bf504bb..797def5319f1325adacf1974c0b44cdb3a7ca4a6 100644 (file)
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
 {
        u32 cnfg;
 
+       /*
+        * NACK interrupt is generated before the I2C controller generates
+        * the STOP condition on the bus. So wait for 2 clock periods
+        * before disabling the controller so that the STOP condition has
+        * been delivered properly.
+        */
+       udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
        cnfg = i2c_readl(i2c_dev, I2C_CNFG);
        if (cnfg & I2C_CNFG_PACKET_MODE_EN)
                i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
        if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
                return 0;
 
-       /*
-        * NACK interrupt is generated before the I2C controller generates
-        * the STOP condition on the bus. So wait for 2 clock periods
-        * before resetting the controller so that the STOP condition has
-        * been delivered properly.
-        */
-       if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
-               udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
        tegra_i2c_init(i2c_dev);
        if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
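
The delay moved above converts two bus-clock periods to microseconds, rounded up: at the standard 100 kHz rate that is ceil(2,000,000 / 100,000) = 20 us, and at 400 kHz fast mode it is 5 us. A one-line check of the arithmetic (plain C):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	printf("%d us\n", DIV_ROUND_UP(2 * 1000000, 100000)); /* 20 */
	printf("%d us\n", DIV_ROUND_UP(2 * 1000000, 400000)); /* 5  */
	return 0;
}
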
index 31d16ada6e7d9a789240cc62f50a7fcde840bb2e..301285c54603fda6ded7653ad2446c8421cb2ee2 100644 (file)
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 
                val = !val;
                bri->set_scl(adap, val);
-               ndelay(RECOVERY_NDELAY);
+
+               /*
+                * If we can set SDA, we will always create STOP here to ensure
+                * the additional pulses will do no harm. This is achieved by
+                * letting SDA follow SCL half a cycle later.
+                */
+               ndelay(RECOVERY_NDELAY / 2);
+               if (bri->set_sda)
+                       bri->set_sda(adap, val);
+               ndelay(RECOVERY_NDELAY / 2);
        }
 
        /* check if recovery actually succeeded */
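
Driving SDA to the same level as SCL half a period later means every SDA rising edge happens while SCL is already high, which on I2C is a STOP condition, so recovery pulses that fail to free the bus at least cannot be mistaken for data or a START. A bit-bang sketch of the recovery timing (illustrative; set_scl/set_sda stand in for the adapter callbacks):

#include <stdio.h>

#define RECOVERY_NDELAY 5000		/* ns; ~100 kHz full period */
#define RECOVERY_CLK_CNT (9 * 2)	/* 9 pulses, toggled per half */

static void set_scl(int v) { printf("SCL=%d\n", v); }
static void set_sda(int v) { printf("SDA=%d\n", v); }
static void ndelay(long ns) { (void)ns; /* real code busy-waits */ }

int main(void)
{
	int i, val = 1;

	for (i = 0; i < RECOVERY_CLK_CNT; i++) {
		val = !val;
		set_scl(val);
		ndelay(RECOVERY_NDELAY / 2);
		set_sda(val);	/* SDA follows SCL half a cycle later */
		ndelay(RECOVERY_NDELAY / 2);
	}
	return 0;
}
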
index f3f683041e7f9199ad5799ef5c8fd83f59fc9856..51970bae3c4a5a4d08f03ae558816fd9c264996b 100644 (file)
@@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
 
        status = i2c_transfer(adapter, msg, num);
        if (status < 0)
-               return status;
-       if (status != num)
-               return -EIO;
+               goto cleanup;
+       if (status != num) {
+               status = -EIO;
+               goto cleanup;
+       }
+       status = 0;
 
        /* Check PEC if last message is a read */
        if (i && (msg[num-1].flags & I2C_M_RD)) {
                status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
                if (status < 0)
-                       return status;
+                       goto cleanup;
        }
 
        if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                        break;
                }
 
+cleanup:
        if (msg[0].flags & I2C_M_DMA_SAFE)
                kfree(msg[0].buf);
        if (msg[1].flags & I2C_M_DMA_SAFE)
                kfree(msg[1].buf);
 
-       return 0;
+       return status;
 }
 
 /**
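
The rework funnels every exit through one cleanup label so the DMA-safe bounce buffers are released on error paths as well as on success. A standalone sketch of the idiom, with do_io() as a stand-in for the transfer:

        #include <stdlib.h>

        static int do_io(char *buf) { (void)buf; return 0; }  /* stand-in */

        int xfer(void)
        {
                int status;
                char *buf = malloc(32);

                if (!buf)
                        return -1;

                status = do_io(buf);
                if (status < 0)
                        goto cleanup;   /* error: buffer is still freed */
                status = 0;
        cleanup:
                free(buf);              /* single release point for all paths */
                return status;
        }
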
index 7e3d82cff3d5f2537608c0a21d9bf277767e7bd8..c149c9c360fc4f265ce1e406e1dd8ba7ae5615d8 100644 (file)
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
        if (src < 0)
                return IRQ_NONE;
 
-       if (!(src & data->chip_info->enabled_events))
+       if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
                return IRQ_NONE;
 
        if (src & MMA8452_INT_DRDY) {
index f9c0624505a2993e3a48d9a581faa8a26e9287de..42618fe4f83ed82d0f50b92e884dd59a11a1df09 100644 (file)
@@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
        }
 
        irq_type = irqd_get_trigger_type(desc);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
        if (irq_type == IRQF_TRIGGER_RISING)
                st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
        else if (irq_type == IRQF_TRIGGER_FALLING)
index 34d42a2504c92bf43ee1216f1855e9febfd03161..df5b2a0da96c4a9c311ddd6da57f990f6d1f821f 100644 (file)
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
                        "%s: failed to get lux\n", __func__);
                return lux_val;
        }
+       if (lux_val == 0)
+               return -ERANGE;
 
        ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
                        lux_val;
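
The new check closes a division by zero: with, say, als_cal_target = 150 and als_gain_trim = 1000 (illustrative operand values), a pitch-dark sensor returning lux_val = 0 would evaluate 150 * 1000 / 0 and oops the kernel; returning -ERANGE instead reports that calibration is undefined at zero lux.
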
index 5ec3e41b65f2b8f991626a4522d6263d66ca2a27..fe87d27779d96b99ce4f847c9a9e02a4a1a87aa7 100644 (file)
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
        }
        comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
 
-       *val = comp_humidity;
-       *val2 = 1024;
+       *val = comp_humidity * 1000 / 1024;
 
-       return IIO_VAL_FRACTIONAL;
+       return IIO_VAL_INT;
 }
 
 static int bmp280_read_raw(struct iio_dev *indio_dev,
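
The compensated value is in steps of 1/1024 %RH, and the sysfs ABI expects relative humidity as an integer in milli-percent. Worked through with an illustrative reading of comp_humidity = 47104: the old fractional pair (47104, 1024) presented 46 %RH, while the new code reports 47104 * 1000 / 1024 = 46000, the same quantity expressed directly in m%RH.
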
index 3e90b6a1d9d2d6a203d13d945e9322a9ec154fe8..cc06e8404e9bf07c6d0acccd81490a0ba78a94f7 100644 (file)
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        struct ib_flow_attr               *flow_attr;
        struct ib_qp                      *qp;
        struct ib_uflow_resources         *uflow_res;
+       struct ib_uverbs_flow_spec_hdr    *kern_spec;
        int err = 0;
-       void *kern_spec;
        void *ib_spec;
        int i;
 
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                if (!kern_flow_attr)
                        return -ENOMEM;
 
-               memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
-               err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+               *kern_flow_attr = cmd.flow_attr;
+               err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
                                         cmd.flow_attr.size);
                if (err)
                        goto err_free_attr;
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
                goto err_uobj;
        }
 
+       if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+               err = -EINVAL;
+               goto err_put;
+       }
+
        flow_attr = kzalloc(struct_size(flow_attr, flows,
                                cmd.flow_attr.num_of_specs), GFP_KERNEL);
        if (!flow_attr) {
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
        flow_attr->flags = kern_flow_attr->flags;
        flow_attr->size = sizeof(*flow_attr);
 
-       kern_spec = kern_flow_attr + 1;
+       kern_spec = kern_flow_attr->flow_specs;
        ib_spec = flow_attr + 1;
        for (i = 0; i < flow_attr->num_of_specs &&
-            cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
-            cmd.flow_attr.size >=
-            ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
-               err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
-                                          uflow_res);
+                       cmd.flow_attr.size >= sizeof(*kern_spec) &&
+                       cmd.flow_attr.size >= kern_spec->size;
+            i++) {
+               err = kern_spec_to_ib_spec(
+                               file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
+                               ib_spec, uflow_res);
                if (err)
                        goto err_free;
 
                flow_attr->size +=
                        ((union ib_flow_spec *) ib_spec)->size;
-               cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
-               kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+               cmd.flow_attr.size -= kern_spec->size;
+               kern_spec = ((void *)kern_spec) + kern_spec->size;
                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
        }
        if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
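
With kern_spec now properly typed, the loop's invariant is easier to see: each record announces its own length, and that length must be validated against the bytes remaining before the cursor advances. A generic standalone sketch of the pattern (the struct and names are illustrative, not the uverbs types):

        #include <stddef.h>

        struct rec_hdr {
                unsigned short size;    /* total record size, header included */
        };

        static void walk(void *buf, size_t len)
        {
                while (len >= sizeof(struct rec_hdr)) {
                        struct rec_hdr *h = buf;

                        if (h->size < sizeof(*h) || h->size > len)
                                break;          /* malformed record: stop */
                        /* ... process the record here ... */
                        len -= h->size;
                        buf = (char *)buf + h->size;
                }
        }
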
index 3ae2339dd27a9f5b6c4d104674d096f9f22b5b67..2094d136513d6c5f144663ad9e74192fd85a191a 100644 (file)
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
        if (ret)
                return ret;
 
-       if (!file->ucontext &&
-           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
-               return -EINVAL;
-
        if (extended) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                goto out;
        }
 
+       /*
+        * Must be after the ib_dev check: once RCU has cleared ib_dev,
+        * ib_dev == NULL implies ucontext == NULL as well.
+        */
+       if (!file->ucontext &&
+           (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (!verify_command_mask(ib_dev, command, extended)) {
                ret = -EOPNOTSUPP;
                goto out;
index 0b56828c1319b1b350385dfd5be2c981d26d6ffa..9d6beb948535bec89545e9f9b25f7b83976a654e 100644 (file)
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
 
 /* Completion queues */
 
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller)
 {
        struct ib_cq *cq;
 
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
                cq->res.type = RDMA_RESTRACK_CQ;
+               cq->res.kern_name = caller;
                rdma_restrack_add(&cq->res);
        }
 
        return cq;
 }
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
 
 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 {
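
Renaming the export only pays off together with a wrapper that supplies the caller string automatically, so existing call sites keep compiling while the restrack entry records who created the CQ. A sketch of that wrapper pattern, assuming KBUILD_MODNAME as the caller tag:

        /* Keep old call sites working while tagging the caller. */
        #define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
                __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), \
                               (cq_ctxt), (cq_attr), KBUILD_MODNAME)
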
index 1445918e32392f28ae4ce9ea74e7df0feeddf371..7b76e6f81aeb477181afedc2f44fec990ce3090f 100644 (file)
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-       if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+       if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
                return -ENOMEM;
 
        mhp->mpl[mhp->mpl_len++] = addr;
index 1a1a47ac53c6f049285a30028ba4cde5bd21d5af..f15c931020810cdbc6125898af42146337974cc8 100644 (file)
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
        lockdep_assert_held(&qp->s_lock);
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
index b7b671017e594298c8bea18d029fbd1154a8cfe9..e254dcec6f647067a0efce4cee6b47e9f76dbf9c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        int middle = 0;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
index 1ab332f1866e878580ddb683f71ac83e13d35cbb..70d39fc450a1e112b2f97b4e499cbf96623d19ad 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
        u32 lid;
 
        ps->s_txreq = get_txreq(ps->dev, qp);
-       if (IS_ERR(ps->s_txreq))
+       if (!ps->s_txreq)
                goto bail_no_tx;
 
        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
index 873e48ea923fc42acc9cb2d5d3d7055dd07a1790..c4ab2d5b4502ee1e905ef2c193495f56e479eaf9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
        __must_hold(&qp->s_lock)
 {
-       struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+       struct verbs_txreq *tx = NULL;
 
        write_seqlock(&dev->txwait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
index 729244c3086ce7eb7d28da104bb4f7f4363c96bf..1c19bbc764b2d6f93134fe7775f55569d7b70b84 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license.  When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        if (unlikely(!tx)) {
                /* call slow path to get the lock */
                tx = __get_txreq(dev, qp);
-               if (IS_ERR(tx))
+               if (!tx)
                        return tx;
        }
        tx->qp = qp;
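
These hfi1 hunks are one logical fix: the txreq slow path signalled failure with ERR_PTR(-EBUSY) while parts of the code checked for NULL, and mixing the two conventions lets a poisoned pointer slip through. The series settles on NULL everywhere. A compressed illustration of the hazard (fragment showing the old behavior):

        struct verbs_txreq *tx = __get_txreq(dev, qp); /* old: ERR_PTR on failure */

        if (!tx)                /* never true for ERR_PTR(-EBUSY)... */
                goto bail_no_tx;
        tx->qp = qp;            /* ...so this dereferences a poisoned pointer */
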
index ed1f253faf977c5bf3b4f15f4a1ea8992e817d88..c7c85c22e4e3291a343319ffcdb2e00034d7cc5f 100644 (file)
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
        }
 
        if (flags & IB_MR_REREG_ACCESS) {
-               if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
-                       return -EPERM;
+               if (ib_access_writable(mr_access_flags) &&
+                   !mmr->umem->writable) {
+                       err = -EPERM;
+                       goto release_mpt_entry;
+               }
 
                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));
index e52dd21519b45ff00268ae33c21816a8b5a96b53..b3ba9a222550750f9c92a1ea8d1cf23b93e05d12 100644 (file)
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
        if (!mcounters->hw_cntrs_hndl) {
                mcounters->hw_cntrs_hndl = mlx5_fc_create(
                        to_mdev(ibcounters->device)->mdev, false);
-               if (!mcounters->hw_cntrs_hndl) {
-                       ret = -ENOMEM;
+               if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+                       ret = PTR_ERR(mcounters->hw_cntrs_hndl);
                        goto free;
                }
                hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
                        return ERR_PTR(-ENOMEM);
 
                err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
-               if (err) {
-                       kfree(ucmd);
-                       return ERR_PTR(err);
-               }
+               if (err)
+                       goto free_ucmd;
        }
 
-       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
-               return ERR_PTR(-ENOMEM);
+       if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        if (domain != IB_FLOW_DOMAIN_USER ||
            flow_attr->port > dev->num_ports ||
            (flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
-                                 IB_FLOW_ATTR_FLAGS_EGRESS)))
-               return ERR_PTR(-EINVAL);
+                                 IB_FLOW_ATTR_FLAGS_EGRESS))) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        if (is_egress &&
            (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
-            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
-               return ERR_PTR(-EINVAL);
+            flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+               err = -EINVAL;
+               goto free_ucmd;
+       }
 
        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-       if (!dst)
-               return ERR_PTR(-ENOMEM);
+       if (!dst) {
+               err = -ENOMEM;
+               goto free_ucmd;
+       }
 
        mutex_lock(&dev->flow_db->lock);
 
@@ -3637,8 +3643,8 @@ destroy_ft:
 unlock:
        mutex_unlock(&dev->flow_db->lock);
        kfree(dst);
+free_ucmd:
        kfree(ucmd);
-       kfree(handler);
        return ERR_PTR(err);
 }
 
@@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                             MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-       if (MLX5_VPORT_MANAGER(mdev) &&
+       if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
index 0af7b7905550baddb5084d99293e9a36196eb6b3..f5de5adc9b1a4143b7d2c82f6e7fe59df1157f93 100644 (file)
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
        desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
                    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-       if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-               return ERR_PTR(-EINVAL);
+       if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        desc_size = roundup_pow_of_two(desc_size);
        desc_size = max_t(size_t, 32, desc_size);
-       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-               return ERR_PTR(-EINVAL);
+       if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        srq->msrq.wqe_shift = ilog2(desc_size);
        buf_size = srq->msrq.max * desc_size;
-       if (buf_size < desc_size)
-               return ERR_PTR(-EINVAL);
+       if (buf_size < desc_size) {
+               err = -EINVAL;
+               goto err_srq;
+       }
        in.type = init_attr->srq_type;
 
        if (pd->uobject)
index f7ac8fc9b531d7550fb0b41233b55e0bec51b4ff..f07b8df96f43954e67d4dfc32148e96a751e6974 100644 (file)
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        }
 
        if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+               if (rdma_protocol_iwarp(&dev->ibdev, 1))
+                       return -EINVAL;
+
                if (attr_mask & IB_QP_PATH_MTU) {
                        if (attr->path_mtu < IB_MTU_256 ||
                            attr->path_mtu > IB_MTU_4096) {
index f30eeba3f772c5a8e0433cdc6b6fcaa47076583c..8be27238a86e4ee1f160b4058e9517ef58708d26 100644 (file)
@@ -645,6 +645,9 @@ next_wqe:
                } else {
                        goto exit;
                }
+               if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+                   qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+                       rxe_run_task(&qp->comp.task, 1);
                qp->req.wqe_index = next_index(qp->sq.queue,
                                                qp->req.wqe_index);
                goto next_wqe;
@@ -709,6 +712,7 @@ next_wqe:
 
        if (fill_packet(qp, wqe, &pkt, skb, payload)) {
                pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+               kfree_skb(skb);
                goto err;
        }
 
@@ -740,7 +744,6 @@ next_wqe:
        goto next_wqe;
 
 err:
-       kfree_skb(skb);
        wqe->status = IB_WC_LOC_PROT_ERR;
        wqe->state = wqe_state_error;
        __rxe_do_task(&qp->comp.task);
index cf30523c6ef64c956e5ebf77c730c6bb146c4a1f..6c7326c93721c495c4e61a73cac2dfaf9a5bc8fc 100644 (file)
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
  * inactive, or if the tool type is changed, a new tracking id is
  * assigned to the slot. The tool type is only reported if the
  * corresponding absbit field is set.
+ *
+ * Returns true if contact is active.
  */
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active)
 {
        struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
        int id;
 
        if (!mt)
-               return;
+               return false;
 
        slot = &mt->slots[mt->slot];
        slot->frame = mt->frame;
 
        if (!active) {
                input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
-               return;
+               return false;
        }
 
        id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
-       if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+       if (id < 0)
                id = input_mt_new_trkid(mt);
 
        input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
        input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+       return true;
 }
 EXPORT_SYMBOL(input_mt_report_slot_state);
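
Reporting the resulting active state lets multitouch drivers gate per-contact output on the same call instead of recomputing it. A hedged usage sketch for a driver's report path:

        /* Only emit coordinates for contacts the MT core considers active. */
        input_mt_slot(dev, slot);
        if (input_mt_report_slot_state(dev, MT_TOOL_FINGER, active)) {
                input_report_abs(dev, ABS_MT_POSITION_X, x);
                input_report_abs(dev, ABS_MT_POSITION_Y, y);
        }
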
 
index 48e36acbeb496db7f5033029158a645f8d3cdb27..cd620e009bada3a8f8c1e70b99be25100bea9c44 100644 (file)
@@ -125,7 +125,7 @@ static const struct xpad_device {
        u8 mapping;
        u8 xtype;
 } xpad_device[] = {
-       { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+       { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
index f6e643b589b616c61d1751005fedb3288f0ad82c..e8dae6195b30500934f23738f995201521e05160 100644 (file)
@@ -45,7 +45,7 @@ struct event_dev {
 static irqreturn_t events_interrupt(int irq, void *dev_id)
 {
        struct event_dev *edev = dev_id;
-       unsigned type, code, value;
+       unsigned int type, code, value;
 
        type = __raw_readl(edev->addr + REG_READ);
        code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
 }
 
 static void events_import_bits(struct event_dev *edev,
-                       unsigned long bits[], unsigned type, size_t count)
+                       unsigned long bits[], unsigned int type, size_t count)
 {
        void __iomem *addr = edev->addr;
        int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
 
                for (j = 0; j < ARRAY_SIZE(val); j++) {
                        int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
                        val[j] = __raw_readl(edev->addr + REG_DATA + offset);
                }
 
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
        struct input_dev *input_dev;
        struct event_dev *edev;
        struct resource *res;
-       unsigned keymapnamelen;
+       unsigned int keymapnamelen;
        void __iomem *addr;
        int irq;
        int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
        for (i = 0; i < keymapnamelen; i++)
                edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
 
-       pr_debug("events_probe() keymap=%s\n", edev->name);
+       pr_debug("%s: keymap=%s\n", __func__, edev->name);
 
        input_dev->name = edev->name;
        input_dev->id.bustype = BUS_HOST;
index c25606e006938743d64498429cf3d0b69768d7fb..ca59a2be9bc5344f65389ea7372a7740b74b5343 100644 (file)
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
          To compile this driver as a module, choose M here: the
          module will be called rave-sp-pwrbutton.
 
+config INPUT_SC27XX_VIBRA
+       tristate "Spreadtrum sc27xx vibrator support"
+       depends on MFD_SC27XX_PMIC || COMPILE_TEST
+       select INPUT_FF_MEMLESS
+       help
+         This option enables support for the Spreadtrum sc27xx vibrator driver.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sc27xx_vibra.
+
 endif
index 72cde28649e2c0bc4fec14f6898445d2f79880dc..9d0f9d1ff68f41a5ec7f13101bb11176e8fd8729 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON)    += retu-pwrbutton.o
 obj-$(CONFIG_INPUT_AXP20X_PEK)         += axp20x-pek.o
 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER)        += rotary_encoder.o
 obj-$(CONFIG_INPUT_RK805_PWRKEY)       += rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA)       += sc27xx-vibra.o
 obj-$(CONFIG_INPUT_SGI_BTNS)           += sgi_btns.o
 obj-$(CONFIG_INPUT_SIRFSOC_ONKEY)      += sirfsoc-onkey.o
 obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY)   += soc_button_array.o
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644 (file)
index 0000000..295251a
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL                GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN      BIT(9)
+#define LDO_VIBR_PD            BIT(8)
+
+struct vibra_info {
+       struct input_dev        *input_dev;
+       struct work_struct      play_work;
+       struct regmap           *regmap;
+       u32                     base;
+       u32                     strength;
+       bool                    enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+       if (on) {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, 0);
+               info->enabled = true;
+       } else {
+               regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+                                  LDO_VIBR_PD);
+               regmap_update_bits(info->regmap, info->base,
+                                  SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+               info->enabled = false;
+       }
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+       return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+       struct vibra_info *info = container_of(work, struct vibra_info,
+                                              play_work);
+
+       if (info->strength && !info->enabled)
+               sc27xx_vibra_set(info, true);
+       else if (info->strength == 0 && info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+                            struct ff_effect *effect)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       info->strength = effect->u.rumble.weak_magnitude;
+       schedule_work(&info->play_work);
+
+       return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+       struct vibra_info *info = input_get_drvdata(input);
+
+       cancel_work_sync(&info->play_work);
+       if (info->enabled)
+               sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+       struct vibra_info *info;
+       int error;
+
+       info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+       if (!info->regmap) {
+               dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+               return -ENODEV;
+       }
+
+       error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+       if (error) {
+               dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+               return error;
+       }
+
+       info->input_dev = devm_input_allocate_device(&pdev->dev);
+       if (!info->input_dev) {
+               dev_err(&pdev->dev, "failed to allocate input device.\n");
+               return -ENOMEM;
+       }
+
+       info->input_dev->name = "sc27xx:vibrator";
+       info->input_dev->id.version = 0;
+       info->input_dev->close = sc27xx_vibra_close;
+
+       input_set_drvdata(info->input_dev, info);
+       input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+       INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+       info->enabled = false;
+
+       error = sc27xx_vibra_hw_init(info);
+       if (error) {
+               dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+               return error;
+       }
+
+       error = input_ff_create_memless(info->input_dev, NULL,
+                                       sc27xx_vibra_play);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+               return error;
+       }
+
+       error = input_register_device(info->input_dev);
+       if (error) {
+               dev_err(&pdev->dev, "failed to register input device.\n");
+               return error;
+       }
+
+       return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+       { .compatible = "sprd,sc2731-vibrator", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+       .driver = {
+               .name = "sc27xx-vibrator",
+               .of_match_table = sc27xx_vibra_of_match,
+       },
+       .probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
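
Since the play handler only consumes weak_magnitude, the quickest smoke test is the standard force-feedback ioctl path from userspace (the event node path below is an assumption; substitute the node this device registers):

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/input.h>

        int main(void)
        {
                int fd = open("/dev/input/event0", O_RDWR);  /* assumed node */
                struct ff_effect e;
                struct input_event play;

                memset(&e, 0, sizeof(e));
                e.type = FF_RUMBLE;
                e.id = -1;                          /* kernel assigns an id */
                e.replay.length = 1000;             /* ms */
                e.u.rumble.weak_magnitude = 0x4000; /* becomes info->strength */
                if (fd < 0 || ioctl(fd, EVIOCSFF, &e) < 0)
                        return 1;

                memset(&play, 0, sizeof(play));
                play.type = EV_FF;
                play.code = e.id;
                play.value = 1;                     /* start the effect */
                write(fd, &play, sizeof(play));
                return 0;
        }
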
index 599544c1a91cd365261b6ca2ec4e4f3149b0a63d..243e0fa6e3e3cb44ce22adc6e76421fda79f4ff2 100644 (file)
@@ -27,6 +27,8 @@
 #define ETP_DISABLE_POWER      0x0001
 #define ETP_PRESSURE_OFFSET    25
 
+#define ETP_CALIBRATE_MAX_LEN  3
+
 /* IAP Firmware handling */
 #define ETP_PRODUCT_ID_FORMAT_STRING   "%d.0"
 #define ETP_FW_NAME            "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
index 8ff75114e7626dc3d2fa23a1d3457f1802b2a628..1f9cd7d8b7ad35e982712e00e8e95369e55cd861 100644 (file)
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
        int tries = 20;
        int retval;
        int error;
-       u8 val[3];
+       u8 val[ETP_CALIBRATE_MAX_LEN];
 
        retval = mutex_lock_interruptible(&data->sysfs_mutex);
        if (retval)
@@ -1345,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN060C", 0 },
        { "ELAN0611", 0 },
        { "ELAN0612", 0 },
+       { "ELAN0618", 0 },
        { "ELAN1000", 0 },
        { }
 };
index cfcb32559925baf1acf070f908f3b91b1fc1b905..c060d270bc4d862ad7366bd87529dbdc032672b6 100644 (file)
@@ -56,7 +56,7 @@
 static int elan_smbus_initialize(struct i2c_client *client)
 {
        u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
-       u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+       u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
        int len, error;
 
        /* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
 static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
 {
        int error;
+       u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+       BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
 
        error = i2c_smbus_read_block_data(client,
-                                         ETP_SMBUS_CALIBRATE_QUERY, val);
+                                         ETP_SMBUS_CALIBRATE_QUERY, buf);
        if (error < 0)
                return error;
 
+       memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
        return 0;
 }
 
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
 {
        int len;
 
+       BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
        len = i2c_smbus_read_block_data(client,
                                        ETP_SMBUS_PACKET_QUERY,
                                        &report[ETP_SMBUS_REPORT_OFFSET]);
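
Both elan_i2c hunks enforce the same contract: i2c_smbus_read_block_data() may store up to I2C_SMBUS_BLOCK_MAX (32) bytes into its destination regardless of how few the caller wanted, so an undersized stack buffer is a stack overflow waiting on a misbehaving device. The safe shape is a full-size bounce buffer plus an explicit copy, sketched here with a hypothetical command and length (the MY_* names are illustrative):

        u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };
        int len;

        len = i2c_smbus_read_block_data(client, MY_QUERY_CMD, buf);
        if (len < 0)
                return len;
        memcpy(val, buf, min(len, MY_WANTED_LEN)); /* copy only what fits */
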
index fb4d902c440345d3cbc02329ed742d48b931dc85..dd85b16dc6f889bb366a10cbd4278234d3f9763c 100644 (file)
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
        else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
                sanity_check = ((packet[3] & 0x1c) == 0x10);
        else
-               sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+               sanity_check = ((packet[0] & 0x08) == 0x00 &&
                                (packet[3] & 0x1c) == 0x10);
 
        if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
        { }
 };
 
+static const char * const middle_button_pnp_ids[] = {
+       "LEN2131", /* ThinkPad P52 w/ NFC */
+       "LEN2132", /* ThinkPad P52 */
+       NULL
+};
+
 /*
  * Set the appropriate event bits for the input subsystem
  */
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
        __clear_bit(EV_REL, dev->evbit);
 
        __set_bit(BTN_LEFT, dev->keybit);
-       if (dmi_check_system(elantech_dmi_has_middle_button))
+       if (dmi_check_system(elantech_dmi_has_middle_button) ||
+                       psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
                __set_bit(BTN_MIDDLE, dev->keybit);
        __set_bit(BTN_RIGHT, dev->keybit);
 
index 5ff5b1952be0c7afe810cef7f6f086f71928e150..d3ff1fc09af712700507d05ac3548703e49173a1 100644 (file)
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                        else
                                input_report_rel(dev, REL_WHEEL, -wheel);
 
-                       input_report_key(dev, BTN_SIDE,  BIT(4));
-                       input_report_key(dev, BTN_EXTRA, BIT(5));
+                       input_report_key(dev, BTN_SIDE,  packet[3] & BIT(4));
+                       input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
                        break;
                }
                break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
 
                /* Extra buttons on Genius NewNet 3D */
-               input_report_key(dev, BTN_SIDE,  BIT(6));
-               input_report_key(dev, BTN_EXTRA, BIT(7));
+               input_report_key(dev, BTN_SIDE,  packet[0] & BIT(6));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
                break;
 
        case PSMOUSE_THINKPS:
                /* Extra button on ThinkingMouse */
-               input_report_key(dev, BTN_EXTRA, BIT(3));
+               input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
 
                /*
                 * Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
                 * Cortron PS2 Trackball reports SIDE button in the
                 * 4th bit of the first byte.
                 */
-               input_report_key(dev, BTN_SIDE, BIT(3));
+               input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
                packet[0] |= BIT(3);
                break;
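
The pre-fix calls passed BIT(n) itself as the key state; BIT(4) is simply the constant 0x10, which is always non-zero, so each of these buttons was reported as permanently pressed. The state has to be masked out of the packet byte first:

        input_report_key(dev, BTN_SIDE, BIT(4));             /* wrong: always "down" */
        input_report_key(dev, BTN_SIDE, packet[3] & BIT(4)); /* right: tests the bit */
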
 
index 7172b88cd0649c8de16ac7373ec2a16c42c338d2..fad2eae4a118e793e617a86a52b28351ef4fafed 100644 (file)
@@ -3,6 +3,7 @@
 #
 config RMI4_CORE
        tristate "Synaptics RMI4 bus support"
+       select IRQ_DOMAIN
        help
          Say Y here if you want to support the Synaptics RMI4 bus.  This is
          required for all RMI4 device support.
index 8bb866c7b9855c5025d31b7be3f722d469f73da9..8eeffa066022dadb9f718f77aab1609700f05543 100644 (file)
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
        if (obj->type == RMI_2D_OBJECT_NONE)
                return;
 
-       if (axis_align->swap_axes)
-               swap(obj->x, obj->y);
-
        if (axis_align->flip_x)
                obj->x = sensor->max_x - obj->x;
 
        if (axis_align->flip_y)
                obj->y = sensor->max_y - obj->y;
 
+       if (axis_align->swap_axes)
+               swap(obj->x, obj->y);
+
        /*
         * Here checking if X offset or y offset are specified is
         * redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
        x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
        y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
 
-       if (axis_align->swap_axes)
-               swap(x, y);
-
        if (axis_align->flip_x)
                x = min(RMI_2D_REL_POS_MAX, -x);
 
        if (axis_align->flip_y)
                y = min(RMI_2D_REL_POS_MAX, -y);
 
+       if (axis_align->swap_axes)
+               swap(x, y);
+
        if (x || y) {
                input_report_rel(sensor->input, REL_X, x);
                input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
        struct input_dev *input = sensor->input;
        int res_x;
        int res_y;
+       int max_x, max_y;
        int input_flags = 0;
 
        if (sensor->report_abs) {
-               if (sensor->axis_align.swap_axes) {
-                       swap(sensor->max_x, sensor->max_y);
-                       swap(sensor->axis_align.clip_x_low,
-                            sensor->axis_align.clip_y_low);
-                       swap(sensor->axis_align.clip_x_high,
-                            sensor->axis_align.clip_y_high);
-               }
-
                sensor->min_x = sensor->axis_align.clip_x_low;
                if (sensor->axis_align.clip_x_high)
                        sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
                                sensor->axis_align.clip_y_high);
 
                set_bit(EV_ABS, input->evbit);
-               input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
-                                       0, 0);
-               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
-                                       0, 0);
+
+               max_x = sensor->max_x;
+               max_y = sensor->max_y;
+               if (sensor->axis_align.swap_axes)
+                       swap(max_x, max_y);
+               input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+               input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
 
                if (sensor->x_mm && sensor->y_mm) {
                        res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
                        res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+                       if (sensor->axis_align.swap_axes)
+                               swap(res_x, res_y);
 
                        input_abs_set_res(input, ABS_X, res_x);
                        input_abs_set_res(input, ABS_Y, res_y);
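
The reordering matters when a flip and the swap are both configured: flips now apply in the sensor's native frame before the axes are exchanged, so flip_x always mirrors the physical X axis. With illustrative numbers, on a 100 x 200 sensor a native touch at (10, 20) with flip_x and swap_axes set becomes (100 - 10, 20) = (90, 20) after the flip and (20, 90) after the swap; flipping after the swap would have mirrored the coordinate that started life as Y instead.
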
index c5fa53adba8d01318cfeacea440360c51c044a7d..bd0d5ff01b08f9c88920b03f56dbb4a3eed21af3 100644 (file)
@@ -9,6 +9,8 @@
 
 #include <linux/kernel.h>
 #include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
 #include <linux/list.h>
 #include <linux/pm.h>
 #include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
 {}
 #endif
 
+static struct irq_chip rmi_irq_chip = {
+       .name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+                                  struct rmi_function_handler *handler)
+{
+       struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+       int i, error;
+
+       for (i = 0; i < fn->num_of_irqs; i++) {
+               set_bit(fn->irq_pos + i, fn->irq_mask);
+
+               fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+                                               fn->irq_pos + i);
+
+               irq_set_chip_data(fn->irq[i], fn);
+               irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+                                        handle_simple_irq);
+               irq_set_nested_thread(fn->irq[i], 1);
+
+               error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+                                       handler->attention, IRQF_ONESHOT,
+                                       dev_name(&fn->dev), fn);
+               if (error) {
+                       dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+                       return error;
+               }
+       }
+
+       return 0;
+}
+
 static int rmi_function_probe(struct device *dev)
 {
        struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
 
        if (handler->probe) {
                error = handler->probe(fn);
-               return error;
+               if (error)
+                       return error;
+       }
+
+       if (fn->num_of_irqs && handler->attention) {
+               error = rmi_create_function_irq(fn, handler);
+               if (error)
+                       return error;
        }
 
        return 0;
@@ -230,12 +272,18 @@ err_put_device:
 
 void rmi_unregister_function(struct rmi_function *fn)
 {
+       int i;
+
        rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
                        fn->fd.function_number);
 
        device_del(&fn->dev);
        of_node_put(fn->dev.of_node);
        put_device(&fn->dev);
+
+       for (i = 0; i < fn->num_of_irqs; i++)
+               irq_dispose_mapping(fn->irq[i]);
+
 }
 
 /**
index b7625a9ac66ab5384727cc83496223be3aedbe92..96383eab41ba1d850468a64e7ced8a3f1bf72ff6 100644 (file)
 
 struct rmi_device;
 
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS        6
+
 /**
  * struct rmi_function - represents the implementation of an RMI4
  * function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
  * @irq_pos: The position in the irq bitfield this function holds
  * @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
  * interrupt handling.
+ * @irq: assigned virq numbers (up to num_of_irqs)
  *
  * @node: entry in device's list of functions
  */
@@ -36,6 +43,7 @@ struct rmi_function {
        struct list_head node;
 
        unsigned int num_of_irqs;
+       int irq[RMI_FN_MAX_IRQS];
        unsigned int irq_pos;
        unsigned long irq_mask[];
 };
@@ -76,7 +84,7 @@ struct rmi_function_handler {
        void (*remove)(struct rmi_function *fn);
        int (*config)(struct rmi_function *fn);
        int (*reset)(struct rmi_function *fn);
-       int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+       irqreturn_t (*attention)(int irq, void *ctx);
        int (*suspend)(struct rmi_function *fn);
        int (*resume)(struct rmi_function *fn);
 };
index 7d29053dfb0f06878ff7897b59f52039a299a089..fc3ab93b7aea454475ee324eecee91470c4a9dc3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pm.h>
 #include <linux/slab.h>
 #include <linux/of.h>
+#include <linux/irqdomain.h>
 #include <uapi/linux/input.h>
 #include <linux/rmi.h>
 #include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
        return 0;
 }
 
-static void process_one_interrupt(struct rmi_driver_data *data,
-                                 struct rmi_function *fn)
-{
-       struct rmi_function_handler *fh;
-
-       if (!fn || !fn->dev.driver)
-               return;
-
-       fh = to_rmi_function_handler(fn->dev.driver);
-       if (fh->attention) {
-               bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
-                               data->irq_count);
-               if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
-                       fh->attention(fn, data->fn_irq_bits);
-       }
-}
-
 static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
 {
        struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
        struct device *dev = &rmi_dev->dev;
-       struct rmi_function *entry;
+       int i;
        int error;
 
        if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
         */
        mutex_unlock(&data->irq_mutex);
 
-       /*
-        * It would be nice to be able to use irq_chip to handle these
-        * nested IRQs.  Unfortunately, most of the current customers for
-        * this driver are using older kernels (3.0.x) that don't support
-        * the features required for that.  Once they've shifted to more
-        * recent kernels (say, 3.3 and higher), this should be switched to
-        * use irq_chip.
-        */
-       list_for_each_entry(entry, &data->function_list, node)
-               process_one_interrupt(data, entry);
+       for_each_set_bit(i, data->irq_status, data->irq_count)
+               handle_nested_irq(irq_find_mapping(data->irqdomain, i));
 
        if (data->input)
                input_sync(data->input);
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
 static int rmi_driver_remove(struct device *dev)
 {
        struct rmi_device *rmi_dev = to_rmi_device(dev);
+       struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
 
        rmi_disable_irq(rmi_dev, false);
 
+       irq_domain_remove(data->irqdomain);
+       data->irqdomain = NULL;
+
        rmi_f34_remove_sysfs(rmi_dev);
        rmi_free_function_list(rmi_dev);
 
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+       int irq_count = 0;
        size_t size;
        int retval;
 
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
         * being accessed.
         */
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
-       irq_count = 0;
        data->bootloader_mode = false;
 
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
        if (data->bootloader_mode)
                dev_warn(dev, "Device in bootloader mode.\n");
 
+       /* Allocate and register a linear revmap irq_domain */
+       data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+                                                  &irq_domain_simple_ops,
+                                                  data);
+       if (!data->irqdomain) {
+               dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+               return -ENOMEM;
+       }
+
        data->irq_count = irq_count;
        data->num_of_irq_regs = (data->irq_count + 7) / 8;
 
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
 {
        struct rmi_device *rmi_dev = data->rmi_dev;
        struct device *dev = &rmi_dev->dev;
-       int irq_count;
+       int irq_count = 0;
        int retval;
 
-       irq_count = 0;
        rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
        retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
        if (retval < 0) {
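
Taken together, these RMI4 hunks swap the hand-rolled attention dispatch for the kernel's nested-IRQ machinery: probe creates one linear irq_domain sized to the device's interrupt count, each function handler's attention callback is bound as a threaded handler on a mapped virq, and the attention path fans out per status bit with handle_nested_irq(). The lifetime pairing the series has to maintain, summarized:

        /*
         * probe:   irq_domain_create_linear()        (rmi_probe_interrupts)
         *          irq_create_mapping() per source   (rmi_create_function_irq)
         * remove:  irq_dispose_mapping() per source  (rmi_unregister_function)
         *          irq_domain_remove()               (rmi_driver_remove)
         */
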
index 8a07ae147df690ee7796c3f9f897904fce6ac6dd..4edaa14fe878650c81e6267550869f8acc714b40 100644 (file)
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f01_attention(struct rmi_function *fn,
-                            unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int error;
        u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
        if (error) {
                dev_err(&fn->dev,
                        "Failed to read device status: %d.\n", error);
-               return error;
+               return IRQ_RETVAL(error);
        }
 
        if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
                error = rmi_dev->driver->reset_handler(rmi_dev);
                if (error) {
                        dev_err(&fn->dev, "Device reset failed: %d\n", error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 struct rmi_function_handler rmi_f01_handler = {
index 88822196d6b723fcf69efd9c3b685fb92dedcf7b..aaa1edc9552254609c1e2ba00008b48bf80f3a85 100644 (file)
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                /* First grab the data passed by the transport device */
                if (drvdata->attn_data.size < ob_len) {
                        dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
 
                memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                "%s: Failed to read F03 output buffers: %d\n",
                                __func__, error);
                        serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
                serio_interrupt(f03->serio, ob_data, serio_flags);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static void rmi_f03_remove(struct rmi_function *fn)
index 12a233251793c24c754224ae1b379de52db34e7d..df64d6aed4f7e10b8eb78eb78619a15d7bcaaf56 100644 (file)
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
 }
 
 static void rmi_f11_finger_handler(struct f11_data *f11,
-                                  struct rmi_2d_sensor *sensor,
-                                  unsigned long *irq_bits, int num_irq_regs,
-                                  int size)
+                                  struct rmi_2d_sensor *sensor, int size)
 {
        const u8 *f_state = f11->data.f_state;
        u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
        int rel_fingers;
        int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
 
-       int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
-                                 num_irq_regs * 8);
-       int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
-                                 num_irq_regs * 8);
-
-       if (abs_bits) {
+       if (sensor->report_abs) {
                if (abs_size > size)
                        abs_fingers = size / RMI_F11_ABS_BYTES;
                else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                        rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
                                                        finger_state, i);
                }
-       }
 
-       if (rel_bits) {
-               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
-                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
-               else
-                       rel_fingers = sensor->nbr_fingers;
-
-               for (i = 0; i < rel_fingers; i++)
-                       rmi_f11_rel_pos_report(f11, i);
-       }
-
-       if (abs_bits) {
                /*
                 * the absolute part is made in 2 parts to allow the kernel
                 * tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
                }
 
                input_mt_sync_frame(sensor->input);
+       } else if (sensor->report_rel) {
+               if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+                       rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+               else
+                       rel_fingers = sensor->nbr_fingers;
+
+               for (i = 0; i < rel_fingers; i++)
+                       rmi_f11_rel_pos_report(f11, i);
        }
+
 }
 
 static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
        return 0;
 }
 
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
                                data_base_addr, f11->sensor.data_pkt,
                                f11->sensor.pkt_size);
                if (error < 0)
-                       return error;
+                       return IRQ_RETVAL(error);
        }
 
-       rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
-                               drvdata->num_of_irq_regs, valid_bytes);
+       rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f11_resume(struct rmi_function *fn)
index a3d1aa88f2a9ce27fcd1f89d2f87d58b21686fce..5c7f489157792bf32da34e982b715824ec17eaff 100644 (file)
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
                rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
 }
 
-static int rmi_f12_attention(struct rmi_function *fn,
-                            unsigned long *irq_nr_regs)
+static irqreturn_t rmi_f12_attention(int irq, void *ctx)
 {
        int retval;
+       struct rmi_function *fn = ctx;
        struct rmi_device *rmi_dev = fn->rmi_dev;
        struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
        struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
                if (retval < 0) {
                        dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
                                retval);
-                       return retval;
+                       return IRQ_RETVAL(retval);
                }
        }
 
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
 
        input_mt_sync_frame(sensor->input);
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f12_write_control_regs(struct rmi_function *fn)
index 82e0f0d43d55271c92c774ba325b1bc40099f83e..5e3ed5ac0c3e40b3919b59493293720877907f1a 100644 (file)
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
        }
 }
 
-static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f30_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f30_data *f30 = dev_get_drvdata(&fn->dev);
        struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
        int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                if (drvdata->attn_data.size < f30->register_count) {
                        dev_warn(&fn->dev,
                                 "F30 interrupted, but data is missing\n");
-                       return 0;
+                       return IRQ_HANDLED;
                }
                memcpy(f30->data_regs, drvdata->attn_data.data,
                        f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        dev_err(&fn->dev,
                                "%s: Failed to read F30 data registers: %d\n",
                                __func__, error);
-                       return error;
+                       return IRQ_RETVAL(error);
                }
        }
 
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        rmi_f03_commit_buttons(f30->f03);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f30_config(struct rmi_function *fn)
index f1f5ac539d5d56b2d554e2aa7bdb50fd0af0d5e3..87a7d4ba382d7210b294f8168adb5083c15b80ee 100644 (file)
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
        return 0;
 }
 
-static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f34_attention(int irq, void *ctx)
 {
+       struct rmi_function *fn = ctx;
        struct f34_data *f34 = dev_get_drvdata(&fn->dev);
        int ret;
        u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
                        complete(&f34->v7.cmd_done);
        }
 
-       return 0;
+       return IRQ_HANDLED;
 }
 
 static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
index e8a59d1640192b75e6f83db0e2ad355064c19ff9..a6f515bcab2228a8783f10dbf10fae30462fd852 100644 (file)
@@ -610,11 +610,6 @@ error:
        mutex_unlock(&f54->data_mutex);
 }
 
-static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
-{
-       return 0;
-}
-
 static int rmi_f54_config(struct rmi_function *fn)
 {
        struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
        .func = 0x54,
        .probe = rmi_f54_probe,
        .config = rmi_f54_config,
-       .attention = rmi_f54_attention,
        .remove = rmi_f54_remove,
 };
index ff7043f74a3d32286a6b8cdbed91f1bc3f0be12f..d196ac3d8b8cda8e1cf405101ed5603473db821d 100644 (file)
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
        { "GSL3692", 0 },
        { "MSSL1680", 0 },
        { "MSSL0001", 0 },
+       { "MSSL0002", 0 },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
index e055d228bfb94057893a8a080dd7bbc709aeb6bf..689ffe5383706dd062cd9ce7aac9bc631c7656ba 100644 (file)
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
        bool "Support for Intel IOMMU using DMA Remapping Devices"
        depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-       select DMA_DIRECT_OPS
        select IOMMU_API
        select IOMMU_IOVA
        select NEED_DMA_MAP_STATE
index 14e4b37224284976a1cb8890e5d13ae5337350cc..b344a883f11690969865e936e30f4053ecf97005 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -3713,30 +3712,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
 {
-       void *vaddr;
+       struct page *page = NULL;
+       int order;
 
-       vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-       if (iommu_no_mapping(dev) || !vaddr)
-               return vaddr;
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
 
-       *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-                       PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-                       dev->coherent_dma_mask);
-       if (!*dma_handle)
-               goto out_free_pages;
-       return vaddr;
+       if (!iommu_no_mapping(dev))
+               flags &= ~(GFP_DMA | GFP_DMA32);
+       else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+               if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+                       flags |= GFP_DMA;
+               else
+                       flags |= GFP_DMA32;
+       }
+
+       if (gfpflags_allow_blocking(flags)) {
+               unsigned int count = size >> PAGE_SHIFT;
+
+               page = dma_alloc_from_contiguous(dev, count, order, flags);
+               if (page && iommu_no_mapping(dev) &&
+                   page_to_phys(page) + size > dev->coherent_dma_mask) {
+                       dma_release_from_contiguous(dev, page, count);
+                       page = NULL;
+               }
+       }
+
+       if (!page)
+               page = alloc_pages(flags, order);
+       if (!page)
+               return NULL;
+       memset(page_address(page), 0, size);
+
+       *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+                                        DMA_BIDIRECTIONAL,
+                                        dev->coherent_dma_mask);
+       if (*dma_handle)
+               return page_address(page);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 
-out_free_pages:
-       dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
        return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
                                dma_addr_t dma_handle, unsigned long attrs)
 {
-       if (!iommu_no_mapping(dev))
-               intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-       dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+       int order;
+       struct page *page = virt_to_page(vaddr);
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       intel_unmap(dev, dma_handle, size);
+       if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+               __free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
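
The rewritten allocator above follows the usual CMA-first pattern: try dma_alloc_from_contiguous() when the caller may block, fall back to the page allocator, and mirror the same order on free. A condensed sketch of just that pairing (helper names hypothetical):

static struct page *coherent_alloc(struct device *dev, size_t size, gfp_t flags)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (gfpflags_allow_blocking(flags))
		page = dma_alloc_from_contiguous(dev, count, get_order(size), flags);
	if (!page)	/* CMA unavailable or exhausted */
		page = alloc_pages(flags, get_order(size));
	return page;
}

static void coherent_free(struct device *dev, struct page *page, size_t size)
{
	/* dma_release_from_contiguous() returns false for non-CMA pages */
	if (!dma_release_from_contiguous(dev, page, PAGE_ALIGN(size) >> PAGE_SHIFT))
		__free_pages(page, get_order(size));
}
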
index 0f52d44b3f6997c8c9e4e6f6f1a7da7b43d3e7c5..f5fe0100f9ffd043d251d96ce473775bfdafd3b4 100644 (file)
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 fail:
        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
-       gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+       gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
        return err;
 }
 
index 5377d7e2afba62b518671267b5d29c4963c2e5e6..d7842d312d3eacd7d07853caa6a529a1c8080c99 100644 (file)
@@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
        return its->collections + its_dev->event_map.col_map[event];
 }
 
+static struct its_collection *valid_col(struct its_collection *col)
+{
+       if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+               return NULL;
+
+       return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+       if (valid_col(its->collections + vpe->col_idx))
+               return vpe;
+
+       return NULL;
+}
+
 /*
  * ITS command descriptors - parameters to be encoded in a command
  * block.
@@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return col;
+       return valid_col(col);
 }
 
 static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vinvall_cmd.vpe;
+       return valid_vpe(its, desc->its_vinvall_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapp_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmapti_cmd.vpe;
+       return valid_vpe(its, desc->its_vmapti_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovi_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovi_cmd.vpe);
 }
 
 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
 
        its_fixup_cmd(cmd);
 
-       return desc->its_vmovp_cmd.vpe;
+       return valid_vpe(its, desc->its_vmovp_cmd.vpe);
 }
 
 static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its)
 
 static int its_alloc_collections(struct its_node *its)
 {
+       int i;
+
        its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
                                   GFP_KERNEL);
        if (!its->collections)
                return -ENOMEM;
 
+       for (i = 0; i < nr_cpu_ids; i++)
+               its->collections[i].target_address = ~0ULL;
+
        return 0;
 }
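
Why ~0ULL works as the "unmapped" sentinel: ITS target addresses are 64KB aligned, so a valid target_address always has bits [15:0] clear, and valid_col() above can reject a never-mapped collection with a single mask test. A minimal sketch, assuming that alignment guarantee:

/* a mapped collection always has bits [15:0] of its target clear */
static bool col_is_unmapped(const struct its_collection *col)
{
	return col->target_address & GENMASK_ULL(15, 0);	/* true for ~0ULL */
}
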
 
@@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
                cpu_mask = cpumask_of_node(its_dev->its->numa_node);
 
        /* Bind the LPI to the first possible CPU */
-       cpu = cpumask_first(cpu_mask);
+       cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+       if (cpu >= nr_cpu_ids) {
+               if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+                       return -EINVAL;
+
+               cpu = cpumask_first(cpu_online_mask);
+       }
+
        its_dev->event_map.col_map[event] = cpu;
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void)
        u64 timeout = USEC_PER_SEC;
        u64 val;
 
+       /*
+        * If coming via a CPU hotplug event, we don't need to disable
+        * LPIs before trying to re-enable them. They are already
+        * configured and all is well in the world. Detect this case
+        * by checking the allocation of the pending table for the
+        * current CPU.
+        */
+       if (gic_data_rdist()->pend_page)
+               return 0;
+
        if (!gic_rdists_supports_plpis()) {
                pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
                return -ENXIO;
index 1ec3bfe56693ab39831e464048b1959a04250e6d..c671b3212010e6de583e5a5211fc2a20064200f2 100644 (file)
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
        msg->address_lo = lower_32_bits(msi_data->msiir_addr);
        msg->data = data->hwirq;
 
-       if (msi_affinity_flag)
-               msg->data |= cpumask_first(data->common->affinity);
+       if (msi_affinity_flag) {
+               const struct cpumask *mask;
+
+               mask = irq_data_get_effective_affinity_mask(data);
+               msg->data |= cpumask_first(mask);
+       }
 
        iommu_dma_map_msi_msg(data->irq, msg);
 }
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
        }
 
-       cpumask_copy(irq_data->common->affinity, mask);
+       irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK;
 }
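
The change above switches the driver from the user-requested affinity mask to the effective affinity, i.e. the single CPU the irqchip actually programmed. A minimal set_affinity callback following that contract (illustrative only):

static int example_msi_set_affinity(struct irq_data *d,
				    const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_any_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* record the CPU actually targeted; compose_msg reads this back */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
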
index 98f90aadd141b03c42bedd070b66030be7983d86..18c0a1281914fa3218761bd20b2a2e0c85e8aae6 100644 (file)
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
        .getname        = data_sock_getname,
        .sendmsg        = mISDN_sock_sendmsg,
        .recvmsg        = mISDN_sock_recvmsg,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = data_sock_setsockopt,
index 10c08982185a572ff05683461d514e86c8920f96..9c03f35d9df113c6eb6608f4b48b85447635aca9 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig NVM
        bool "Open-Channel SSD target support"
-       depends on BLOCK && HAS_DMA && PCI
+       depends on BLOCK && PCI
        select BLK_DEV_NVME
        help
          Say Y here to enable Open-channel SSDs.
index ab13fcec3fca046c3da6fd621f0e0db9c47b1bf9..75df4c9d8b541de480dfea8d823e0eff389d9ccd 100644 (file)
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
 }
 
 /* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
 {
        if (!strcasecmp(name, "near"))
                return ALGORITHM_RAID10_NEAR;
index 938766794c2ef3b6caf538a0fa787447eadb160c..3d0e2c198f0614dbaf22db657a2bfc9336f89ebd 100644 (file)
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                               sector_t start, sector_t len, void *data)
 {
-       struct request_queue *q = bdev_get_queue(dev->bdev);
-
-       return q && blk_queue_dax(q);
+       return bdev_dax_supported(dev->bdev, PAGE_SIZE);
 }
 
 static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_dax(t))
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       else
+               blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
        if (dm_table_supports_dax_write_cache(t))
                dax_write_cache(t->md->dax_dev, true);
 
index 36ef284ad086b881324771d4f882dc6fa96d6dde..72142021b5c9a0410cfb6ccb04a93d613376fb53 100644 (file)
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
 static int __commit_transaction(struct dm_pool_metadata *pmd)
 {
        int r;
-       size_t metadata_len, data_len;
        struct thin_disk_superblock *disk_super;
        struct dm_block *sblock;
 
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        if (r < 0)
                return r;
 
-       r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
-       if (r < 0)
-               return r;
-
-       r = dm_sm_root_size(pmd->data_sm, &data_len);
-       if (r < 0)
-               return r;
-
        r = save_sm_roots(pmd);
        if (r < 0)
                return r;
index 7945238df1c0a67a8e525697f0e419c7594ed1ad..b900723bbd0fae4845a17ef67dadcf33dc5cc67b 100644 (file)
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
 
 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
+static void requeue_bios(struct pool *pool);
+
 static void check_for_space(struct pool *pool)
 {
        int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
        if (r)
                return;
 
-       if (nr_free)
+       if (nr_free) {
                set_pool_mode(pool, PM_WRITE);
+               requeue_bios(pool);
+       }
 }
 
 /*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 
        r = dm_pool_alloc_data_block(pool->pmd, result);
        if (r) {
-               metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+               if (r == -ENOSPC)
+                       set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+               else
+                       metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
                return r;
        }
 
index 5961c7794ef37008f7a10f521517aded086f20f3..07ea6a48aac69a1db35567222c4b8123f2ef9e59 100644 (file)
@@ -259,7 +259,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
        if (da != p) {
                long i;
                wc->memory_map = NULL;
-               pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+               pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
@@ -859,7 +859,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
 
        if (wc->entries)
                return 0;
-       wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+       wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1481,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
                wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
                wb->page_offset = PAGE_SIZE;
                if (max_pages <= WB_LIST_INLINE ||
-                   unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
-                                                    GFP_NOIO | __GFP_NORETRY |
-                                                    __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+                   unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+                                                          GFP_NOIO | __GFP_NORETRY |
+                                                          __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
                        wb->wc_list = wb->wc_list_inline;
                        max_pages = WB_LIST_INLINE;
                }
index 3c0e45f4dcf5cdf06d79b0c9d107d7455a0b6ad7..a44183ff4be0a3bd4219a7bf5854622aeca79db2 100644 (file)
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        /* Chunk BIO work */
        mutex_init(&dmz->chunk_lock);
-       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+       INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
        dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
                                        0, dev->name);
        if (!dmz->chunk_wq) {
index e65429a29c06e2554e8a0e23ba5a0b2a3b18a8c8..b0dd7027848b7de9f701469c6eb29b5d9c96e1df 100644 (file)
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
-       if (ti->type->direct_access)
-               ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+       ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
 
  out:
        dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                                 * the usage of io->orig_bio in dm_remap_zone_report()
                                 * won't be affected by this reassignment.
                                 */
-                               struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
-                                                                &md->queue->bio_split);
+                               struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+                                                         GFP_NOIO, &md->queue->bio_split);
                                ci.io->orig_bio = b;
-                               bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
                                bio_chain(b, bio);
                                ret = generic_make_request(bio);
                                break;
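
bio_split() bundles the clone-and-advance pair the old code spelled out by hand: it clones the first sectors of the bio and advances the original past them in one call. A sketch of the resulting pattern (helper name hypothetical):

static blk_qc_t split_and_requeue(struct bio *bio, unsigned int split_sectors,
				  struct bio_set *bs)
{
	/* clone the first split_sectors and advance bio past them */
	struct bio *b = bio_split(bio, split_sectors, GFP_NOIO, bs);

	bio_chain(b, bio);			/* tie b's completion to bio */
	return generic_make_request(bio);	/* re-queue the remainder */
}
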
index 29b0cd9ec951ee4603279656e7148ba2a5b8d763..994aed2f9dfff4135170102265523045e893ac0a 100644 (file)
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
index 478cf446827f469c1d02d6f2918fcb8dd870f893..35bd3a62451b30fec0cca41fcdc687bb7920aa56 100644 (file)
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
                            disk->rdev->saved_raid_disk < 0)
                                conf->fullsync = 1;
                }
+
+               if (disk->replacement &&
+                   !test_bit(In_sync, &disk->replacement->flags) &&
+                   disk->replacement->saved_raid_disk < 0) {
+                       conf->fullsync = 1;
+               }
+
                disk->recovery_disabled = mddev->recovery_disabled - 1;
        }
 
index 40826bba06b6d52c06bef7eb64bea6a719496dd0..fcfab6635f9c6649a64e0b144f55df672d621389 100644 (file)
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
        bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-       struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;
 
        if (attr->attach_flags)
                return -EINVAL;
 
-       prog = bpf_prog_get_type(attr->attach_bpf_fd,
-                                BPF_PROG_TYPE_LIRC_MODE2);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
        rcdev = rc_dev_get_from_fd(attr->target_fd);
-       if (IS_ERR(rcdev)) {
-               bpf_prog_put(prog);
+       if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);
-       }
 
        ret = lirc_bpf_attach(rcdev, prog);
-       if (ret)
-               bpf_prog_put(prog);
 
        put_device(&rcdev->dev);
 
index e05c3245930a1e3f94aad4c0a7f85e015759ec76..fa840666bdd1aeb20cca67fc5df9556fc73135aa 100644 (file)
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
 static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
        void __iomem *address = (void __iomem *)file->private_data;
-       unsigned char *page;
-       int retval;
        int len = 0;
        unsigned int value;
-
-       if (*offset < 0)
-               return -EINVAL;
-       if (count == 0 || count > 1024)
-               return 0;
-       if (*offset != 0)
-               return 0;
-
-       page = (unsigned char *)__get_free_page(GFP_KERNEL);
-       if (!page)
-               return -ENOMEM;
+       char lbuf[20];
 
        value = readl(address);
-       len = sprintf(page, "%d\n", value);
-
-       if (copy_to_user(buf, page, len)) {
-               retval = -EFAULT;
-               goto exit;
-       }
-       *offset += len;
-       retval = len;
+       len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
 
-exit:
-       free_page((unsigned long)page);
-       return retval;
+       return simple_read_from_buffer(buf, count, offset, lbuf, len);
 }
 
 static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
index b0b8f18a85e3e132d7741e9daab53fe9270b1b8a..6649f0d56d2f0cdee1dde6d57ad68141ccce12bf 100644 (file)
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev,
        if (&cl->link == &dev->file_list) {
                /* A message for not connected fixed address clients
                 * should be silently discarded
+                * On power down, clients may have been force-cleaned;
+                * silently discard such messages as well.
                 */
-               if (hdr_is_fixed(mei_hdr)) {
+               if (hdr_is_fixed(mei_hdr) ||
+                   dev->dev_state == MEI_DEV_POWER_DOWN) {
                        mei_irq_discard_msg(dev, mei_hdr);
                        ret = 0;
                        goto reset_slots;
index efd733472a3531804225c5515ade4f4cf69fd707..56c6f79a5c5af83a862cf76352f172f5e02ee0b8 100644 (file)
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
        unsigned long status;
-       unsigned long pfn = page_to_pfn(b->page);
+       unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
        STATS_INC(b->stats.lock[is_2m_pages]);
 
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
                unsigned int num_pages, bool is_2m_pages, unsigned int *target)
 {
        unsigned long status;
-       unsigned long pfn = page_to_pfn(b->page);
+       unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
 
        STATS_INC(b->stats.unlock[is_2m_pages]);
 
index ef05e00393782d16d77f2f1ecf16803f69461217..2a833686784b6b459d9744b366ef22cb5ca1279c 100644 (file)
@@ -27,8 +27,8 @@ struct mmc_gpio {
        bool override_cd_active_level;
        irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
        char *ro_label;
-       char cd_label[0];
        u32 cd_debounce_delay_ms;
+       char cd_label[];
 };
 
 static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
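
The reordering above is the C flexible-array-member rule: such a member must be the last field. With the old layout, the string written into cd_label landed on top of cd_debounce_delay_ms. A minimal illustration (struct is illustrative only):

struct labelled {
	u32 value;	/* all fixed-size members first */
	char label[];	/* flexible array member: must come last */
};

/* the allocation reserves the tail explicitly, e.g.:
 * p = kzalloc(sizeof(*p) + strlen(name) + 1, GFP_KERNEL);
 */
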
index 623f4d27fa0161b1a938521c266176f544f2ff22..80dc2fd6576cf3f88afd695ad1f36ec1b4f52b41 100644 (file)
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
         * It's used when HS400 mode is enabled.
         */
        if (data->flags & MMC_DATA_WRITE &&
-               !(host->timing != MMC_TIMING_MMC_HS400))
-               return;
+               host->timing != MMC_TIMING_MMC_HS400)
+               goto disable;
 
        if (data->flags & MMC_DATA_WRITE)
                enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
                enable = SDMMC_CARD_RD_THR_EN;
 
        if (host->timing != MMC_TIMING_MMC_HS200 &&
-           host->timing != MMC_TIMING_UHS_SDR104)
+           host->timing != MMC_TIMING_UHS_SDR104 &&
+           host->timing != MMC_TIMING_MMC_HS400)
                goto disable;
 
        blksz_depth = blksz / (1 << host->data_shift);
index f7f9773d161f1e5e2f58ae1f9a32c800ac5f3474..d032bd63444d10295826750b80a560c0335ddda5 100644 (file)
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
        renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
                                            RST_RESERVED_BITS | val);
 
-       if (host->data && host->data->flags & MMC_DATA_READ)
-               clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+       clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
 
        renesas_sdhi_internal_dmac_enable_dma(host, true);
 }
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
                goto force_pio;
 
       /* This DMAC cannot handle buffers that are not 8-byte aligned */
-       if (!IS_ALIGNED(sg_dma_address(sg), 8)) {
-               dma_unmap_sg(&host->pdev->dev, sg, host->sg_len,
-                            mmc_get_dma_dir(data));
-               goto force_pio;
-       }
+       if (!IS_ALIGNED(sg_dma_address(sg), 8))
+               goto force_pio_with_unmap;
 
        if (data->flags & MMC_DATA_READ) {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
                if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
                    test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
-                       goto force_pio;
+                       goto force_pio_with_unmap;
        } else {
                dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
        }
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
 
        return;
 
+force_pio_with_unmap:
+       dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
+
 force_pio:
        host->force_pio = true;
        renesas_sdhi_internal_dmac_enable_dma(host, false);
index d6aef70d34fac0554d223bed4121e173dea1281d..4eb3d29ecde1078512f85d291904a63d5fa358c6 100644 (file)
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
 
                        if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
                                val |= SDHCI_SUPPORT_HS400;
+
+                       /*
+                        * Do not advertise faster UHS modes if there are no
+                        * pinctrl states for 100MHz/200MHz.
+                        */
+                       if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
+                           IS_ERR_OR_NULL(imx_data->pins_200mhz))
+                               val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
+                                        | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
                }
        }
 
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
                                                ESDHC_PINCTRL_STATE_100MHZ);
                imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
                                                ESDHC_PINCTRL_STATE_200MHZ);
-               if (IS_ERR(imx_data->pins_100mhz) ||
-                               IS_ERR(imx_data->pins_200mhz)) {
-                       dev_warn(mmc_dev(host->mmc),
-                               "could not get ultra high speed state, work on normal mode\n");
-                       /*
-                        * fall back to not supporting uhs by specifying no
-                        * 1.8v quirk
-                        */
-                       host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
-               }
-       } else {
-               host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
        }
 
        /* call to generic mmc_of_parse to support additional capabilities */
index e7472590f2ed6416b800ef85c2efeae574cc6718..8e7f3e35ee3dc48eef93c126f2d161ad63b3dc12 100644 (file)
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
        sunxi_mmc_init_host(host);
        sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
        sunxi_mmc_set_clk(host, &mmc->ios);
+       enable_irq(host->irq);
 
        return 0;
 }
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
        struct mmc_host *mmc = dev_get_drvdata(dev);
        struct sunxi_mmc_host *host = mmc_priv(mmc);
 
+       /*
+        * When the clocks are off, spurious interrupts may
+        * be received, which will stall the system.
+        * Disabling the IRQ prevents this.
+        */
+       disable_irq(host->irq);
        sunxi_mmc_reset_host(host);
        sunxi_mmc_disable(host);
 
index a0c655628d6d5283fd9b5cd8234b8b09857d9c75..1b64ac8c5bc86309061a5540d385eb6e2ca866cc 100644 (file)
@@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 
 struct ppb_lock {
        struct flchip *chip;
-       loff_t offset;
+       unsigned long adr;
        int locked;
 };
 
@@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        unsigned long timeo;
        int ret;
 
+       adr += chip->start;
        mutex_lock(&chip->mutex);
-       ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+       ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
@@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
 
        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                chip->state = FL_LOCKING;
-               map_write(map, CMD(0xA0), chip->start + adr);
-               map_write(map, CMD(0x00), chip->start + adr);
+               map_write(map, CMD(0xA0), adr);
+               map_write(map, CMD(0x00), adr);
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                /*
                 * Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
        map_write(map, CMD(0x00), chip->start);
 
        chip->state = FL_READY;
-       put_chip(map, chip, adr + chip->start);
+       put_chip(map, chip, adr);
        mutex_unlock(&chip->mutex);
 
        return ret;
@@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                 * sectors shall be unlocked, so let's keep their locking
                 * status at "unlocked" (locked=0) for the final re-locking.
                 */
-               if ((adr < ofs) || (adr >= (ofs + len))) {
+               if ((offset < ofs) || (offset >= (ofs + len))) {
                        sect[sectors].chip = &cfi->chips[chipnum];
-                       sect[sectors].offset = offset;
+                       sect[sectors].adr = adr;
                        sect[sectors].locked = do_ppb_xxlock(
                                map, &cfi->chips[chipnum], adr, 0,
                                DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
                        i++;
 
                if (adr >> cfi->chipshift) {
+                       if (offset >= (ofs + len))
+                               break;
                        adr = 0;
                        chipnum++;
 
@@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
         */
        for (i = 0; i < sectors; i++) {
                if (sect[i].locked)
-                       do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+                       do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
                                      DO_XXLOCK_ONEBLOCK_LOCK);
        }
 
index 3a6f450d1093c4c59d8b79bfafbc0d0c8c744722..53febe8a68c3cdfadfad784bca3335879a86d1f8 100644 (file)
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
        { "AT45DB642x",  0x1f2800, 8192, 1056, 11, SUP_POW2PS},
        { "at45db642d",  0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
 
-       { "AT45DB641E",  0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
-       { "at45db641e",  0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+       { "AT45DB641E",  0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+       { "at45db641e",  0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
 };
 
 static struct flash_info *jedec_lookup(struct spi_device *spi,
index cfd33e6ca77f903a6afc636e73f31ffb40d0d0bd..5869e90cc14b3c1f367b17a4f58bdb31c1e188ea 100644 (file)
@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       denali->clk_x_rate = clk_get_rate(dt->clk);
+       /*
+        * Hardcode the clock rate for backward compatibility.
+        * This works for both SOCFPGA and UniPhier.
+        */
+       denali->clk_x_rate = 200000000;
 
        ret = denali_init(denali);
        if (ret)
index 45786e707b7bd1ae5a4cba72825bcfb00fdda370..26cef218bb43ee1bd1fb2cf1dbadeb8ad38e2f8e 100644 (file)
@@ -48,7 +48,7 @@
 #define NFC_V1_V2_CONFIG               (host->regs + 0x0a)
 #define NFC_V1_V2_ECC_STATUS_RESULT    (host->regs + 0x0c)
 #define NFC_V1_V2_RSLTMAIN_AREA                (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA       (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA         (host->regs + 0x10)
 #define NFC_V1_V2_WRPROT               (host->regs + 0x12)
 #define NFC_V1_UNLOCKSTART_BLKADDR     (host->regs + 0x14)
 #define NFC_V1_UNLOCKEND_BLKADDR       (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
        writew(config1, NFC_V1_V2_CONFIG1);
        /* preset operation */
 
+       /* spare area size in 16-bit half-words */
+       writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
        /* Unlock the internal RAM Buffer */
        writew(0x2, NFC_V1_V2_CONFIG);
 
index 10c4f9919850c3e7b56ed6bdb083a0fc35a0b7f5..b01d15ec4c56bfbdded578526d76e2ed12b65093 100644 (file)
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
 
        for (; page < page_end; page++) {
                res = chip->ecc.read_oob(mtd, chip, page);
-               if (res)
+               if (res < 0)
                        return res;
 
                bad = chip->oob_poi[chip->badblockpos];
index 7ed1f87e742a7accbbeb441b02e199878b3ef035..49c546c97c6f9a370ff64636deaf778bce2c3ce0 100644 (file)
 
 #include <linux/mtd/rawnand.h>
 
+/*
+ * The Macronix AC series does not support using SET/GET_FEATURES to
+ * change the timings, unlike what is declared in the parameter page.
+ * Unflag this feature to avoid unnecessary timing downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+       unsigned int i;
+       static const char * const broken_get_timings[] = {
+               "MX30LF1G18AC",
+               "MX30LF1G28AC",
+               "MX30LF2G18AC",
+               "MX30LF2G28AC",
+               "MX30LF4G18AC",
+               "MX30LF4G28AC",
+               "MX60LF8G18AC",
+       };
+
+       if (!chip->parameters.supports_set_get_features)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
+               if (!strcmp(broken_get_timings[i], chip->parameters.model))
+                       break;
+       }
+
+       if (i == ARRAY_SIZE(broken_get_timings))
+               return;
+
+       bitmap_clear(chip->parameters.get_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+       bitmap_clear(chip->parameters.set_feature_list,
+                    ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
 static int macronix_nand_init(struct nand_chip *chip)
 {
        if (nand_is_slc(chip))
                chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 
-       /*
-        * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
-        * the timings unlike what is declared in the parameter page. Unflag
-        * this feature to avoid unnecessary downturns.
-        */
-       if (chip->parameters.supports_set_get_features &&
-           !strcmp("MX30LF2G18AC", chip->parameters.model)) {
-               bitmap_clear(chip->parameters.get_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-               bitmap_clear(chip->parameters.set_feature_list,
-                            ONFI_FEATURE_ADDR_TIMING_MODE, 1);
-       }
+       macronix_nand_fix_broken_get_timings(chip);
 
        return 0;
 }
index 0af45b134c0cf859902f3d138b305bf5836d526d..5ec4c90a637d549a644441461ffc7717c623a4bc 100644 (file)
@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
 
        if (p->supports_set_get_features) {
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
                set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+               set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
        }
 
        return 0;
index c3f7aaa5d18f7de068f797b84b05b31be4248897..d7e10b36a0b94b476fb5068d634c58ee756fbc2c 100644 (file)
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
        if (ret)
                return ret;
 
-       if (f_pdata->use_direct_mode)
+       if (f_pdata->use_direct_mode) {
                memcpy_toio(cqspi->ahb_base + to, buf, len);
-       else
+               ret = cqspi_wait_idle(cqspi);
+       } else {
                ret = cqspi_indirect_write_execute(nor, to, buf, len);
+       }
        if (ret)
                return ret;
 
index d5c15e8bb3de706b12d343ee1a50477b23ab3d3f..f273af136fc7c995ee4df2bdb697002c916c0a48 100644 (file)
@@ -173,7 +173,7 @@ config SUNLANCE
 
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
-       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+       depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
        depends on X86 || ARM64 || COMPILE_TEST
        select BITREVERSE
        select CRC32
index 1205861b631896a0fc6b19ac608483d8e4b27d4e..eedd3f3dd22e220186578235c9f5f0b0072e80f6 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE_V2
        tristate "APM X-Gene SoC Ethernet-v2 Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        help
          This is the Ethernet driver for the on-chip ethernet interface
index afccb033177b39233a333994835713d577339c2f..e4e33c900b577161e77974bd62c45030cb2762e8 100644 (file)
@@ -1,6 +1,5 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
-       depends on HAS_DMA
        depends on ARCH_XGENE || COMPILE_TEST
        select PHYLIB
        select MDIO_XGENE
index fc7383106946ca6461f62ea305be0f03bb59c227..91eb8910b1c992b1b7876f05a26753a5cf79c100 100644 (file)
@@ -63,8 +63,6 @@
 
 #define AQ_CFG_NAPI_WEIGHT     64U
 
-#define AQ_CFG_MULTICAST_ADDRESS_MAX     32U
-
 /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
 
 #define AQ_NIC_FC_OFF    0U
index a2d416b24ffc251c71d002a9befe825d5c585fbc..2c6ebd91a9f2782e87472e497447b60974a7a571 100644 (file)
@@ -98,6 +98,8 @@ struct aq_stats_s {
 #define AQ_HW_MEDIA_TYPE_TP    1U
 #define AQ_HW_MEDIA_TYPE_FIBRE 2U
 
+#define AQ_HW_MULTICAST_ADDRESS_MAX     32U
+
 struct aq_hw_s {
        atomic_t flags;
        u8 rbl_enabled:1;
@@ -177,7 +179,7 @@ struct aq_hw_ops {
                                    unsigned int packet_filter);
 
        int (*hw_multicast_list_set)(struct aq_hw_s *self,
-                                    u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                    u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
                                     [ETH_ALEN],
                                     u32 count);
 
index ba5fe8c4125d85d0050c2c5b269cba2c320d3127..e3ae29e523f0e26738b0ab80a2f3ac431083d13b 100644 (file)
@@ -135,17 +135,10 @@ err_exit:
 static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
-       int err = 0;
 
-       err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
-       if (err < 0)
-               return;
+       aq_nic_set_packet_filter(aq_nic, ndev->flags);
 
-       if (netdev_mc_count(ndev)) {
-               err = aq_nic_set_multicast_list(aq_nic, ndev);
-               if (err < 0)
-                       return;
-       }
+       aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
 static const struct net_device_ops aq_ndev_ops = {
index 1a1a6380c128c4522b330907cc16258f0e012189..7a22d0257e04ccf07ef87cae18d5d4f87630660a 100644 (file)
@@ -563,34 +563,41 @@ err_exit:
 
 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 {
+       unsigned int packet_filter = self->packet_filter;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;
 
-       self->mc_list.count = 0U;
-
-       netdev_for_each_mc_addr(ha, ndev) {
-               ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-               ++self->mc_list.count;
+       self->mc_list.count = 0;
+       if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_PROMISC;
+       } else {
+               netdev_for_each_uc_addr(ha, ndev) {
+                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
 
-               if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
-                       break;
+                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+                               break;
+               }
        }
 
-       if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
-               /* Number of filters is too big: atlantic does not support this.
-                * Force all multi filter to support this.
-                * With this we disable all UC filters and setup "all pass"
-                * multicast mask
-                */
-               self->packet_filter |= IFF_ALLMULTI;
-               self->aq_nic_cfg.mc_list_count = 0;
-               return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
-                                                            self->packet_filter);
+       if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_ALLMULTI;
        } else {
-               return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
-                                                   self->mc_list.ar,
-                                                   self->mc_list.count);
+               netdev_for_each_mc_addr(ha, ndev) {
+                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+
+                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+                               break;
+               }
+       }
+
+       if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+               packet_filter |= IFF_MULTICAST;
+               self->mc_list.count = i;
+               self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+                                                      self->mc_list.ar,
+                                                      self->mc_list.count);
        }
+       return aq_nic_set_packet_filter(self, packet_filter);
 }
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
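
With AQ_HW_MULTICAST_ADDRESS_MAX == 32 shared filter slots, the rewritten list handling above degrades to the coarser filter modes instead of bailing out. A few worked inputs (counts hypothetical):

/*
 *   40 UC, 0 MC   -> no exact filters written, IFF_PROMISC set
 *   10 UC, 30 MC  -> 10 UC exact filters written, IFF_ALLMULTI set
 *   10 UC, 10 MC  -> all 20 exact filters written, IFF_MULTICAST set
 */
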
index faa533a0ec474116b7d84369c947a7f0f1bfa853..fecfc401f95df041f56f348c1082147377464059 100644 (file)
@@ -75,7 +75,7 @@ struct aq_nic_s {
        struct aq_hw_link_status_s link_status;
        struct {
                u32 count;
-               u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+               u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
        } mc_list;
 
        struct pci_dev *pdev;
index 67e2f9fb9402f3ed419ee46c47a7f6bd4d8e1ffc..8cc6abadc03b90e88fb58b09a53e7da3702710e5 100644 (file)
@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
 
 static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
-                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
 {
index 819f6bcf9b4ee76e620691ae3861a1fad213eca9..956860a697970ab427be0357d8541e929a85c489 100644 (file)
@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
 
 static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
-                                          [AQ_CFG_MULTICAST_ADDRESS_MAX]
+                                          [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
 {
@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
 
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
-                                   HW_ATL_B0_MAC_MIN + i);
+                                          HW_ATL_B0_MAC_MIN + i);
        }
 
        err = aq_hw_err_from_flags(self);
index e743ddf46343302fe69c4c562c7cba239fe06dd9..5d0ab8e74b680cc6e75de6e91b79115b4637daa7 100644 (file)
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
 config ARC_EMAC
        tristate "ARC EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET
+       depends on ARC || COMPILE_TEST
        ---help---
          On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
          non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
 config EMAC_ROCKCHIP
        tristate "Rockchip EMAC support"
        select ARC_EMAC_CORE
-       depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+       depends on OF_IRQ && OF_NET && REGULATOR
+       depends on ARCH_ROCKCHIP || COMPILE_TEST
        ---help---
          Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
          This selects Rockchip SoC glue layer support for the
index 567ee54504bcd6eba897009259f691b74b77609e..5e5022fa1d047be078be911bc4f6cd0631f04de7 100644 (file)
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct alx_priv *alx = pci_get_drvdata(pdev);
        struct alx_hw *hw = &alx->hw;
+       int err;
 
        alx_reset_phy(hw);
 
        if (!netif_running(alx->dev))
                return 0;
        netif_device_attach(alx->dev);
-       return __alx_open(alx, true);
+
+       rtnl_lock();
+       err = __alx_open(alx, true);
+       rtnl_unlock();
+
+       return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
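
__alx_open() reaches code that must run under the RTNL (for example, changing the real number of TX queues); ndo_open gets the lock from the core, but a PM resume callback does not, hence the explicit lock/unlock above. The general shape, with dev_open() standing in for the driver's reopen helper:

static int resume_reopen(struct net_device *ndev)
{
	int err;

	rtnl_lock();	/* reconfiguration paths assert RTNL is held */
	err = dev_open(ndev);
	rtnl_unlock();

	return err;
}
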
index af75156919edfead9bbe1e223b92d45d4fdd444e..4c3bfde6e8de00f2010b1329e05c8b36a16e158f 100644 (file)
@@ -157,7 +157,6 @@ config BGMAC
 config BGMAC_BCMA
        tristate "Broadcom iProc GBit BCMA support"
        depends on BCMA && BCMA_HOST_SOC
-       depends on HAS_DMA
        depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
        select BGMAC
        select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
 
 config BGMAC_PLATFORM
        tristate "Broadcom iProc GBit platform support"
-       depends on HAS_DMA
        depends on ARCH_BCM_IPROC || COMPILE_TEST
        depends on OF
        select BGMAC
index d5fca2e5a9bc34ad6edfa295e378dfe12078c0e5..a1f60f89e05944458e98e7faa2292960368c5ef8 100644 (file)
@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
        if (!priv->is_lite)
                priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
        else
-               priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
-                                  GIB_FCS_STRIP);
+               priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+                                 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
 
        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
index d6e5d0cbf3a3b3c526d347add087c9cef428776a..cf440b91fd04a331a7dce529d740a22686b96dfd 100644 (file)
@@ -278,7 +278,8 @@ struct bcm_rsb {
 #define  GIB_GTX_CLK_EXT_CLK           (0 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_125MHZ            (1 << GIB_GTX_CLK_SEL_SHIFT)
 #define  GIB_GTX_CLK_250MHZ            (2 << GIB_GTX_CLK_SEL_SHIFT)
-#define  GIB_FCS_STRIP                 (1 << 6)
+#define  GIB_FCS_STRIP_SHIFT           6
+#define  GIB_FCS_STRIP                 (1 << GIB_FCS_STRIP_SHIFT)
 #define  GIB_LCL_LOOP_EN               (1 << 7)
 #define  GIB_LCL_LOOP_TXEN             (1 << 8)
 #define  GIB_RMT_LOOP_EN               (1 << 9)
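
Worked example of the SYSTEMPORT Lite check fixed above: crc_fwd means "the FCS is forwarded to the driver", which is true exactly when stripping is disabled, hence the logical inversion. A sketch of the fixed predicate (hypothetical register value):

static bool gib_crc_fwd(u32 gib_control)
{
	/* bit 6 set:   (reg & GIB_FCS_STRIP) >> 6 == 1 -> false
	 * bit 6 clear: (reg & GIB_FCS_STRIP) >> 6 == 0 -> true
	 */
	return !((gib_control & GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
}
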
index d847e1b9c37b5afff33e799e919e3ff39b5cd1e8..be1506169076f0a89f6a621d01dce81afe720ba7 100644 (file)
@@ -1533,6 +1533,7 @@ struct bnx2x {
        struct link_vars        link_vars;
        u32                     link_cnt;
        struct bnx2x_link_report_data last_reported_link;
+       bool                    force_link_down;
 
        struct mdio_if_info     mdio;
 
index 8cd73ff5debc276aec53d1f056fe3040875b2c0a..af7b5a4d8ba044800b0eb229d8c989c564515e94 100644 (file)
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
        struct bnx2x_link_report_data cur_data;
 
+       if (bp->force_link_down) {
+               bp->link_vars.link_up = 0;
+               return;
+       }
+
        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                bp->pending_max = 0;
        }
 
+       bp->force_link_down = false;
        if (bp->port.pmf) {
                rc = bnx2x_initial_phy_init(bp, load_mode);
                if (rc)
index 5b1ed240bf18be0963cc580ab4256b6adc924046..57348f2b49a31fd5b1ef5a67d2ba1e7945768ab0 100644 (file)
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
+               /* Immediately indicate link as down */
+               bp->link_vars.link_up = 0;
+               bp->force_link_down = true;
+               netif_carrier_off(bp->dev);
+               BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                /* When the ret value indicates an allocation failure,
                 * the nic is rebooted again. If open still fails, an error
index 176fc9f4d7defe6a9d5b513902c97f56d732b323..4394c1162be4fde931aa822d69a008b89f54efb8 100644 (file)
@@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
        }
        vnic->uc_filter_count = 1;
 
-       vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+       vnic->rx_mask = 0;
+       if (bp->dev->flags & IFF_BROADCAST)
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
        if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
        return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
 }
 
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 {
        bp->hw_resc.max_irqs = max_irqs;
 }
@@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
                rc = bnxt_request_irq(bp);
                if (rc) {
                        netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
-                       goto open_err;
+                       goto open_err_irq;
                }
        }
 
@@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 open_err:
        bnxt_debug_dev_exit(bp);
        bnxt_disable_napi(bp);
+
+open_err_irq:
        bnxt_del_napi(bp);
 
 open_err_free_mem:
@@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
 
        mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
                  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
-                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+                 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
+                 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
 
        if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
 
        uc_update = bnxt_uc_list_updated(bp);
 
+       if (dev->flags & IFF_BROADCAST)
+               mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
        if (dev->flags & IFF_ALLMULTI) {
                mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
                vnic->mc_list_count = 0;
@@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
        int rx, tx, cp;
 
        _bnxt_get_max_rings(bp, &rx, &tx, &cp);
+       *max_rx = rx;
+       *max_tx = tx;
        if (!rx || !tx || !cp)
                return -ENOMEM;
 
-       *max_rx = rx;
-       *max_tx = tx;
        return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
@@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
                /* Not enough rings, try disabling agg rings. */
                bp->flags &= ~BNXT_FLAG_AGG_RINGS;
                rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
-               if (rc)
+               if (rc) {
+                       /* set BNXT_FLAG_AGG_RINGS back for consistency */
+                       bp->flags |= BNXT_FLAG_AGG_RINGS;
                        return rc;
+               }
                bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
                bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
                bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
index 9b14eb610b9f653b61092d74b3ab9257a84383d9..91575ef97c8cb119d9407530f4b6f5472d72724c 100644 (file)
@@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index 795f45024c209e65591a3e9fe60814315ebb3cb0..491bd40a254d8dad8810d983505b69efe2d011b1 100644 (file)
 #define BNXT_FID_INVALID                       0xffff
 #define VLAN_TCI(vid, prio)    ((vid) | ((prio) << VLAN_PRIO_SHIFT))
 
+#define is_vlan_pcp_wildcarded(vlan_tci_mask)  \
+       ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vlan_pcp_exactmatch(vlan_tci_mask)  \
+       ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
+#define is_vlan_pcp_zero(vlan_tci)     \
+       ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vid_exactmatch(vlan_tci_mask)       \
+       ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+
 /* Return the dst fid of the func for flow forwarding
  * For PFs: src_fid is the fid of the PF
  * For VF-reps: src_fid the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
        return true;
 }
 
+static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
+                               __be16 vlan_tci)
+{
+       /* The VLAN priority must be either exactly zero or fully wildcarded,
+        * and the VLAN ID must be an exact match.
+        */
+       if (is_vid_exactmatch(vlan_tci_mask) &&
+           ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
+             is_vlan_pcp_zero(vlan_tci)) ||
+            is_vlan_pcp_wildcarded(vlan_tci_mask)))
+               return true;
+
+       return false;
+}
+
 static bool bits_set(void *key, int len)
 {
        const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
-           !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
-                          sizeof(flow->l2_mask.inner_vlan_tci))) {
-               netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+           !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
+                                flow->l2_key.inner_vlan_tci)) {
+               netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
index 347e4f946eb222ce5c8e1e14777c9d6555eb48dc..840f6e505f733208955bedee497ecf51397d487d 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
        bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
        bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
-       bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
                bnxt_close_nic(bp, true, false);
index 30273a7717e2df797890da57e229ce31e9d957e2..4fd829b5e65d14b56337e63fc480dd72c8420eeb 100644 (file)
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
 
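
The hunk above replaces a hand-rolled bitmap size (32-bit words, 4 bytes each) with BITS_TO_LONGS()/sizeof(long), matching the unsigned-long granularity that the kernel's bitmap helpers dereference; the old sizing can under-allocate on 64-bit. A minimal userspace sketch of the arithmetic (the macros are re-derived here, not taken from the driver):

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(nr)  DIV_ROUND_UP(nr, BITS_PER_LONG)

int main(void)
{
        unsigned int size = 20; /* example: a 20-entry ID table */

        /* old sizing: 32-bit words, so only 4 bytes here */
        size_t old_bytes = DIV_ROUND_UP(size, 32) * 4;
        /* new sizing: whole unsigned longs, 8 bytes on 64-bit, which is
         * the unit find_next_zero_bit() and friends actually read
         */
        size_t new_bytes = BITS_TO_LONGS(size) * sizeof(long);

        printf("old=%zu bytes, new=%zu bytes\n", old_bytes, new_bytes);
        return 0;
}

With size = 20, the old formula allocates 4 bytes while a 64-bit bitmap walk reads a full 8-byte word, an out-of-bounds read the new sizing avoids.
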
index 3be87efdc93d6347da8417ddcd101ed90cc12d8c..aa1374d0af9313dfdbf6a7f8dfeea92e2fee7013 100644 (file)
@@ -6,11 +6,15 @@
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2005-2016 Broadcom Corporation.
  * Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
  *
  * Firmware is:
  *     Derived from proprietary unpublished source code,
  *     Copyright (C) 2000-2016 Broadcom Corporation.
  *     Copyright (C) 2016-2017 Broadcom Ltd.
+ *     Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ *     refers to Broadcom Inc. and/or its subsidiaries.
  *
  *     Permission is hereby granted for the distribution of this firmware
  *     data in hexadecimal or equivalent format, provided this copyright
@@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp)
 
        tg3_restore_clk(tp);
 
+       /* Increase the core clock speed to fix the tx timeout issue for the
+        * 5762 at 100Mbps link speed.
+        */
+       if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+               val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+               tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+                    TG3_CPMU_MAC_ORIDE_ENABLE);
+       }
+
        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
index 1d61aa3efda177c64c69465f0b72c0df5221ba37..a772a33b685c5eb8c28137107eb33cb4b6ffeb1d 100644 (file)
@@ -7,6 +7,8 @@
  * Copyright (C) 2004 Sun Microsystems Inc.
  * Copyright (C) 2007-2016 Broadcom Corporation.
  * Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
  */
 
 #ifndef _T3_H
index 86659823b2592e20d16e1fbc0640d45c99508f47..3d45f4c92cf6e5d3f091ae654e5312165956d19f 100644 (file)
 #define GEM_DCFG6              0x0294 /* Design Config 6 */
 #define GEM_DCFG7              0x0298 /* Design Config 7 */
 #define GEM_DCFG8              0x029C /* Design Config 8 */
+#define GEM_DCFG10             0x02A4 /* Design Config 10 */
 
 #define GEM_TXBDCTRL   0x04cc /* TX Buffer Descriptor control register */
 #define GEM_RXBDCTRL   0x04d0 /* RX Buffer Descriptor control register */
 #define GEM_SCR2CMP_OFFSET                     0
 #define GEM_SCR2CMP_SIZE                       8
 
+/* Bitfields in DCFG10 */
+#define GEM_TXBD_RDBUFF_OFFSET                 12
+#define GEM_TXBD_RDBUFF_SIZE                   4
+#define GEM_RXBD_RDBUFF_OFFSET                 8
+#define GEM_RXBD_RDBUFF_SIZE                   4
+
 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET                   0
 #define GEM_SUBNSINCR_SIZE                     16
 #define MACB_CAPS_USRIO_DISABLED               0x00000010
 #define MACB_CAPS_JUMBO                                0x00000020
 #define MACB_CAPS_GEM_HAS_PTP                  0x00000040
+#define MACB_CAPS_BD_RD_PREFETCH               0x00000080
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
        unsigned int max_tuples;
 
        struct tasklet_struct   hresp_err_tasklet;
+
+       int     rx_bd_rd_prefetch;
+       int     tx_bd_rd_prefetch;
 };
 
 #ifdef CONFIG_MACB_USE_HWSTAMP
index 3e93df5d4e3b2573f88cc427e7eefc6d1930e3ff..a6c911bb5ce22588276a9f92561947ff5bff2726 100644 (file)
@@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp)
 {
        struct macb_queue *queue;
        unsigned int q;
+       int size;
 
-       queue = &bp->queues[0];
        bp->macbgem_ops.mog_free_rx_buffers(bp);
-       if (queue->rx_ring) {
-               dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
-                               queue->rx_ring, queue->rx_ring_dma);
-               queue->rx_ring = NULL;
-       }
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
                kfree(queue->tx_skb);
                queue->tx_skb = NULL;
                if (queue->tx_ring) {
-                       dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
+                       size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+                       dma_free_coherent(&bp->pdev->dev, size,
                                          queue->tx_ring, queue->tx_ring_dma);
                        queue->tx_ring = NULL;
                }
+               if (queue->rx_ring) {
+                       size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+                       dma_free_coherent(&bp->pdev->dev, size,
+                                         queue->rx_ring, queue->rx_ring_dma);
+                       queue->rx_ring = NULL;
+               }
        }
 }
 
@@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
        int size;
 
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-               size = TX_RING_BYTES(bp);
+               size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
                queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                    &queue->tx_ring_dma,
                                                    GFP_KERNEL);
@@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
                if (!queue->tx_skb)
                        goto out_err;
 
-               size = RX_RING_BYTES(bp);
+               size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
                queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
                                                 &queue->rx_ring_dma, GFP_KERNEL);
                if (!queue->rx_ring)
@@ -3726,6 +3728,8 @@ static int at91ether_init(struct platform_device *pdev)
        int err;
        u32 reg;
 
+       bp->queues[0].bp = bp;
+
        dev->netdev_ops = &at91ether_netdev_ops;
        dev->ethtool_ops = &macb_ethtool_ops;
 
@@ -3795,7 +3799,7 @@ static const struct macb_config np4_config = {
 static const struct macb_config zynqmp_config = {
        .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
                        MACB_CAPS_JUMBO |
-                       MACB_CAPS_GEM_HAS_PTP,
+                       MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
        .dma_burst_length = 16,
        .clk_init = macb_clk_init,
        .init = macb_init,
@@ -3856,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
        void __iomem *mem;
        const char *mac;
        struct macb *bp;
-       int err;
+       int err, val;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3945,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
        else
                dev->max_mtu = ETH_DATA_LEN;
 
+       if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
+               val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
+               if (val)
+                       bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
+                                               macb_dma_desc_get_size(bp);
+
+               val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
+               if (val)
+                       bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
+                                               macb_dma_desc_get_size(bp);
+       }
+
        mac = of_get_mac_address(np);
        if (mac) {
                ether_addr_copy(bp->dev->dev_addr, mac);
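
The probe-time change above decodes DCFG10: each 4-bit RDBUFF field tells how many descriptors the controller may prefetch, and the driver reserves that much extra ring memory. A standalone sketch of the decode; the register value and the 16-byte descriptor size are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define RXBD_RDBUFF_OFFSET 8   /* mirrors the new GEM_RXBD_RDBUFF_* defines */
#define RXBD_RDBUFF_SIZE   4

/* extract a size-bit field at offset, like the driver's GEM_BFEXT() */
static uint32_t bfext(uint32_t reg, unsigned int offset, unsigned int size)
{
        return (reg >> offset) & ((1u << size) - 1);
}

int main(void)
{
        uint32_t dcfg10 = 0x00000200;  /* assumed register value: field = 2 */
        uint32_t desc_size = 16;       /* assumed DMA descriptor size in bytes */
        uint32_t val = bfext(dcfg10, RXBD_RDBUFF_OFFSET, RXBD_RDBUFF_SIZE);

        if (val) /* a field value of n encodes up to 2^n prefetched descriptors */
                printf("reserve %u extra bytes per RX ring\n",
                       (2u << (val - 1)) * desc_size);
        return 0;
}
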
index 2220c771092b46e8fb583d46ea99d5829e1793d0..678835136bf8069326067feaa46f8465db4e38d4 100644 (file)
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 
        if (delta > TSU_NSEC_MAX_VAL) {
                gem_tsu_get_time(&bp->ptp_clock_info, &now);
-               if (sign)
-                       now = timespec64_sub(now, then);
-               else
-                       now = timespec64_add(now, then);
+               now = timespec64_add(now, then);
 
                gem_tsu_set_time(&bp->ptp_clock_info,
                                 (const struct timespec64 *)&now);
index 07d2201530d26c85e26cf0987553451acad936a6..9fdd496b90ff47cb0f1147777ae7b9ca0071076d 100644 (file)
@@ -1,6 +1,6 @@
 config NET_CALXEDA_XGMAC
        tristate "Calxeda 1G/10G XGMAC Ethernet driver"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on ARCH_HIGHBANK || COMPILE_TEST
        select CRC32
        help
index 043e3c11c42bd407d47561bec2a2e0acd525f12b..92d88c5f76fb8b68e9f8b35ada37d4a77d68f739 100644 (file)
@@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM
 
 config THUNDER_NIC_PF
        tristate "Thunder Physical function driver"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select THUNDER_NIC_BGX
        ---help---
          This driver supports Thunder's NIC physical function.
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF
 config THUNDER_NIC_VF
        tristate "Thunder Virtual function driver"
        imply CAVIUM_PTP
-       depends on 64BIT
+       depends on 64BIT && PCI
        ---help---
          This driver supports Thunder's NIC virtual function.
 
 config THUNDER_NIC_BGX
        tristate "Thunder MAC interface driver (BGX)"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select PHYLIB
        select MDIO_THUNDER
        select THUNDER_NIC_RGX
@@ -44,7 +44,7 @@ config        THUNDER_NIC_BGX
 
 config THUNDER_NIC_RGX
        tristate "Thunder MAC interface driver (RGX)"
-       depends on 64BIT
+       depends on 64BIT && PCI
        select PHYLIB
        select MDIO_THUNDER
        ---help---
@@ -53,7 +53,7 @@ config        THUNDER_NIC_RGX
 
 config CAVIUM_PTP
        tristate "Cavium PTP coprocessor as PTP clock"
-       depends on 64BIT
+       depends on 64BIT && PCI
        imply PTP_1588_CLOCK
        default y
        ---help---
@@ -65,7 +65,7 @@ config CAVIUM_PTP
 
 config LIQUIDIO
        tristate "Cavium LiquidIO support"
-       depends on 64BIT
+       depends on 64BIT && PCI
        depends on MAY_USE_DEVLINK
        imply PTP_1588_CLOCK
        select FW_LOADER
index 8a815bb5717732331293e9fba5b00d3ca23aaf88..7e8454d3b1ad3f382f778c27058695c34b9f13cb 100644 (file)
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
  */
 #define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
 
+/* time to wait for possible in-flight requests in milliseconds */
+#define WAIT_INFLIGHT_REQUEST  msecs_to_jiffies(1000)
+
 struct lio_trusted_vf_ctx {
        struct completion complete;
        int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
        force_io_queues_off(oct);
 
        /* To allow for in-flight requests */
-       schedule_timeout_uninterruptible(100);
+       schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
 
        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");
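
The quiesce delay above was a bare 100, which schedule_timeout_uninterruptible() interprets as jiffies (timer ticks), not milliseconds, so the real wait depended on CONFIG_HZ; wrapping it in msecs_to_jiffies() pins the intent to one second. A rough userspace model of the conversion, with HZ an assumed build-time tick rate:

#include <stdio.h>

#define HZ 250 /* assumed tick rate; 100, 250, and 1000 are all common */

/* round-up conversion, modeled on the kernel's generic msecs_to_jiffies() */
static unsigned long msecs_to_jiffies_model(unsigned int ms)
{
        return ((unsigned long)ms * HZ + 999) / 1000;
}

int main(void)
{
        printf("a bare 100 is 100 ticks = %d ms at HZ=%d\n",
               100 * 1000 / HZ, HZ);
        printf("msecs_to_jiffies(1000) = %lu ticks\n",
               msecs_to_jiffies_model(1000));
        return 0;
}
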
index 3f6afb54a5eb188061dcad1ce4679465d408db86..bb43ddb7539e719d0cbff780e5ddf17c756dbe05 100644 (file)
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct octeon_mgmt *p = netdev_priv(netdev);
-       int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+       int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        netdev->mtu = new_mtu;
 
-       cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
+       /* HW lifts the limit if the frame is VLAN tagged
+        * (+4 bytes per tag, up to two tags)
+        */
+       cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
+       /* Set the hardware to truncate packets larger than the MTU. The jabber
+        * register must be set to a multiple of 8 bytes, so round up. JABBER is
+        * an unconditional limit, so we need to account for two possible VLAN
+        * tags.
+        */
        cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
-                      (size_without_fcs + 7) & 0xfff8);
+                      (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
 
        return 0;
 }
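
Two details above are easy to miss: FRM_MAX now counts the Ethernet header and FCS explicitly, and the JABBER cutoff must be a multiple of 8 bytes, so the driver rounds up with (n + 7) & 0xfff8 after budgeting for two VLAN tags. A standalone sketch of that arithmetic:

#include <stdio.h>

#define ETH_HLEN    14
#define ETH_FCS_LEN 4
#define VLAN_HLEN   4

int main(void)
{
        int new_mtu = 1500; /* example MTU */
        int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
        /* add 7, then clear the low 3 bits: round up to a multiple of 8 */
        int jabber = (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8;

        printf("FRM_MAX=%d JABBER=%d\n", max_packet, jabber); /* 1518, 1528 */
        return 0;
}
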
index 7b795edd9d3a9543271d29acf0cc35d760a6b065..a19172dbe6be272d9a168302bab18f551a687a17 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "common.h"
 #include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
                if (t.qset_idx >= nqsets)
                        return -EINVAL;
+               t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
 
                q = &adapter->params.sge.qset[q1 + t.qset_idx];
                t.rspq_size = q->rspq_size;
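
The added array_index_nospec() clamps t.qset_idx after the bounds check so the CPU cannot use an out-of-range index speculatively (the Spectre-v1 pattern). A userspace model of the branchless mask behind the kernel's generic helper; it relies on arithmetic right shift of a negative value, as the kernel version does, and is a sketch of the idea rather than the driver code:

#include <stdio.h>
#include <stddef.h>

/* ~0UL when idx < size, 0UL otherwise, with no data-dependent branch:
 * if idx >= size, (size - 1 - idx) underflows and sets the top bit.
 */
static size_t index_mask_nospec(size_t idx, size_t size)
{
        return (size_t)(~(long)(idx | (size - 1 - idx)) >>
                        (sizeof(long) * 8 - 1));
}

int main(void)
{
        size_t nqsets = 8, table[8] = { 0 };
        size_t qset_idx = 12; /* attacker-controlled, out of range */

        if (qset_idx >= nqsets)
                printf("rejected architecturally\n");
        /* even under speculation, the masked index stays inside the table */
        qset_idx &= index_mask_nospec(qset_idx, nqsets);
        printf("clamped index %zu -> table[%zu]=%zu\n",
               qset_idx, qset_idx, table[qset_idx]);
        return 0;
}
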
index dd04a2f89ce62db6ea9bca433023d9aac4b10e23..bc03c175a3cdf1440aca2269b4483f59cdb3a9dc 100644 (file)
@@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
-                       txq->dcb_prio = value;
+                       txq->dcb_prio = enable ? value : 0;
        }
 }
 
index 974a868a4824b78dc8cb7225f37b5d2cf8b24b32..3720c3e11ebb883466d04b4a2169878f0b135399 100644 (file)
@@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap)
        };
 
        unsigned int part, manufacturer;
-       unsigned int density, size;
+       unsigned int density, size = 0;
        u32 flashid = 0;
        int ret;
 
@@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x22: /* 256MB */
                        size = 1 << 28;
                        break;
-
-               default:
-                       dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x17: /* 64MB */
                        size = 1 << 26;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x18: /* 16MB */
                        size = 1 << 24;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
@@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap)
                case 0x18: /* 16MB */
                        size = 1 << 24;
                        break;
-               default:
-                       dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
-                               flashid, density);
-                       return -EINVAL;
                }
                break;
        }
-       default:
-               dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
-                       flashid);
-               return -EINVAL;
+       }
+
+       /* If we didn't recognize the FLASH part, that's no real issue: the
+        * Hardware/Software contract says that Hardware will _*ALWAYS*_
+        * use a FLASH part which is at least 4MB in size and has 64KB
+        * sectors.  The unrecognized FLASH part is likely to be much larger
+        * than 4MB, but that's all we really need.
+        */
+       if (size == 0) {
+               dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+                        flashid);
+               size = 1 << 22;
        }
 
        /* Store decoded Flash size and fall through into vetting code. */
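
Rather than failing the probe on every unrecognized density code, the rewritten decode leaves size at its 0 sentinel and falls back to the contractually guaranteed 4MB minimum with a warning. A compact sketch of that decode-with-fallback shape, with density codes echoing the hunks above and everything else illustrative:

#include <stdio.h>

/* returns the flash size in bytes; 0 means "code not recognized" */
static unsigned int decode_density(unsigned int density)
{
        switch (density) {
        case 0x17: return 1u << 26; /* 64MB */
        case 0x18: return 1u << 24; /* 16MB */
        /* no default case: unknown codes fall through as 0 */
        }
        return 0;
}

int main(void)
{
        unsigned int size = decode_density(0x99 /* unknown code */);

        if (size == 0) {
                fprintf(stderr, "unknown flash part, assuming 4MB\n");
                size = 1u << 22; /* guaranteed minimum: 4MB, 64KB sectors */
        }
        printf("flash size: %u bytes\n", size);
        return 0;
}
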
index 973c1fb70d09929f92fc47db0e3d60e3146eaff0..99038dfc7fbe52bea5932691133e2bdeced48844 100644 (file)
@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
        enic->rfs_h.max = enic->config.num_arfs;
        enic->rfs_h.free = enic->rfs_h.max;
        enic->rfs_h.toclean = 0;
-       enic_rfs_timer_start(enic);
 }
 
 void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
 
        enic_rfs_timer_stop(enic);
        spin_lock_bh(&enic->rfs_h.lock);
-       enic->rfs_h.free = 0;
        for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
                struct hlist_head *hhead;
                struct hlist_node *tmp;
@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
                        enic_delfltr(enic, n->fltr_id);
                        hlist_del(&n->node);
                        kfree(n);
+                       enic->rfs_h.free++;
                }
        }
        spin_unlock_bh(&enic->rfs_h.lock);
index 30d2eaa18c0479adcd75315db194d3785b8007bc..90c645b8538e0f7ae8c77d625ded6cd6b0e0ca0f 100644 (file)
@@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
 {
        struct enic *enic = netdev_priv(netdev);
        unsigned int i;
-       int err;
+       int err, ret;
 
        err = enic_request_intr(enic);
        if (err) {
@@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev)
                vnic_intr_unmask(&enic->intr[i]);
 
        enic_notify_timer_start(enic);
-       enic_rfs_flw_tbl_init(enic);
+       enic_rfs_timer_start(enic);
 
        return 0;
 
 err_out_free_rq:
        for (i = 0; i < enic->rq_count; i++) {
-               err = vnic_rq_disable(&enic->rq[i]);
-               if (err)
-                       return err;
-               vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+               ret = vnic_rq_disable(&enic->rq[i]);
+               if (!ret)
+                       vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
        }
        enic_dev_notify_unset(enic);
 err_out_free_intr:
@@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        timer_setup(&enic->notify_timer, enic_notify_timer, 0);
 
+       enic_rfs_flw_tbl_init(enic);
        enic_set_rx_coal_setting(enic);
        INIT_WORK(&enic->reset, enic_reset);
        INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
index 78db8e62a83f17c05d615cb674703efa4e926bd0..ed6c76d20b45b2a38ccf87e63487e77a756812a3 100644 (file)
@@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
        if (unlikely(nd->state != ncsi_dev_state_functional))
                return;
 
-       netdev_info(nd->dev, "NCSI interface %s\n",
-                   nd->link_up ? "up" : "down");
+       netdev_dbg(nd->dev, "NCSI interface %s\n",
+                  nd->link_up ? "up" : "down");
 }
 
 static void ftgmac100_setup_clk(struct ftgmac100 *priv)
index 5f4e1ffa7b95fe4f8d2bb6447764951c51fffc67..ab02057ac7304f088242a2a07481820302d3556b 100644 (file)
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                sizeof(struct qm_sg_entry) * (1 + nr_frags),
+               dma_unmap_single(dev, addr,
+                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                 dma_dir);
 
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        void *sgt_buf;
 
        /* get a page frag to store the SGTable */
-       sz = SKB_DATA_ALIGN(priv->tx_headroom +
-               sizeof(struct qm_sg_entry) * (1 + nr_frags));
+       sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
        sgt_buf = netdev_alloc_frag(sz);
        if (unlikely(!sgt_buf)) {
                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        skbh = (struct sk_buff **)buffer_start;
        *skbh = skb;
 
-       addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-                             sizeof(struct qm_sg_entry) * (1 + nr_frags),
-                             dma_dir);
+       addr = dma_map_single(dev, buffer_start,
+                             priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
        if (unlikely(dma_mapping_error(dev, addr))) {
                dev_err(dev, "DMA mapping failed");
                err = -EINVAL;
index ce6e24c74978a22a1d22383f0a5b4f38ffec7c00..ecbf6187e13a1fe3d6dba06015ff9cc49aed6224 100644 (file)
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
        struct {
                u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
                iowrite32be(0xffffffff, &regs->pmda[i].lcv);
        }
 
+       /* Short packet padding removal from checksum calculation */
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
        start_port_hwp(port);
 }
 
index 8bcf470ff5f38a4e62842a5f31d5c0b45141ab85..fb1a7251f45d336978199d208af5e1a40eee1556 100644 (file)
@@ -5,7 +5,7 @@
 config NET_VENDOR_HISILICON
        bool "Hisilicon devices"
        default y
-       depends on (OF || ACPI) && HAS_DMA
+       depends on OF || ACPI
        depends on ARM || ARM64 || COMPILE_TEST
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y.
index e2e5cdc7119c3ed0e890f99c7b30996d72d280e9..4c0f7eda1166c5df202c3b9a71cc2e43516531fb 100644 (file)
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
        struct hinic_rq *rq = rxq->rq;
 
+       irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
 }
index d0e196bff0818ce214b4909cb97976aa3502bdc4..ffe7acbeaa22d372b7ce32f9950edb9a98cdf71a 100644 (file)
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
        return;
 
 failure:
-       dev_info(dev, "replenish pools failure\n");
+       if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
+               dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
        pool->free_map[pool->next_free] = index;
        pool->rx_buff[index].skb = NULL;
 
@@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
                                      &tx_crq);
        }
        if (lpar_rc != H_SUCCESS) {
-               dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+               if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+                       dev_err_ratelimited(dev, "tx: send failed\n");
                dev_kfree_skb_any(skb);
                tx_buff->skb = NULL;
 
@@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
                rc = ibmvnic_login(netdev);
                if (rc) {
-                       adapter->state = VNIC_PROBED;
-                       return 0;
+                       adapter->state = reset_state;
+                       return rc;
                }
 
                if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
        return crq;
 }
 
+static void print_subcrq_error(struct device *dev, int rc, const char *func)
+{
+       switch (rc) {
+       case H_PARAMETER:
+               dev_warn_ratelimited(dev,
+                                    "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
+                                    func, rc);
+               break;
+       case H_CLOSED:
+               dev_warn_ratelimited(dev,
+                                    "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
+                                    func, rc);
+               break;
+       default:
+               dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
+               break;
+       }
+}
+
 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                       union sub_crq *sub_crq)
 {
@@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
                                cpu_to_be64(u64_crq[2]),
                                cpu_to_be64(u64_crq[3]));
 
-       if (rc) {
-               if (rc == H_CLOSED)
-                       dev_warn(dev, "CRQ Queue closed\n");
-               dev_err(dev, "Send error (rc=%d)\n", rc);
-       }
+       if (rc)
+               print_subcrq_error(dev, rc, __func__);
 
        return rc;
 }
@@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
                                cpu_to_be64(remote_handle),
                                ioba, num_entries);
 
-       if (rc) {
-               if (rc == H_CLOSED)
-                       dev_warn(dev, "CRQ Queue closed\n");
-               dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
-       }
+       if (rc)
+               print_subcrq_error(dev, rc, __func__);
 
        return rc;
 }
index 8ffb7454e67c2a0309708c1b47487c4d1c58b440..b151ae316546c2483aa91abfabc900b608e53e4a 100644 (file)
@@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
        unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
 #else
        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-                               SKB_DATA_ALIGN(I40E_SKB_PAD +
-                                              (xdp->data_end -
-                                               xdp->data_hard_start));
+                               SKB_DATA_ALIGN(xdp->data_end -
+                                              xdp->data_hard_start);
 #endif
        struct sk_buff *skb;
 
@@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                return NULL;
 
        /* update pointers within the skb to store the data */
-       skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
        __skb_put(skb, xdp->data_end - xdp->data);
        if (metasize)
                skb_metadata_set(skb, metasize);
@@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS          0
+#define I40E_XDP_CONSUMED      BIT(0)
+#define I40E_XDP_TX            BIT(1)
+#define I40E_XDP_REDIR         BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                              struct i40e_ring *xdp_ring);
@@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       bool failure = false, xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
+       bool failure = false;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -I40E_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & I40E_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & I40E_XDP_TX) {
                struct i40e_ring *xdp_ring =
                        rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
                i40e_xdp_ring_update_tail(xdp_ring);
-               xdp_do_flush_map();
        }
 
        rx_ring->skb = skb;
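
The verdict codes above change from an enumeration (0, 1, 2) to independent bits so a single NAPI budget can record both TX and REDIRECT outcomes and flush each path exactly once after the loop. A userspace sketch of that accumulate-then-flush pattern, reusing the driver's constants:

#include <stdio.h>

#define I40E_XDP_PASS     0
#define I40E_XDP_CONSUMED (1u << 0)
#define I40E_XDP_TX       (1u << 1)
#define I40E_XDP_REDIR    (1u << 2)

int main(void)
{
        /* pretend verdicts returned for four frames in one poll */
        unsigned int verdicts[] = { I40E_XDP_TX, I40E_XDP_REDIR,
                                    I40E_XDP_CONSUMED, I40E_XDP_TX };
        unsigned int xdp_xmit = 0;

        for (unsigned int i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
                if (verdicts[i] & (I40E_XDP_TX | I40E_XDP_REDIR))
                        xdp_xmit |= verdicts[i]; /* OR, so no outcome is lost */

        if (xdp_xmit & I40E_XDP_REDIR)
                printf("xdp_do_flush_map() once\n");
        if (xdp_xmit & I40E_XDP_TX)
                printf("bump the XDP TX ring tail once\n");
        return 0;
}
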
index 3f5c350716bb0e595d79ec928188f5862461694c..0bd1294ba51737240d510f31bbd255faceffeb11 100644 (file)
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
        if (enable_addr != 0)
                rar_high |= IXGBE_RAH_AV;
 
+       /* Record the lower 32 bits of the MAC address and then make
+        * sure that the write is flushed to hardware before writing
+        * the upper 16 bits and setting the valid bit.
+        */
        IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_FLUSH(hw);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
 
        return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
        rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
        rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
 
-       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+       /* Clear the address valid bit and upper 16 bits of the address
+        * before clearing the lower bits. This way we aren't updating
+        * a live filter.
+        */
        IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+       IXGBE_WRITE_FLUSH(hw);
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
 
        /* clear VMDq pool/queue selection for this RAR */
        hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
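
Both RAR hunks enforce the same invariant from opposite directions: the register half carrying the address-valid bit (RAH) must never be observed next to a stale other half, so the writes are ordered and a posted-write flush (a read-back) sits between them. A stubbed-out sketch of the set path; the accessors and register values are stand-ins, not the driver's MMIO helpers:

#include <stdint.h>
#include <stdio.h>

static uint32_t ral, rah; /* stand-ins for the IXGBE_RAL/IXGBE_RAH registers */

static void write_ral(uint32_t v) { ral = v; }
static void write_rah(uint32_t v) { rah = v; }
/* on real hardware, a register read forces earlier posted writes to land */
static void write_flush(void)     { (void)ral; }

int main(void)
{
        uint32_t rar_low  = 0xddccbbaa; /* low 32 bits of the MAC */
        uint32_t rar_high = 0x8000ffee; /* top 16 bits | valid (AV) bit */

        write_ral(rar_low);
        write_flush();       /* the low half is committed before... */
        write_rah(rar_high); /* ...the entry is marked valid */

        printf("RAL=%08x RAH=%08x\n", ral, rah);
        return 0;
}
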
index c116f459945d62455843d4e9262971630dd45099..da4322e4daed5de4fb44f06d8cdb488bc41f6432 100644 (file)
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
        }
 
        itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
-       if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+       if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
                netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
                           __func__, itd->sa_idx, xs->xso.offload_handle);
                return 0;
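
The one-character fix above is the classic fencepost: a table with IXGBE_IPSEC_MAX_SA_COUNT entries has valid indices 0 through MAX-1, so MAX itself must be rejected. A tiny illustration, with the table size assumed for the demo:

#include <stdio.h>

#define MAX_SA_COUNT 1024 /* assumed table size, for illustration only */

int main(void)
{
        unsigned int sa_idx = MAX_SA_COUNT; /* the first *invalid* index */

        /* 'sa_idx > MAX' lets MAX slip through; 'sa_idx >= MAX' rejects it */
        printf("old check rejects: %s\n", sa_idx > MAX_SA_COUNT ? "yes" : "no");
        printf("new check rejects: %s\n", sa_idx >= MAX_SA_COUNT ? "yes" : "no");
        return 0;
}
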
index 3e87dbbc90246dba3a59e3f8ccded5885b441ae2..62e57b05a0aed3d9a02bf8d473aa49505608728f 100644 (file)
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS         0
+#define IXGBE_XDP_CONSUMED     BIT(0)
+#define IXGBE_XDP_TX           BIT(1)
+#define IXGBE_XDP_REDIR                BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                               struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
                if (!err)
-                       result = IXGBE_XDP_TX;
+                       result = IXGBE_XDP_REDIR;
                else
                        result = IXGBE_XDP_CONSUMED;
                break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & IXGBE_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
                /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
-
-               xdp_do_flush_map();
        }
 
        u64_stats_update_begin(&rx_ring->syncp);
index cc2f7701e71e1b033c4bd7ceb78c970351f4d9ee..f33fd22b351c856a3544cdd9628a9da500d13abf 100644 (file)
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
-       depends on HAS_DMA
+       depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+       depends on INET
        select PHYLIB
        select MVMDIO
        ---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
 config MVNETA
        tristate "Marvell Armada 370/38x/XP/37xx network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
 config MVPP2
        tristate "Marvell Armada 375/7K/8K network interface support"
        depends on ARCH_MVEBU || COMPILE_TEST
-       depends on HAS_DMA
        select MVMDIO
        select PHYLINK
        ---help---
@@ -93,7 +91,7 @@ config MVPP2
 
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
-       depends on HAS_IOMEM && HAS_DMA
+       depends on HAS_IOMEM
        depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
        select PHYLIB
        ---help---
index 17a904cc6a5e0fbe538f42ec2b00573e035c2955..0ad2f3f7da85a029b5dea7dd3ce67b69d4ff8605 100644 (file)
@@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                index = rx_desc - rxq->descs;
                data = rxq->buf_virt_addr[index];
-               phys_addr = rx_desc->buf_phys_addr;
+               phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
 
                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
index 9f54ccbddea74b57973ee724acf360fa23434a3e..3360f7b9ee73bdb32957472299a3438e8189f7bd 100644 (file)
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 {
        const struct mlx4_en_frag_info *frag_info = priv->frag_info;
        unsigned int truesize = 0;
+       bool release = true;
        int nr, frag_size;
        struct page *page;
        dma_addr_t dma;
-       bool release;
 
        /* Collect used fragments while replacing them in the HW descriptors */
        for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
                        release = page_count(page) != 1 ||
                                  page_is_pfmemalloc(page) ||
                                  page_to_nid(page) != numa_mem_id();
-               } else {
+               } else if (!priv->rx_headroom) {
+                       /* rx_headroom for a non-XDP setup is always 0.
+                        * When XDP is set, the condition above
+                        * guarantees the page is always released.
+                        */
                        u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
 
                        frags->page_offset += sz_align;
index 487388aed98f22cc9ae814fd60d27b48d5105458..384c1fa490811ee651919c139b9cd9e724d4ff81 100644 (file)
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
        unsigned long flags;
        bool poll_cmd = ent->polling;
        int alloc_ret;
+       int cmd_mode;
 
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
+       cmd_mode = cmd->mode;
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        mmiowb();
        /* if not in polling don't use ent after this point */
-       if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+       if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
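
The new cmd_mode local looks redundant, but it closes a use-after-free window: once the doorbell write posts the command, the completion path may free ent, so anything needed afterwards is copied out first. A toy model of the rule, with the asynchronous free stood in by a direct one:

#include <stdio.h>
#include <stdlib.h>

struct cmd_ent { int mode; /* ... */ };

/* stand-in: ringing the doorbell may let a completion free the entry */
static void ring_doorbell(struct cmd_ent *ent)
{
        free(ent);
}

int main(void)
{
        struct cmd_ent *ent = malloc(sizeof(*ent));

        if (!ent)
                return 1;
        ent->mode = 0; /* say 0 means polling */

        int cmd_mode = ent->mode; /* snapshot BEFORE the hand-off */
        ring_doorbell(ent);       /* 'ent' must not be touched past here */

        if (cmd_mode == 0)
                printf("poll for completion using the local copy\n");
        return 0;
}
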
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-       char outlen_str[8];
+       char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;
 
-       outlen_str[7] = 0;
-
        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;
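
Zero-initializing outlen_str replaces the single hand-placed terminator: every byte starts as NUL, so sscanf() always finds a terminator no matter how few bytes the user copy fills in. A userspace sketch, with copy_from_user() modeled by memcpy():

#include <stdio.h>
#include <string.h>

int main(void)
{
        char outlen_str[8] = {0};              /* every byte starts out NUL */
        const char user_buf[2] = { '4', '2' }; /* unterminated "user" input */
        int outlen = 0;

        /* stand-in for copy_from_user(outlen_str, buf, count) */
        memcpy(outlen_str, user_buf, sizeof(user_buf));

        /* sscanf() is guaranteed a terminated string, whatever 'count' was */
        if (sscanf(outlen_str, "%d", &outlen) == 1)
                printf("outlen=%d\n", outlen);
        return 0;
}
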
index 56c1b6f5593e053d4629b15635bacf1ece9d6a88..dae4156a710ddc60467999ab56c67b7ff31914db 100644 (file)
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        mlx5e_activate_channels(&priv->channels);
        netif_tx_start_all_queues(priv->netdev);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_add_sqs_fwd_rules(priv);
 
        mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
        mlx5e_redirect_rqts_to_drop(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_remove_sqs_fwd_rules(priv);
 
        /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-       if (MLX5_VPORT_MANAGER(mdev))
+       if (MLX5_ESWITCH_MANAGER(mdev))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        mlx5e_enable_async_events(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_register_vport_reps(priv);
 
        if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_unregister_vport_reps(priv);
 
        mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-       if (MLX5_VPORT_MANAGER(mdev)) {
+       if (MLX5_ESWITCH_MANAGER(mdev)) {
                rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                if (!rpriv) {
                        mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
index 57987f6546e8357bdfaeb3e657e0f07fe47d940a..2b8040a3cdbd7c2f74bb854bd8141ba379ea37de 100644 (file)
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
 
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;
 
        rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
-       struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5_eswitch_rep *rep;
 
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
+
+       rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;
 
index f63dfbcd29fea1efc2237d6dcecdbdd74259e1a0..b79d74860a304669eb4e05cf03fa0a267213926a 100644 (file)
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
        int err;
        int i, enabled_events;
 
-       if (!ESW_ALLOWED(esw))
-               return 0;
-
-       if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+       if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        u64 node_guid;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
        struct mlx5_vport *evport;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
index cecd201f0b73ab8a42693a79070c21bcc850d6e4..91f1209886ffdbb37af33ac32369f312296f8bfa 100644 (file)
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EPERM;
 
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
index 49a75d31185ecf25ff93c5f3a9beec6b48be28a1..f1a86cea86a0e24c5128e50f23bbba8a33112b7d 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        goto err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                        err = init_fdb_root_ns(steering);
                        if (err)
index afd9f4fa22f40b70506fafa49f29cf647c22a959..41ad24f0de2cf9d171e586df3b9d167515d3cb03 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
        }
 
        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-           MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+           MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
index 7cb67122e8b5f04371651e1c1e2757acb281a36e..98359559c77e4286df95df17651a4b9f2ca8e427 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
        int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
        struct mlx5_mpfs *mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return;
 
        WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
        u32 index;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
index fa9d0760dd36ffda5c2c439f12bbdffab6320ccd..31a9cbd85689b01fc0bfe9e6c221d73cc7c5fe13 100644 (file)
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
                                   int inlen)
 {
-       u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
                                     int outlen)
 {
-       u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
index 2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671..a0674962f02c4d2a35d05c98f84436967703101c 100644 (file)
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return -EBUSY;
        }
 
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               goto enable_vfs_hca;
+
        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return err;
        }
 
+enable_vfs_hca:
        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        }
 
 out:
-       mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+       if (MLX5_ESWITCH_MANAGER(dev))
+               mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
index 719cecb182c6c4eb5579eb1b36601acb6c0d0c5c..7eecd5b07bb1931bf3041b1ae12b0f3f5154405a 100644 (file)
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
-       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -EOPNOTSUPP;
 
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
index f4d9c9975ac3d857f50ef255756ea23a7a11fdb5..82827a8d3d67cac73ac3f6c232e3f750553deddc 100644 (file)
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
 
 config MLXSW_PCI
        tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
-       depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+       depends on PCI && HAS_IOMEM && MLXSW_CORE
        default m
        ---help---
          This is PCI bus implementation for Mellanox Technologies Switch ASICs.
index 6aaaf3d9ba31d9538d9307caa0450a848bf6b091..77b2adb293415a9de16caaabbd203b397cd12a4a 100644 (file)
@@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
        kfree(mlxsw_sp_rt6);
 }
 
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
+{
+       /* RTF_CACHE routes are ignored */
+       return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+}
+
 static struct fib6_info *
 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 {
@@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 
 static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
-                                const struct fib6_info *nrt, bool append)
+                                const struct fib6_info *nrt, bool replace)
 {
        struct mlxsw_sp_fib6_entry *fib6_entry;
 
-       if (!append)
+       if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
                return NULL;
 
        list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                        break;
                if (rt->fib6_metric < nrt->fib6_metric)
                        continue;
-               if (rt->fib6_metric == nrt->fib6_metric)
+               if (rt->fib6_metric == nrt->fib6_metric &&
+                   mlxsw_sp_fib6_rt_can_mp(rt))
                        return fib6_entry;
                if (rt->fib6_metric > nrt->fib6_metric)
                        break;
@@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                              const struct fib6_info *nrt, bool replace)
 {
-       struct mlxsw_sp_fib6_entry *fib6_entry;
+       struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
 
        list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
                struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
                        continue;
                if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
                        break;
-               if (replace && rt->fib6_metric == nrt->fib6_metric)
-                       return fib6_entry;
+               if (replace && rt->fib6_metric == nrt->fib6_metric) {
+                       if (mlxsw_sp_fib6_rt_can_mp(rt) ==
+                           mlxsw_sp_fib6_rt_can_mp(nrt))
+                               return fib6_entry;
+                       if (mlxsw_sp_fib6_rt_can_mp(nrt))
+                               fallback = fallback ?: fib6_entry;
+               }
                if (rt->fib6_metric > nrt->fib6_metric)
-                       return fib6_entry;
+                       return fallback ?: fib6_entry;
        }
 
-       return NULL;
+       return fallback;
 }
 
 static int
@@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
-                                   struct fib6_info *rt, bool replace,
-                                   bool append)
+                                   struct fib6_info *rt, bool replace)
 {
        struct mlxsw_sp_fib6_entry *fib6_entry;
        struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
        /* Before creating a new entry, try to append route to an existing
         * multipath entry.
         */
-       fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
+       fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
        if (fib6_entry) {
                err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
                if (err)
@@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
                return 0;
        }
 
-       /* We received an append event, yet did not find any route to
-        * append to.
-        */
-       if (WARN_ON(append)) {
-               err = -EINVAL;
-               goto err_fib6_entry_append;
-       }
-
        fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
        if (IS_ERR(fib6_entry)) {
                err = PTR_ERR(fib6_entry);
@@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 err_fib6_node_entry_link:
        mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
 err_fib6_entry_create:
-err_fib6_entry_append:
 err_fib6_entry_nexthop_add:
        mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
        return err;
@@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
        struct mlxsw_sp_fib_event_work *fib_work =
                container_of(work, struct mlxsw_sp_fib_event_work, work);
        struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-       bool replace, append;
+       bool replace;
        int err;
 
        rtnl_lock();
@@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
        case FIB_EVENT_ENTRY_APPEND: /* fall through */
        case FIB_EVENT_ENTRY_ADD:
                replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
-               append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
                err = mlxsw_sp_router_fib6_add(mlxsw_sp,
-                                              fib_work->fen6_info.rt, replace,
-                                              append);
+                                              fib_work->fen6_info.rt, replace);
                if (err)
                        mlxsw_sp_router_fib_abort(mlxsw_sp);
                mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
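
The multipath eligibility test added above reduces to a flag-mask equality: a route qualifies only when RTF_GATEWAY is set and RTF_ADDRCONF is clear. A minimal userspace sketch of that predicate (the flag values here are illustrative, not the kernel's):

#include <stdio.h>

#define RTF_GATEWAY  0x0002
#define RTF_ADDRCONF 0x0400

/* Mirrors the shape of mlxsw_sp_fib6_rt_can_mp(): gateway set, addrconf clear. */
static int can_mp(unsigned int flags)
{
        return (flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}

int main(void)
{
        printf("%d\n", can_mp(RTF_GATEWAY));                /* 1: eligible */
        printf("%d\n", can_mp(RTF_GATEWAY | RTF_ADDRCONF)); /* 0: RA route */
        printf("%d\n", can_mp(0));                          /* 0: no gateway */
        return 0;
}
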
index fb2c8f8071e64d3b6d52865ecaddf17f841a2b9d..776a8a9be8e3551311f5a99ba0285c4c698cf10a 100644 (file)
@@ -344,10 +344,9 @@ static int ocelot_port_stop(struct net_device *dev)
 static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
 {
        ifh[0] = IFH_INJ_BYPASS;
-       ifh[1] = (0xff00 & info->port) >> 8;
+       ifh[1] = (0xf00 & info->port) >> 8;
        ifh[2] = (0xff & info->port) << 24;
-       ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
-                (info->tag_type << 16) | info->vid;
+       ifh[3] = (info->tag_type << 16) | info->vid;
 
        return 0;
 }
@@ -370,11 +369,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
                         QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
 
        info.port = BIT(port->chip_port);
-       info.cpuq = 0xff;
+       info.tag_type = IFH_TAG_TYPE_C;
+       info.vid = skb_vlan_tag_get(skb);
        ocelot_gen_ifh(ifh, &info);
 
        for (i = 0; i < IFH_LEN; i++)
-               ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+               ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+                                QS_INJ_WR, grp);
 
        count = (skb->len + 3) / 4;
        last = skb->len % 4;
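
The injection-header change above converts each word with cpu_to_be32() before it is written to the QS_INJ_WR FIFO, which suggests the hardware expects big-endian words regardless of host endianness. A userspace analogue using htonl() in place of cpu_to_be32() (header values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        /* Pretend injection-header words built on a little-endian host. */
        uint32_t ifh[4] = { 0x00000080, 0x00000001, 0x01000000, 0x00010064 };

        for (int i = 0; i < 4; i++)
                printf("word %d: 0x%08x -> 0x%08x\n", i, ifh[i], htonl(ifh[i]));
        return 0;
}
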
index fcdfb8e7fdeab0b9dcb353f4cd4a7d76370c9817..40216d56dddcb73d997ed4e4c48e63868610da89 100644 (file)
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
        ret = nfp_net_bpf_offload(nn, prog, running, extack);
        /* Stop offload if replace not possible */
-       if (ret && prog)
-               nfp_bpf_xdp_offload(app, nn, NULL, extack);
+       if (ret)
+               return ret;
 
-       nn->dp.bpf_offload_xdp = prog && !ret;
+       nn->dp.bpf_offload_xdp = !!prog;
        return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index 91935405f5861678077c188328d365ed5cb2ba7f..84f7a5dbea9d5bf17abd88416cc5a41f2fa4770b 100644 (file)
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                         NFP_FLOWER_MASK_MPLS_Q;
 
                frame->mpls_lse = cpu_to_be32(t_mpls);
+       } else if (dissector_uses_key(flow->dissector,
+                                     FLOW_DISSECTOR_KEY_BASIC)) {
+               /* Check for an MPLS ethertype and set the NFP_FLOWER_MASK_MPLS_Q
+                * bit, which indicates an MPLS ethertype without any
+                * MPLS fields.
+                */
+               struct flow_dissector_key_basic *key_basic;
+
+               key_basic = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_BASIC,
+                                                     flow->key);
+               if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+                   key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+                       frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
 }
 
index c42e64f32333f84640ff913b61ff199701e1b404..525057bee0ed8978f360d6eeb8293d8a990a0f22 100644 (file)
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;
 
+               case cpu_to_be16(ETH_P_MPLS_UC):
+               case cpu_to_be16(ETH_P_MPLS_MC):
+                       if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+                               key_layer |= NFP_FLOWER_LAYER_MAC;
+                               key_size += sizeof(struct nfp_flower_mac_mpls);
+                       }
+                       break;
+
                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index cd34097b79f1be9d313d8f28b9701bb5bd6a3100..37a6d7822a3860647c416efeff47c7a7837a3a85 100644 (file)
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
        err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
                           nfp_resource_address(state->res),
                           fwinf, sizeof(*fwinf));
-       if (err < sizeof(*fwinf))
+       if (err < (int)sizeof(*fwinf))
                goto err_release;
 
        if (!nffw_res_flg_init_get(fwinf))
index 00db3401b89852a7fe5eaca7342344bcb3b66d4d..1dfaccd151f0d457a2ce38447400925113ae546d 100644 (file)
@@ -502,6 +502,7 @@ enum BAR_ID {
 struct qed_nvm_image_info {
        u32 num_images;
        struct bist_nvm_image_att *image_att;
+       bool valid;
 };
 
 #define DRV_MODULE_VERSION                   \
index 8f31406ec89407713b2ad32c81a30185b2c05727..e0680ce9132815568914dff86606363b9a02cb88 100644 (file)
@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
                *type = DCBX_PROTOCOL_ROCE_V2;
        } else {
                *type = DCBX_MAX_PROTOCOL_TYPE;
-               DP_ERR(p_hwfn,
-                      "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
-                      id, app_prio_bitmap);
+               DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+                      app_prio_bitmap);
                return false;
        }
 
@@ -710,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
        p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-              ARRAY_SIZE(p_local->local_chassis_id));
+              sizeof(p_local->local_chassis_id));
        memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-              ARRAY_SIZE(p_local->local_port_id));
+              sizeof(p_local->local_port_id));
 }
 
 static void
@@ -724,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
        p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-              ARRAY_SIZE(p_remote->peer_chassis_id));
+              sizeof(p_remote->peer_chassis_id));
        memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-              ARRAY_SIZE(p_remote->peer_port_id));
+              sizeof(p_remote->peer_port_id));
 }
 
 static int
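
The memcpy fix above is worth spelling out: ARRAY_SIZE() evaluates to the element count while sizeof evaluates to the byte count, so passing ARRAY_SIZE() as a memcpy length on a u32 array copies only a quarter of the data. A runnable illustration:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        uint32_t chassis_id[4];   /* stand-in for the LLDP id buffers */

        printf("ARRAY_SIZE = %zu elements, sizeof = %zu bytes\n",
               ARRAY_SIZE(chassis_id), sizeof(chassis_id));  /* 4 vs 16 */
        return 0;
}
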
@@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
                *cap = 0x80;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
-                       DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+               *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+                       DCB_CAP_DCBX_STATIC);
                break;
        default:
                *cap = false;
@@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
        if (!dcbx_info)
                return 0;
 
-       if (dcbx_info->operational.enabled)
-               mode |= DCB_CAP_DCBX_LLD_MANAGED;
        if (dcbx_info->operational.ieee)
                mode |= DCB_CAP_DCBX_VER_IEEE;
        if (dcbx_info->operational.cee)
index a14e484890299565ee8fdac8851ed9d7f3e90437..4340c4c90bcbe8b03e5373cfc674c8840ff640d9 100644 (file)
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
                format_idx = header & MFW_TRACE_EVENTID_MASK;
 
                /* Skip message if its index doesn't exist in the meta data */
-               if (format_idx > s_mcp_trace_meta.formats_num) {
+               if (format_idx >= s_mcp_trace_meta.formats_num) {
                        u8 format_size =
                                (u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
                                     MFW_TRACE_PRM_SIZE_SHIFT);
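
The one-character change above fixes a classic off-by-one: valid indices into an array of `num` formats are 0..num-1, so the reject test must be `idx >= num`. A small demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int formats_num = 8;
        unsigned int format_idx = 8;    /* one past the last valid index */

        if (!(format_idx > formats_num))
                printf("old test (>): index accepted -> out-of-bounds read\n");
        if (format_idx >= formats_num)
                printf("new test (>=): index rejected\n");
        return 0;
}
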
index 329781cda77fbecc88328ea95f00e39d4be5db9b..e5249b4741d03f7c347c70a861288b787653741a 100644 (file)
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
                        DP_INFO(p_hwfn, "Failed to update driver state\n");
 
                rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-                                              QED_OV_ESWITCH_VEB);
+                                              QED_OV_ESWITCH_NONE);
                if (rc)
                        DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
        }
index c97ebd681c471196cb4135deafbf8e07efc9d615..012973d75ad039436fb0007e9452eb0565f4938c 100644 (file)
@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
 
        skb = build_skb(buffer->data, 0);
        if (!skb) {
-               rc = -ENOMEM;
-               goto out_post;
+               DP_INFO(cdev, "Failed to build SKB\n");
+               kfree(buffer->data);
+               goto out_post1;
        }
 
        data->u.placement_offset += NET_SKB_PAD;
@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
                cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
                                      data->opaque_data_0,
                                      data->opaque_data_1);
+       } else {
+               DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+                                   QED_MSG_LL2 | QED_MSG_STORAGE),
+                          "Dropping the packet\n");
+               kfree(buffer->data);
        }
 
+out_post1:
        /* Update Buffer information and update FW producer */
        buffer->data = new_data;
        buffer->phys_addr = new_phys_addr;
index b04d57ca5176ee65f348bb5882965e19f107e2f8..758a9a5127fa8c00566e4f90d5f75db636570e33 100644 (file)
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
                goto err2;
        }
 
-       DP_INFO(cdev, "qed_probe completed successffuly\n");
+       DP_INFO(cdev, "qed_probe completed successfully\n");
 
        return cdev;
 
@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
-                               hwfn->simd_proto_handler[j].func(
-                                       hwfn->simd_proto_handler[j].token);
+                               struct qed_simd_fp_handler *p_handler =
+                                       &hwfn->simd_proto_handler[j];
+
+                               if (p_handler->func)
+                                       p_handler->func(p_handler->token);
+                               else
+                                       DP_NOTICE(hwfn,
+                                                 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+                                                 j, status);
+
                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
@@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+       if (is_kdump_kernel()) {
+               DP_INFO(cdev,
+                       "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+                       cdev->int_params.in.min_msix_cnt);
+               cdev->int_params.in.num_vectors =
+                       cdev->int_params.in.min_msix_cnt;
+       }
+
        rc = qed_set_int_mode(cdev, false);
        if (rc)  {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
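
The kdump branch above follows a common pattern: a crash-dump kernel boots with very little memory, so drivers cap resource requests at the minimum they can operate with. A hedged sketch of the shape (the struct and field names are illustrative, not qed's):

#include <linux/crash_dump.h>

struct my_int_params {                  /* hypothetical */
        int num_vectors;
        int min_msix_cnt;
};

static void cap_msix_for_kdump(struct my_int_params *p)
{
        if (is_kdump_kernel())
                p->num_vectors = p->min_msix_cnt;  /* request the bare minimum */
}
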
index 4e0b443c9519d67bc3b888ddf3b341c93291e328..9d9e533bccdcd351452753fbb0600db098685eb9 100644 (file)
@@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
        *o_mcp_resp = mb_params.mcp_resp;
        *o_mcp_param = mb_params.mcp_param;
 
+       /* nvm_info needs to be updated */
+       p_hwfn->nvm_info.valid = false;
+
        return 0;
 }
 
@@ -2555,11 +2558,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
 {
-       struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+       struct qed_nvm_image_info nvm_info;
        struct qed_ptt *p_ptt;
        int rc;
        u32 i;
 
+       if (p_hwfn->nvm_info.valid)
+               return 0;
+
        p_ptt = qed_ptt_acquire(p_hwfn);
        if (!p_ptt) {
                DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2573,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
        }
 
        /* Acquire from MFW the amount of available images */
-       nvm_info->num_images = 0;
+       nvm_info.num_images = 0;
        rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
-                                            p_ptt, &nvm_info->num_images);
+                                            p_ptt, &nvm_info.num_images);
        if (rc == -EOPNOTSUPP) {
                DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
                goto out;
-       } else if (rc || !nvm_info->num_images) {
+       } else if (rc || !nvm_info.num_images) {
                DP_ERR(p_hwfn, "Failed getting number of images\n");
                goto err0;
        }
 
-       nvm_info->image_att = kmalloc_array(nvm_info->num_images,
-                                           sizeof(struct bist_nvm_image_att),
-                                           GFP_KERNEL);
-       if (!nvm_info->image_att) {
+       nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+                                          sizeof(struct bist_nvm_image_att),
+                                          GFP_KERNEL);
+       if (!nvm_info.image_att) {
                rc = -ENOMEM;
                goto err0;
        }
 
        /* Iterate over images and get their attributes */
-       for (i = 0; i < nvm_info->num_images; i++) {
+       for (i = 0; i < nvm_info.num_images; i++) {
                rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
-                                                   &nvm_info->image_att[i], i);
+                                                   &nvm_info.image_att[i], i);
                if (rc) {
                        DP_ERR(p_hwfn,
                               "Failed getting image index %d attributes\n", i);
@@ -2597,14 +2603,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
                }
 
                DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
-                          nvm_info->image_att[i].len);
+                          nvm_info.image_att[i].len);
        }
 out:
+       /* Update hwfn's nvm_info */
+       if (nvm_info.num_images) {
+               p_hwfn->nvm_info.num_images = nvm_info.num_images;
+               kfree(p_hwfn->nvm_info.image_att);
+               p_hwfn->nvm_info.image_att = nvm_info.image_att;
+               p_hwfn->nvm_info.valid = true;
+       }
+
        qed_ptt_release(p_hwfn, p_ptt);
        return 0;
 
 err1:
-       kfree(nvm_info->image_att);
+       kfree(nvm_info.image_att);
 err0:
        qed_ptt_release(p_hwfn, p_ptt);
        return rc;
@@ -2641,6 +2655,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
+       qed_mcp_nvm_info_populate(p_hwfn);
        for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
                if (type == p_hwfn->nvm_info.image_att[i].image_type)
                        break;
index f01bf52bc381f6f02c33ee3d9df4a90982cf8245..fd59cf45f4be8cb008d8728398870cc1ab41210b 100644 (file)
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
        struct qed_iov_vf_init_params params;
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
        int i, j, rc;
 
        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *hwfn = &cdev->hwfns[j];
-               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+               hwfn = &cdev->hwfns[j];
+               ptt = qed_ptt_acquire(hwfn);
 
                /* Make sure not to use more than 16 queues per VF */
                params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                goto err;
        }
 
+       hwfn = QED_LEADING_HWFN(cdev);
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_ERR(hwfn, "Failed to acquire ptt\n");
+               rc = -EBUSY;
+               goto err;
+       }
+
+       rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+       if (rc)
+               DP_INFO(cdev, "Failed to update eswitch mode\n");
+       qed_ptt_release(hwfn, ptt);
+
        return num;
 
 err:
index 02adb513f4756cb58c423936213bdcb4158d1dfa..013ff567283c738f342ca5d6f5358e30ca6daa72 100644 (file)
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
 {
        struct qede_ptp *ptp = edev->ptp;
 
-       if (!ptp)
-               return -EIO;
+       if (!ptp) {
+               info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+
+               return 0;
+       }
 
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
index 891f03a7a33dc7286b5bb6d1b4ac2333ab74aacf..8d7b9bb910f2addae4712088884b334c42876934 100644 (file)
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
        ret = kstrtoul(buf, 16, &data);
+       if (ret)
+               return ret;
 
        switch (data) {
        case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
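
The two added lines above close a real hole: kstrtoul() returns 0 on success or a negative errno, and its output is only valid on success, so a sysfs store handler must bail out on failure rather than switch on an uninitialized value. A hedged sketch of the idiom (signature simplified):

#include <linux/kernel.h>

static ssize_t store_flash_cmd(const char *buf)
{
        unsigned long data;
        int ret;

        ret = kstrtoul(buf, 16, &data); /* hex input, as in the driver */
        if (ret)
                return ret;             /* e.g. -EINVAL on malformed input */

        /* ... dispatch on data ... */
        return 0;
}
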
index 5803cd6db406c7f9c5426ceb87bf062d4f0434fb..206f0266463e362a0e34fe8ff5b626519500e2ed 100644 (file)
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
                return ret;
        }
 
-       netif_start_queue(qca->net_dev);
+       /* SPI thread takes care of TX queue */
 
        return 0;
 }
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
        qca->net_dev->stats.tx_errors++;
        /* Trigger tx queue flush and QCA7000 reset */
        qca->sync = QCASPI_SYNC_UNKNOWN;
+
+       if (qca->spi_thread)
+               wake_up_process(qca->spi_thread);
 }
 
 static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
 
        if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
            (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
-               dev_info(&spi->dev, "Invalid clkspeed: %d\n",
-                        qcaspi_clkspeed);
+               dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+                       qcaspi_clkspeed);
                return -EINVAL;
        }
 
        if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
            (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
-               dev_info(&spi->dev, "Invalid burst len: %d\n",
-                        qcaspi_burst_len);
+               dev_err(&spi->dev, "Invalid burst len: %d\n",
+                       qcaspi_burst_len);
                return -EINVAL;
        }
 
        if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
            (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
-               dev_info(&spi->dev, "Invalid pluggable: %d\n",
-                        qcaspi_pluggable);
+               dev_err(&spi->dev, "Invalid pluggable: %d\n",
+                       qcaspi_pluggable);
                return -EINVAL;
        }
 
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
        }
 
        if (register_netdev(qcaspi_devs)) {
-               dev_info(&spi->dev, "Unable to register net device %s\n",
-                        qcaspi_devs->name);
+               dev_err(&spi->dev, "Unable to register net device %s\n",
+                       qcaspi_devs->name);
                free_netdev(qcaspi_devs);
                return -EFAULT;
        }
index 75dfac0248f45cb423fd9883e38349a456b1dc0d..a3f69901ac877a058fd477d1ef7d3aac984eeded 100644 (file)
@@ -7148,7 +7148,7 @@ static void rtl8169_netpoll(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+       rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
 }
 #endif
 
@@ -7789,6 +7789,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                NETIF_F_HW_VLAN_CTAG_RX;
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
                NETIF_F_HIGHDMA;
+       dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 
        tp->cp_cmd |= RxChkSum | RxVlan;
 
index 27be51f0a421b43e191e594bdb6ebcd753b65eef..f3f7477043ce106155ca30ba7c07fb7d20e968bc 100644 (file)
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
 
 config SH_ETH
        tristate "Renesas SuperH Ethernet support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
        select CRC32
        select MII
@@ -31,7 +30,6 @@ config SH_ETH
 
 config RAVB
        tristate "Renesas Ethernet AVB support"
-       depends on HAS_DMA
        depends on ARCH_RENESAS || COMPILE_TEST
        select CRC32
        select MII
index 68f122140966d4de381b47fa192246eb7606707a..0d811c02ff340f09a385ec0677f0388034615eef 100644 (file)
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
        struct ravb_private *priv = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
        bool new_state = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+
+       /* Disable TX and RX here if the E-MAC link change is ignored */
+       if (priv->no_avb_link)
+               ravb_rcv_snd_disable(ndev);
 
        if (phydev->link) {
                if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
                        ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
-                       if (priv->no_avb_link)
-                               ravb_rcv_snd_enable(ndev);
                }
        } else if (priv->link) {
                new_state = true;
                priv->link = 0;
                priv->speed = 0;
                priv->duplex = -1;
-               if (priv->no_avb_link)
-                       ravb_rcv_snd_disable(ndev);
        }
 
+       /* Re-enable TX and RX here if the E-MAC link change is ignored */
+       if (priv->no_avb_link && phydev->link)
+               ravb_rcv_snd_enable(ndev);
+
+       mmiowb();
+       spin_unlock_irqrestore(&priv->lock, flags);
+
        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);
 }
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
        return 0;
 }
 
-static int ravb_get_link_ksettings(struct net_device *ndev,
-                                  struct ethtool_link_ksettings *cmd)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       unsigned long flags;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&priv->lock, flags);
-       phy_ethtool_ksettings_get(ndev->phydev, cmd);
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return 0;
-}
-
-static int ravb_set_link_ksettings(struct net_device *ndev,
-                                  const struct ethtool_link_ksettings *cmd)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int error;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&priv->lock, flags);
-
-       /* Disable TX and RX */
-       ravb_rcv_snd_disable(ndev);
-
-       error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-       if (error)
-               goto error_exit;
-
-       if (cmd->base.duplex == DUPLEX_FULL)
-               priv->duplex = 1;
-       else
-               priv->duplex = 0;
-
-       ravb_set_duplex(ndev);
-
-error_exit:
-       mdelay(1);
-
-       /* Enable TX and RX */
-       ravb_rcv_snd_enable(ndev);
-
-       mmiowb();
-       spin_unlock_irqrestore(&priv->lock, flags);
-
-       return error;
-}
-
-static int ravb_nway_reset(struct net_device *ndev)
-{
-       struct ravb_private *priv = netdev_priv(ndev);
-       int error = -ENODEV;
-       unsigned long flags;
-
-       if (ndev->phydev) {
-               spin_lock_irqsave(&priv->lock, flags);
-               error = phy_start_aneg(ndev->phydev);
-               spin_unlock_irqrestore(&priv->lock, flags);
-       }
-
-       return error;
-}
-
 static u32 ravb_get_msglevel(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 }
 
 static const struct ethtool_ops ravb_ethtool_ops = {
-       .nway_reset             = ravb_nway_reset,
+       .nway_reset             = phy_ethtool_nway_reset,
        .get_msglevel           = ravb_get_msglevel,
        .set_msglevel           = ravb_set_msglevel,
        .get_link               = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
        .get_ringparam          = ravb_get_ringparam,
        .set_ringparam          = ravb_set_ringparam,
        .get_ts_info            = ravb_get_ts_info,
-       .get_link_ksettings     = ravb_get_link_ksettings,
-       .set_link_ksettings     = ravb_set_link_ksettings,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
        .get_wol                = ravb_get_wol,
        .set_wol                = ravb_set_wol,
 };
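
The ravb rework above settles on a shape worth noting: the PHY state machine can run concurrently with the driver's interrupt paths, so the whole link-adjust body is serialized with spin_lock_irqsave(), RX/TX are disabled up front, and they are re-enabled only once the link is actually up. A hedged sketch (the enable/disable helpers are stand-ins):

#include <linux/spinlock.h>

static void rcv_snd_disable(void) { /* stand-in */ }
static void rcv_snd_enable(void)  { /* stand-in */ }

static void adjust_link_shape(spinlock_t *lock, bool link_up, bool no_avb_link)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);

        if (no_avb_link)
                rcv_snd_disable();

        /* ... record speed/duplex changes, update ECMR ... */

        if (no_avb_link && link_up)
                rcv_snd_enable();

        spin_unlock_irqrestore(lock, flags);
}
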
index e9007b613f17ca8de16b67e054df42a800522fb5..5614fd231bbe1e4685582e15faf27dad412b241b 100644 (file)
@@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
        struct phy_device *phydev = ndev->phydev;
+       unsigned long flags;
        int new_state = 0;
 
+       spin_lock_irqsave(&mdp->lock, flags);
+
+       /* Disable TX and RX here if the E-MAC link change is ignored */
+       if (mdp->cd->no_psr || mdp->no_ether_link)
+               sh_eth_rcv_snd_disable(ndev);
+
        if (phydev->link) {
                if (phydev->duplex != mdp->duplex) {
                        new_state = 1;
@@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                        sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = 1;
                        mdp->link = phydev->link;
-                       if (mdp->cd->no_psr || mdp->no_ether_link)
-                               sh_eth_rcv_snd_enable(ndev);
                }
        } else if (mdp->link) {
                new_state = 1;
                mdp->link = 0;
                mdp->speed = 0;
                mdp->duplex = -1;
-               if (mdp->cd->no_psr || mdp->no_ether_link)
-                       sh_eth_rcv_snd_disable(ndev);
        }
 
+       /* Re-enable TX and RX here if the E-MAC link change is ignored */
+       if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
+               sh_eth_rcv_snd_enable(ndev);
+
+       mmiowb();
+       spin_unlock_irqrestore(&mdp->lock, flags);
+
        if (new_state && netif_msg_link(mdp))
                phy_print_status(phydev);
 }
@@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
        return 0;
 }
 
-static int sh_eth_get_link_ksettings(struct net_device *ndev,
-                                    struct ethtool_link_ksettings *cmd)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-       phy_ethtool_ksettings_get(ndev->phydev, cmd);
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return 0;
-}
-
-static int sh_eth_set_link_ksettings(struct net_device *ndev,
-                                    const struct ethtool_link_ksettings *cmd)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-       int ret;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-
-       /* disable tx and rx */
-       sh_eth_rcv_snd_disable(ndev);
-
-       ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
-       if (ret)
-               goto error_exit;
-
-       if (cmd->base.duplex == DUPLEX_FULL)
-               mdp->duplex = 1;
-       else
-               mdp->duplex = 0;
-
-       if (mdp->cd->set_duplex)
-               mdp->cd->set_duplex(ndev);
-
-error_exit:
-       mdelay(1);
-
-       /* enable tx and rx */
-       sh_eth_rcv_snd_enable(ndev);
-
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return ret;
-}
-
 /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
  * version must be bumped as well.  Just adding registers up to that
  * limit is fine, as long as the existing register indices don't
@@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
        pm_runtime_put_sync(&mdp->pdev->dev);
 }
 
-static int sh_eth_nway_reset(struct net_device *ndev)
-{
-       struct sh_eth_private *mdp = netdev_priv(ndev);
-       unsigned long flags;
-       int ret;
-
-       if (!ndev->phydev)
-               return -ENODEV;
-
-       spin_lock_irqsave(&mdp->lock, flags);
-       ret = phy_start_aneg(ndev->phydev);
-       spin_unlock_irqrestore(&mdp->lock, flags);
-
-       return ret;
-}
-
 static u32 sh_eth_get_msglevel(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_regs_len   = sh_eth_get_regs_len,
        .get_regs       = sh_eth_get_regs,
-       .nway_reset     = sh_eth_nway_reset,
+       .nway_reset     = phy_ethtool_nway_reset,
        .get_msglevel   = sh_eth_get_msglevel,
        .set_msglevel   = sh_eth_set_msglevel,
        .get_link       = ethtool_op_get_link,
@@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
        .get_sset_count     = sh_eth_get_sset_count,
        .get_ringparam  = sh_eth_get_ringparam,
        .set_ringparam  = sh_eth_set_ringparam,
-       .get_link_ksettings = sh_eth_get_link_ksettings,
-       .set_link_ksettings = sh_eth_set_link_ksettings,
+       .get_link_ksettings = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings = phy_ethtool_set_link_ksettings,
        .get_wol        = sh_eth_get_wol,
        .set_wol        = sh_eth_set_wol,
 };
index 23f0785c0573ec72fea3db10dfdf353c41341ee8..7eeac3d6cfe898a9a4ef6df9378d8c6d29383ce1 100644 (file)
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
        return -EPROTONOSUPPORT;
 }
 
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
-                                 struct efx_filter_spec *spec,
-                                 bool replace_equal)
+static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
+                                        struct efx_filter_spec *spec,
+                                        bool replace_equal)
 {
        DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
        bool is_mc_recip;
        s32 rc;
 
-       down_read(&efx->filter_sem);
+       WARN_ON(!rwsem_is_locked(&efx->filter_sem));
        table = efx->filter_state;
        down_write(&table->lock);
 
@@ -4498,10 +4498,22 @@ out_unlock:
        if (rss_locked)
                mutex_unlock(&efx->rss_lock);
        up_write(&table->lock);
-       up_read(&efx->filter_sem);
        return rc;
 }
 
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+                                 struct efx_filter_spec *spec,
+                                 bool replace_equal)
+{
+       s32 ret;
+
+       down_read(&efx->filter_sem);
+       ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
+       up_read(&efx->filter_sem);
+
+       return ret;
+}
+
 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
 {
        /* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
+               rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                if (rc < 0) {
                        if (rollback) {
                                netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
                efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
                eth_broadcast_addr(baddr);
                efx_filter_set_eth_local(&spec, vlan->vid, baddr);
-               rc = efx_ef10_filter_insert(efx, &spec, true);
+               rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                if (rc < 0) {
                        netif_warn(efx, drv, efx->net_dev,
                                   "Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
        if (vlan->vid != EFX_FILTER_VID_UNSPEC)
                efx_filter_set_eth_local(&spec, vlan->vid, NULL);
 
-       rc = efx_ef10_filter_insert(efx, &spec, true);
+       rc = efx_ef10_filter_insert_locked(efx, &spec, true);
        if (rc < 0) {
                const char *um = multicast ? "Multicast" : "Unicast";
                const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
                                           filter_flags, 0);
                        eth_broadcast_addr(baddr);
                        efx_filter_set_eth_local(&spec, vlan->vid, baddr);
-                       rc = efx_ef10_filter_insert(efx, &spec, true);
+                       rc = efx_ef10_filter_insert_locked(efx, &spec, true);
                        if (rc < 0) {
                                netif_warn(efx, drv, efx->net_dev,
                                           "Broadcast filter insert failed rc=%d\n",
index ad4a354ce570e143a741e7ab7155ae84a8a5df34..ce3a177081a854a683493f7f6f2c79ac63f60cc4 100644 (file)
@@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx)
        up_write(&efx->filter_sem);
 }
 
-static void efx_restore_filters(struct efx_nic *efx)
-{
-       down_read(&efx->filter_sem);
-       efx->type->filter_table_restore(efx);
-       up_read(&efx->filter_sem);
-}
 
 /**************************************************************************
  *
@@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        efx_disable_interrupts(efx);
 
        mutex_lock(&efx->mac_lock);
+       down_write(&efx->filter_sem);
        mutex_lock(&efx->rss_lock);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
            method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
        if (efx->type->rx_restore_rss_contexts)
                efx->type->rx_restore_rss_contexts(efx);
        mutex_unlock(&efx->rss_lock);
-       down_read(&efx->filter_sem);
-       efx_restore_filters(efx);
-       up_read(&efx->filter_sem);
+       efx->type->filter_table_restore(efx);
+       up_write(&efx->filter_sem);
        if (efx->type->sriov_reset)
                efx->type->sriov_reset(efx);
 
@@ -2764,6 +2758,7 @@ fail:
        efx->port_initialized = false;
 
        mutex_unlock(&efx->rss_lock);
+       up_write(&efx->filter_sem);
        mutex_unlock(&efx->mac_lock);
 
        return rc;
@@ -3180,6 +3175,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
        return true;
 }
 
+static
 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec)
 {
@@ -3472,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
 
        efx_init_napi(efx);
 
+       down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
+       up_write(&efx->filter_sem);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise NIC\n");
@@ -3764,7 +3762,9 @@ static int efx_pm_resume(struct device *dev)
        rc = efx->type->reset(efx, RESET_TYPE_ALL);
        if (rc)
                return rc;
+       down_write(&efx->filter_sem);
        rc = efx->type->init(efx);
+       up_write(&efx->filter_sem);
        if (rc)
                return rc;
        rc = efx_pm_thaw(dev);
index 8edf20967c82c583bb59ace5f1f9c30dcfd1530d..e045a5d6b938f43f391a726f301d8911f156b32c 100644 (file)
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
        if (!state)
                return -ENOMEM;
        efx->filter_state = state;
+       init_rwsem(&state->lock);
 
        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
        table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
index cb5b0f58c395c2bdbf32e7283d91cf8c4ac5dbe9..edf20361ea5f15c7ddee617f899e31b92d7e261e 100644 (file)
@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
        default ARCH_SOCFPGA
-       depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+       depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for ethernet controller on Altera SOCFPGA
index 6e359572b9f0ea53ed46b553fb1cb51273415f57..5b3b06a0a3bf53e1eac9572ae8d14add0c3835e7 100644 (file)
@@ -55,6 +55,7 @@ struct socfpga_dwmac {
        struct  device *dev;
        struct regmap *sys_mgr_base_addr;
        struct reset_control *stmmac_rst;
+       struct reset_control *stmmac_ocp_rst;
        void __iomem *splitter_base;
        bool f2h_ptp_ref_clk;
        struct tse_pcs pcs;
@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
                val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
 
        /* Assert reset to the enet controller before changing the phy mode */
-       if (dwmac->stmmac_rst)
-               reset_control_assert(dwmac->stmmac_rst);
+       reset_control_assert(dwmac->stmmac_ocp_rst);
+       reset_control_assert(dwmac->stmmac_rst);
 
        regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
        /* Deassert reset for the phy configuration to be sampled by
         * the enet controller, and operation to start in requested mode
         */
-       if (dwmac->stmmac_rst)
-               reset_control_deassert(dwmac->stmmac_rst);
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+       reset_control_deassert(dwmac->stmmac_rst);
        if (phymode == PHY_INTERFACE_MODE_SGMII) {
                if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
                        dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
                goto err_remove_config_dt;
        }
 
+       dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+       if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+               ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+               dev_err(dev, "error getting reset control of ocp %d\n", ret);
+               goto err_remove_config_dt;
+       }
+
+       reset_control_deassert(dwmac->stmmac_ocp_rst);
+
        ret = socfpga_dwmac_parse_data(dwmac, dev);
        if (ret) {
                dev_err(dev, "Unable to parse OF data\n");
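
The new OCP reset handling leans on two properties of the reset API: devm_reset_control_get_optional() returns NULL (not an error) when the device tree carries no such reset line, and reset_control_assert()/reset_control_deassert() treat NULL as a no-op, so the use sites need no NULL checks. A hedged sketch:

#include <linux/reset.h>

static int get_and_release_ocp_reset(struct device *dev,
                                     struct reset_control **rst)
{
        *rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
        if (IS_ERR(*rst))
                return PTR_ERR(*rst);   /* real error, e.g. -EPROBE_DEFER */

        reset_control_deassert(*rst);   /* silently a no-op when NULL */
        return 0;
}
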
index 2e6e2a96b4f263023e04eaad77e56f160cbedc5c..f9a61f90cfbc6acb269d4e8320bb9a078ae04239 100644 (file)
@@ -37,7 +37,7 @@
  *             is done in the "stmmac files"
  */
 
-/* struct emac_variant - Descrive dwmac-sun8i hardware variant
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
  * @default_syscon_value:      The default value of the EMAC register in syscon
  *                             This value is used for disabling properly EMAC
  *                             and used as a good starting value in case of the
index d37f17ca62fecf66a6b5af1c9aa105923310a341..65bc3556bd8f8c25b9b37421c80d6a663d8eb0db 100644 (file)
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
        }
 }
 
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+       value &= ~DMA_RBSZ_MASK;
+       value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
 
 const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
index c63c1fe3f26b9e4d5cb714ea3ceed56bf103b17e..22a4a6dbb1a4af42d3d7467e3ebca50efef57986 100644 (file)
 
 /* DMA Rx Channel X Control register defines */
 #define DMA_CONTROL_SR                 BIT(0)
+#define DMA_RBSZ_MASK                  GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT                 1
 
 /* Interrupt status per channel */
 #define DMA_CHAN_STATUS_REB            GENMASK(21, 19)
index e44e7b26ce829be0eff000c6a68b064139d532b8..fe8b536b13f864bfff723ea2236a3e5982026533 100644 (file)
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+       void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
 };
 
 #define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
        stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
 #define stmmac_enable_tso(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
 
 struct mac_device_info;
 struct net_device;
index e79b0d7b388a16d524917b0dfed1b4dd2f079c2f..60f59abab009e6fcb077eeccacb545dddad47fa6 100644 (file)
@@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
 static int stmmac_init_phy(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 tx_cnt = priv->plat->tx_queues_to_use;
        struct phy_device *phydev;
        char phy_id_fmt[MII_BUS_ID_SIZE + 3];
        char bus_id[MII_BUS_ID_SIZE];
@@ -968,6 +969,15 @@ static int stmmac_init_phy(struct net_device *dev)
                phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
                                         SUPPORTED_1000baseT_Full);
 
+       /*
+        * Half-duplex mode is not supported with multiple queues:
+        * half-duplex can only work with a single queue.
+        */
+       if (tx_cnt > 1)
+               phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+                                      SUPPORTED_100baseT_Half |
+                                      SUPPORTED_10baseT_Half);
+
        /*
         * Broken HW is sometimes missing the pull-up resistor on the
         * MDIO line, which results in reads to non-existent devices returning
@@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
                                rxfifosz, qmode);
+               stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+                               chan);
        }
 
        for (chan = 0; chan < tx_channels_count; chan++) {
index 6d141f3931eb650902469cebc18fa3613ad0dcb9..72da77b94ecd987e7e683d0ec890c842090e117e 100644 (file)
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 /**
  * stmmac_axi_setup - parse DT parameters for programming the AXI register
  * @pdev: platform device
- * @priv: driver private struct.
  * Description:
  * if required, from device-tree the AXI internal register can be tuned
  * by using platform parameters.
index 7a16d40a72d13cf1d522e8a3a396c826fe76f9b9..b9221fc1674dfa0ef17a43f8ff86d700a1ae514f 100644 (file)
@@ -60,8 +60,7 @@
 #include <linux/sungem_phy.h>
 #include "sungem.h"
 
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
 
 #define DEFAULT_MSG    (NETIF_MSG_DRV          | \
                         NETIF_MSG_PROBE        | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
        writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
        writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
        if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
                writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
        struct net_device *dev = gp->dev;
        int entry, drops, work_done = 0;
        u32 done;
-       __sum16 csum;
 
        if (netif_msg_rx_status(gp))
                printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
                        skb = copy_skb;
                }
 
-               csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
-               skb->csum = csum_unfold(csum);
-               skb->ip_summed = CHECKSUM_COMPLETE;
+               if (likely(dev->features & NETIF_F_RXCSUM)) {
+                       __sum16 csum;
+
+                       csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+                       skb->csum = csum_unfold(csum);
+                       skb->ip_summed = CHECKSUM_COMPLETE;
+               }
                skb->protocol = eth_type_trans(skb, gp->dev);
 
                napi_gro_receive(&gp->napi, skb);
@@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
        writel(0, gp->regs + TXDMA_KICK);
 
        val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
-              ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+              (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
        writel(val, gp->regs + RXDMA_CFG);
 
        writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, dev);
 
        /* We can do scatter/gather and HW checksum */
-       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
-       dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+       dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       dev->features = dev->hw_features;
        if (pci_using_dac)
                dev->features |= NETIF_F_HIGHDMA;
 
index cdbddf16dd2931ba66df103c064705d5f0aef350..4f1267477aa4b56b7f3e1d19420302728da56e7d 100644 (file)
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
  * abstract out these details
  */
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
 {
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
index 06d7c9e4dcda92deb027522dc04b34326c9fdc8a..f270beebb4289326baff5e86b33f47eae2eaa49b 100644 (file)
@@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
                return -EOPNOTSUPP;
 }
 
+static int match_first_device(struct device *dev, void *data)
+{
+       if (dev->parent && dev->parent->of_node)
+               return of_device_is_compatible(dev->parent->of_node,
+                                              "ti,davinci_mdio");
+
+       return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
 /**
  * emac_dev_open - EMAC device open
  * @ndev: The DaVinci EMAC network adapter
@@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev)
 
        /* use the first phy on the bus if pdata did not give us a phy id */
        if (!phydev && !priv->phy_id) {
-               phy = bus_find_device_by_name(&mdio_bus_type, NULL,
-                                             "davinci_mdio");
+               /* NOTE: we can't use bus_find_device_by_name() here because
+                * the device name is not guaranteed to be 'davinci_mdio'. On
+                * some systems it can be 'davinci_mdio.0' so we need to use
+                * strncmp() against the first part of the string to correctly
+                * match it.
+                */
+               phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+                                     match_first_device);
                if (phy) {
                        priv->phy_id = dev_name(phy);
                        if (!priv->phy_id || !*priv->phy_id)
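
The match callback above shows the general bus_find_device() pattern: the callback is handed each device on the bus plus an opaque data pointer and returns nonzero on a match, which is why prefix matching with strncmp() copes with instance-suffixed names such as "davinci_mdio.0". A hedged sketch of a reusable prefix matcher:

#include <linux/device.h>
#include <linux/string.h>

static int match_name_prefix(struct device *dev, void *data)
{
        const char *prefix = data;

        return !strncmp(dev_name(dev), prefix, strlen(prefix));
}

/* assumed usage:
 *   dev = bus_find_device(&mdio_bus_type, NULL, (void *)"davinci_mdio",
 *                         match_name_prefix);
 */
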
index 750eaa53bf0ce59429d524ba0658ad6f488a4ba0..ada33c2d9ac20e01af4acec33727623204fda803 100644 (file)
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index f347fd9c5b28370f6452f042bb7f59c0ec8a3cd3..777fa59f5e0cd5abdfb8390ac358d09cf77636a1 100644 (file)
 static const char banner[] __initconst = KERN_INFO \
        "AX.25: bpqether driver version 004\n";
 
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
 static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
 static int bpq_device_event(struct notifier_block *, unsigned long, void *);
 
@@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
        bpq->ethdev = edev;
        bpq->axdev = ndev;
 
-       memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
-       memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+       eth_broadcast_addr(bpq->dest_addr);
+       eth_broadcast_addr(bpq->acpt_addr);
 
        err = register_netdevice(ndev);
        if (err)
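
eth_broadcast_addr(), used above, simply fills the six address bytes with 0xff; switching to it removes both the static bcast_addr template and a memcpy whose length came from sizeof the wrong array. A minimal sketch of the equivalence:

#include <linux/etherdevice.h>

static void set_broadcast(u8 *addr)
{
        eth_broadcast_addr(addr);  /* same effect as memset(addr, 0xff, ETH_ALEN) */
}
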
index 1a924b867b0742b0aa3e5a15f4da3e6885173e74..4b6e308199d270cd455b7df0de20a8458f6b7941 100644 (file)
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
index 5d5bd513847fff4ff353e7c58d9967a354d06955..31c3d77b4733f0aa9900138b5c49f398d0642db4 100644 (file)
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
                               VM_PKT_DATA_INBAND, 0);
 }
 
+/* Worker to set up sub-channels during initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and can't wait for the channels.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+       struct netvsc_device *nvdev =
+               container_of(w, struct netvsc_device, subchan_work);
+       struct rndis_device *rdev;
+       int i, ret;
+
+       /* Avoid deadlock with device removal already under RTNL */
+       if (!rtnl_trylock()) {
+               schedule_work(w);
+               return;
+       }
+
+       rdev = nvdev->extension;
+       if (rdev) {
+               ret = rndis_set_subchannel(rdev->ndev, nvdev);
+               if (ret == 0) {
+                       netif_device_attach(rdev->ndev);
+               } else {
+                       /* fallback to only primary channel */
+                       for (i = 1; i < nvdev->num_chn; i++)
+                               netif_napi_del(&nvdev->chan_table[i].napi);
+
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       rtnl_unlock();
+}
+
 static struct netvsc_device *alloc_net_device(void)
 {
        struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
-       INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+       INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 
        return net_device;
 }
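
The new work item uses the trylock-and-requeue idiom: device removal may already hold RTNL while flushing this very work, so blocking on rtnl_lock() here could deadlock. A minimal sketch of the pattern (function name hypothetical):

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static void example_rtnl_work(struct work_struct *w)
{
        /* don't block: the RTNL holder may be waiting on this work */
        if (!rtnl_trylock()) {
                schedule_work(w);       /* back off, retry later */
                return;
        }

        /* ... RTNL-protected setup goes here ... */

        rtnl_unlock();
}
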
@@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
        struct hv_device *device = netvsc_channel_to_device(channel);
        struct net_device *ndev = hv_get_drvdata(device);
        int work_done = 0;
+       int ret;
 
        /* If starting a new interval */
        if (!nvchan->desc)
@@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
                nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
        }
 
-       /* If send of pending receive completions suceeded
-        *   and did not exhaust NAPI budget this time
-        *   and not doing busy poll
+       /* Send any pending receive completions */
+       ret = send_recv_completions(ndev, net_device, nvchan);
+
+       /* If it did not exhaust the NAPI budget this time
+        * and we are not busy polling,
         * then re-enable host interrupts
-        *     and reschedule if ring is not empty.
+        * and reschedule if the ring is not empty
+        * or sending receive completions failed.
         */
-       if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
-           work_done < budget &&
+       if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
-           hv_end_read(&channel->inbound) &&
+           (ret || hv_end_read(&channel->inbound)) &&
            napi_schedule_prep(napi)) {
                hv_begin_read(&channel->inbound);
                __napi_schedule(napi);
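
The rework follows the standard NAPI re-arm discipline, with the twist that a failed completion send also forces a reschedule. A generic skeleton of that discipline, with the ring helpers marked hypothetical:

#include <linux/netdevice.h>

int process_ring(struct napi_struct *napi, int budget);         /* hypothetical */
bool ring_refilled_after_unmask(struct napi_struct *napi);      /* hypothetical */

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = process_ring(napi, budget);

        /* budget exhausted: NAPI will poll again, keep IRQs masked */
        if (work_done == budget)
                return budget;

        /* under budget: end this NAPI cycle, unmask device interrupts,
         * and reschedule if new work raced in while unmasking
         */
        if (napi_complete_done(napi, work_done) &&
            ring_refilled_after_unmask(napi) &&
            napi_schedule_prep(napi))
                __napi_schedule(napi);

        return work_done;
}
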
index fe2256bf1d137fea6b76c5e3a564b191e2b5da7c..dd1d6e115145d4c14fb25d1883d1e42614e211a9 100644 (file)
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
        if (IS_ERR(nvdev))
                return PTR_ERR(nvdev);
 
-       /* Note: enable and attach happen when sub-channels setup */
+       if (nvdev->num_chn > 1) {
+               ret = rndis_set_subchannel(ndev, nvdev);
+
+               /* if unavailable, just proceed with one queue */
+               if (ret) {
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       /* In any case device is now ready */
+       netif_device_attach(ndev);
 
+       /* Note: enable and attach happen when sub-channels setup */
        netif_carrier_off(ndev);
 
        if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       if (nvdev->num_chn > 1)
+               schedule_work(&nvdev->subchan_work);
+
        /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
index 5428bb26110262fdfb66daaac8463c91e7981d42..408ece27131c4611a8600028831f10aa8b47ed60 100644 (file)
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
 {
-       struct netvsc_device *nvdev
-               = container_of(w, struct netvsc_device, subchan_work);
        struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
-       struct net_device_context *ndev_ctx;
-       struct rndis_device *rdev;
-       struct net_device *ndev;
-       struct hv_device *hv_dev;
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hv_dev = ndev_ctx->device_ctx;
+       struct rndis_device *rdev = nvdev->extension;
        int i, ret;
 
-       if (!rtnl_trylock()) {
-               schedule_work(w);
-               return;
-       }
-
-       rdev = nvdev->extension;
-       if (!rdev)
-               goto unlock;    /* device was removed */
-
-       ndev = rdev->ndev;
-       ndev_ctx = netdev_priv(ndev);
-       hv_dev = ndev_ctx->device_ctx;
+       ASSERT_RTNL();
 
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret) {
                netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
-               goto failed;
+               return ret;
        }
 
        wait_for_completion(&nvdev->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "sub channel request failed\n");
-               goto failed;
+               return -EIO;
        }
 
        nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
-       netif_device_attach(ndev);
-       rtnl_unlock();
-       return;
-
-failed:
-       /* fallback to only primary channel */
-       for (i = 1; i < nvdev->num_chn; i++)
-               netif_napi_del(&nvdev->chan_table[i].napi);
-
-       nvdev->max_chn = 1;
-       nvdev->num_chn = 1;
-
-       netif_device_attach(ndev);
-unlock:
-       rtnl_unlock();
+       return 0;
 }
 
 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
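
After the refactor the function no longer takes RTNL itself; ASSERT_RTNL() documents, and with lockdep enforces, that callers hold it. The contract, sketched with a hypothetical helper:

#include <linux/rtnetlink.h>

/* caller-locked helper: runs under RTNL taken by the caller, here
 * netvsc_attach() or the netvsc_subchan_work() item shown earlier
 */
static int example_setup_locked(void)
{
        ASSERT_RTNL();  /* splats if the caller forgot rtnl_lock() */

        /* ... setup that used to take and drop RTNL by itself ... */
        return 0;
}
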
@@ -1360,21 +1332,13 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                netif_napi_add(net, &net_device->chan_table[i].napi,
                               netvsc_poll, NAPI_POLL_WEIGHT);
 
-       if (net_device->num_chn > 1)
-               schedule_work(&net_device->subchan_work);
+       return net_device;
 
 out:
-       /* if unavailable, just proceed with one queue */
-       if (ret) {
-               net_device->max_chn = 1;
-               net_device->num_chn = 1;
-       }
-
-       /* No sub channels, device is ready */
-       if (net_device->num_chn == 1)
-               netif_device_attach(net);
-
-       return net_device;
+       /* setting up multiple channels failed */
+       net_device->max_chn = 1;
+       net_device->num_chn = 1;
+       return 0;
 
 err_dev_remv:
        rndis_filter_device_remove(dev, net_device);
index 64f1b1e77bc0f361dc59538072f59ca7a7c72690..23a52b9293f35eaec1d71063305a029ba466d819 100644 (file)
@@ -275,6 +275,8 @@ struct adf7242_local {
        struct spi_message stat_msg;
        struct spi_transfer stat_xfer;
        struct dentry *debugfs_root;
+       struct delayed_work work;
+       struct workqueue_struct *wqueue;
        unsigned long flags;
        int tx_stat;
        bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
        /* Wait until the ACK is sent */
        adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
        adf7242_clear_irqstat(lp);
+       mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
 
        return adf7242_cmd(lp, CMD_RC_RX);
 }
 
+static void adf7242_rx_cal_work(struct work_struct *work)
+{
+       struct adf7242_local *lp =
+               container_of(work, struct adf7242_local, work.work);
+
+       /* Reissue RC_RX every 400 ms to adjust for offset drift
+        * in the receiver (datasheet page 61, OCL section).
+        */
+
+       if (!test_bit(FLAG_XMIT, &lp->flags)) {
+               adf7242_cmd(lp, CMD_RC_PHY_RDY);
+               adf7242_cmd_rx(lp);
+       }
+}
+
 static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
 {
        struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
        enable_irq(lp->spi->irq);
        set_bit(FLAG_START, &lp->flags);
 
-       return adf7242_cmd(lp, CMD_RC_RX);
+       return adf7242_cmd_rx(lp);
 }
 
 static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
        struct adf7242_local *lp = hw->priv;
 
        disable_irq(lp->spi->irq);
+       cancel_delayed_work_sync(&lp->work);
        adf7242_cmd(lp, CMD_RC_IDLE);
        clear_bit(FLAG_START, &lp->flags);
        adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
        adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
        adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
 
-       return adf7242_cmd(lp, CMD_RC_RX);
+       if (test_bit(FLAG_START, &lp->flags))
+               return adf7242_cmd_rx(lp);
+       else
+               return adf7242_cmd(lp, CMD_RC_PHY_RDY);
 }
 
 static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
        /* ensure existing instances of the IRQ handler have completed */
        disable_irq(lp->spi->irq);
        set_bit(FLAG_XMIT, &lp->flags);
+       cancel_delayed_work_sync(&lp->work);
        reinit_completion(&lp->tx_complete);
        adf7242_cmd(lp, CMD_RC_PHY_RDY);
        adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
        unsigned int xmit;
        u8 irq1;
 
+       mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
        adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
 
        if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
        spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
 
        spi_set_drvdata(spi, lp);
+       INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
+       lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
+                                            WQ_MEM_RECLAIM);
 
        ret = adf7242_hw_init(lp);
        if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
        if (!IS_ERR_OR_NULL(lp->debugfs_root))
                debugfs_remove_recursive(lp->debugfs_root);
 
+       cancel_delayed_work_sync(&lp->work);
+       destroy_workqueue(lp->wqueue);
+
        ieee802154_unregister_hw(lp->hw);
        mutex_destroy(&lp->bmux);
        ieee802154_free_hw(lp->hw);
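
Taken together, the adf7242 hunks wire a driver-owned delayed work through the whole device lifetime. A condensed sketch of that lifecycle (struct and function names hypothetical; the 400 ms period and WQ_MEM_RECLAIM flag mirror the diff, and the sketch adds a NULL check on allocation):

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct my_priv {
        struct delayed_work work;
        struct workqueue_struct *wq;
};

static int my_probe_setup(struct my_priv *p, const char *name,
                          void (*fn)(struct work_struct *))
{
        INIT_DELAYED_WORK(&p->work, fn);
        /* ordered queue: calibration runs never overlap each other */
        p->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
        return p->wq ? 0 : -ENOMEM;
}

static void my_hot_path(struct my_priv *p)
{
        /* push the deadline back 400 ms, replacing any pending timer */
        mod_delayed_work(p->wq, &p->work, msecs_to_jiffies(400));
}

static void my_remove_teardown(struct my_priv *p)
{
        cancel_delayed_work_sync(&p->work);     /* wait for completion */
        destroy_workqueue(p->wq);
}
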
index 77abedf0b52447b4f1d0b5bdd99c259cb3555c1a..3d9e91579866826e476ceb2374b0d286e70c07fd 100644 (file)
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
 static int
 at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 {
-       BUG_ON(!level);
+       WARN_ON(!level);
        *level = 0xbe;
        return 0;
 }
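
BUG_ON() panics the machine, while WARN_ON() logs a backtrace and continues, which is the right tool for a recoverable driver-internal check. A slightly more defensive variant than the hunk (which warns and proceeds) would also bail out; sketch:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <net/mac802154.h>

static int example_ed(struct ieee802154_hw *hw, u8 *level)
{
        /* warn with a backtrace instead of crashing the box */
        if (WARN_ON(!level))
                return -EINVAL;

        *level = 0xbe;  /* fixed fake energy level, as in the driver */
        return 0;
}
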
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
                u16 addr = le16_to_cpu(filt->short_addr);
 
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for saddr\n");
+               dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
                __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
                __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
        }
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
        if (changed & IEEE802154_AFILT_PANID_CHANGED) {
                u16 pan = le16_to_cpu(filt->pan_id);
 
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for pan id\n");
+               dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
                __at86rf230_write(lp, RG_PAN_ID_0, pan);
                __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
        }
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
                u8 i, addr[8];
 
                memcpy(addr, &filt->ieee_addr, 8);
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+               dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
                for (i = 0; i < 8; i++)
                        __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
        }
 
        if (changed & IEEE802154_AFILT_PANC_CHANGED) {
-               dev_vdbg(&lp->spi->dev,
-                        "at86rf230_set_hw_addr_filt called for panc change\n");
+               dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
                if (filt->pan_coord)
                        at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
                else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
        return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
 }
 
-
 static int
 at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
 {
index 0d673f7682ee065223b64462bc2c4df0a03826a0..176395e4b7bb0ca628bdd22b4f13a23e425bfae2 100644 (file)
@@ -49,7 +49,7 @@ struct fakelb_phy {
 
 static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
 {
-       BUG_ON(!level);
+       WARN_ON(!level);
        *level = 0xbe;
 
        return 0;
index de0d7f28a181ca4acb1da2131d82a981627a8e96..e428277781ac4422bec2e8f47fd35476a85a74f7 100644 (file)
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
 #include <linux/spi/spi.h>
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 #include <linux/regmap.h>
index 4377c26f714d0522ebf5d1de6ac774b6e42024ea..4a949569ec4c51668fe7b795caef7ece5d61854b 100644 (file)
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 {
        struct ipvl_dev *ipvlan;
        struct net_device *mdev = port->dev;
-       int err = 0;
+       unsigned int flags;
+       int err;
 
        ASSERT_RTNL();
        if (port->mode != nval) {
+               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+                       flags = ipvlan->dev->flags;
+                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags | IFF_NOARP);
+                       } else {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags & ~IFF_NOARP);
+                       }
+                       if (unlikely(err))
+                               goto fail;
+               }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
                                mdev->priv_flags |= IFF_L3MDEV_MASTER;
                        } else
-                               return err;
+                               goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
-               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-                               ipvlan->dev->flags |= IFF_NOARP;
-                       else
-                               ipvlan->dev->flags &= ~IFF_NOARP;
-               }
                port->mode = nval;
        }
+       return 0;
+
+fail:
+       /* Undo the flags changes that have been done so far. */
+       list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+               flags = ipvlan->dev->flags;
+               if (port->mode == IPVLAN_MODE_L3 ||
+                   port->mode == IPVLAN_MODE_L3S)
+                       dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+               else
+                       dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+       }
+
        return err;
 }
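
The fail path is the standard partial-unwind idiom: list_for_each_entry_continue_reverse() resumes just before the entry that failed and walks backwards, reverting only what was already applied. Generic shape, with struct item, apply() and revert() as placeholders:

#include <linux/list.h>

struct item {
        struct list_head node;
        /* ... per-entry state ... */
};

int apply(struct item *it);     /* hypothetical */
void revert(struct item *it);   /* hypothetical */

static int apply_all(struct list_head *head)
{
        struct item *it;
        int err;

        list_for_each_entry(it, head, node) {
                err = apply(it);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        /* walk back over the entries already changed */
        list_for_each_entry_continue_reverse(it, head, node)
                revert(it);
        return err;
}
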
 
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->phy_dev = phy_dev;
        ipvlan->dev = dev;
        ipvlan->sfeatures = IPVLAN_FEATURES;
-       ipvlan_adjust_mtu(ipvlan, phy_dev);
+       if (!tb[IFLA_MTU])
+               ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
        spin_lock_init(&ipvlan->addrs_lock);
 
@@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
+       dev->max_mtu = ETH_MAX_MTU;
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
        dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
        dev->netdev_ops = &ipvlan_netdev_ops;
index 83f7420ddea569126db0cc25719940892d760075..4f390fa557e4ba0c897b20faefaa85b03f4ec70a 100644 (file)
@@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
 
        netif_addr_lock_bh(failover_dev);
        dev_uc_sync_multiple(slave_dev, failover_dev);
-       dev_uc_sync_multiple(slave_dev, failover_dev);
+       dev_mc_sync_multiple(slave_dev, failover_dev);
        netif_addr_unlock_bh(failover_dev);
 
        err = vlan_vids_add_by_dev(slave_dev, failover_dev);
index 081d99aa39853097e7d486e813f344fb895598aa..49ac678eb2dc7ca6539794b9ace40ba86aaa8d6a 100644 (file)
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
                if (err < 0)
                        return err;
 
-               err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+               err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
        }
 
        return err;
index b8f57e9b937901fd142413c4002f39205546c35a..1cd439bdf6087af2913f589b499cd5c5abe5a3bb 100644 (file)
 #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS             BIT(12)
 #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE    BIT(14)
 
-#define MII_88E1121_PHY_LED_CTRL       16
+#define MII_PHY_LED_CTRL               16
 #define MII_88E1121_PHY_LED_DEF                0x0030
+#define MII_88E1510_PHY_LED_DEF                0x1177
 
 #define MII_M1011_PHY_STATUS           0x11
 #define MII_M1011_PHY_STATUS_1000      0x8000
@@ -632,8 +633,40 @@ error:
        return err;
 }
 
+static void marvell_config_led(struct phy_device *phydev)
+{
+       u16 def_config;
+       int err;
+
+       switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
+       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
+               def_config = MII_88E1121_PHY_LED_DEF;
+               break;
+       /* Default PHY LED config:
+        * LED[0] .. 1000Mbps Link
+        * LED[1] .. 100Mbps Link
+        * LED[2] .. Blink, Activity
+        */
+       case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
+               def_config = MII_88E1510_PHY_LED_DEF;
+               break;
+       default:
+               return;
+       }
+
+       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
+                             def_config);
+       if (err < 0)
+               pr_warn("Failed to configure marvell phy LED\n");
+}
+
 static int marvell_config_init(struct phy_device *phydev)
 {
+       /* Set default LED */
+       marvell_config_led(phydev);
+
        /* Set registers from marvell,reg-init DT property */
        return marvell_of_reg_init(phydev);
 }
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
        return genphy_soft_reset(phydev);
 }
 
-static int m88e1121_config_init(struct phy_device *phydev)
-{
-       int err;
-
-       /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
-       err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
-                             MII_88E1121_PHY_LED_CTRL,
-                             MII_88E1121_PHY_LED_DEF);
-       if (err < 0)
-               return err;
-
-       /* Set marvell,reg-init configuration from device tree */
-       return marvell_config_init(phydev);
-}
-
 static int m88e1318_config_init(struct phy_device *phydev)
 {
        if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
                        return err;
        }
 
-       return m88e1121_config_init(phydev);
+       return marvell_config_init(phydev);
 }
 
 static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
                .features = PHY_GBIT_FEATURES,
                .flags = PHY_HAS_INTERRUPT,
                .probe = &m88e1121_probe,
-               .config_init = &m88e1121_config_init,
+               .config_init = &marvell_config_init,
                .config_aneg = &m88e1121_config_aneg,
                .read_status = &marvell_read_status,
                .ack_interrupt = &marvell_ack_interrupt,
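
phy_write_paged() wraps the page-select, write, page-restore sequence under the MDIO bus lock, which is what lets the per-model LED setup collapse into one helper. A hedged sketch of the call (the MII_* constants are driver-local, taken from the diff):

#include <linux/phy.h>
#include <linux/printk.h>

static void example_set_led_defaults(struct phy_device *phydev,
                                     u16 def_config)
{
        /* selects MII_MARVELL_LED_PAGE, writes, restores the old page */
        int err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
                                  MII_PHY_LED_CTRL, def_config);

        if (err < 0)
                pr_warn("failed to configure PHY LED defaults\n");
}
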
index bd0f339f69fd064737f8f3c80e6645e73c56a2b9..b9f5f40a7ac1e6640a653e8207cdd8885100e09f 100644 (file)
@@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-       /* The default values for phydev->supported are provided by the PHY
-        * driver "features" member, we want to reset to sane defaults first
-        * before supporting higher speeds.
-        */
-       phydev->supported &= PHY_DEFAULT_FEATURES;
+       phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+                              PHY_10BT_FEATURES);
 
        switch (max_speed) {
        default:
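
The point of the new mask is that it clears only the speed-related feature bits, so pause and autoneg capabilities advertised by the PHY driver survive; the switch then uses case fall-through to accumulate the speeds that max_speed allows. A self-contained sketch of that idiom (the diff truncates the function, so the body below is illustrative, not a quote):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/phy.h>

static int example_limit_speed(struct phy_device *phydev, u32 max_speed)
{
        phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
                               PHY_10BT_FEATURES);

        switch (max_speed) {
        default:
                return -ENOTSUPP;
        case SPEED_1000:
                phydev->supported |= PHY_1000BT_FEATURES;
                /* fall through: a 1G PHY also does 100M and 10M */
        case SPEED_100:
                phydev->supported |= PHY_100BT_FEATURES;
                /* fall through */
        case SPEED_10:
                phydev->supported |= PHY_10BT_FEATURES;
        }

        return 0;
}
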
index d437f4f5ed5291d21236a71ef3e36089344f9201..740655261e5b7347116d2a5b53445c8d023cb49c 100644 (file)
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
        }
        if (bus->started)
                bus->socket_ops->start(bus->sfp);
-       bus->netdev->sfp_bus = bus;
        bus->registered = true;
        return 0;
 }
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
                if (bus->phydev && ops && ops->disconnect_phy)
                        ops->disconnect_phy(bus->upstream);
        }
-       bus->netdev->sfp_bus = NULL;
        bus->registered = false;
 }
 
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_upstream_stop);
 
+static void sfp_upstream_clear(struct sfp_bus *bus)
+{
+       bus->upstream_ops = NULL;
+       bus->upstream = NULL;
+       bus->netdev->sfp_bus = NULL;
+       bus->netdev = NULL;
+}
+
 /**
  * sfp_register_upstream() - Register the neighbouring device
  * @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
                bus->upstream_ops = ops;
                bus->upstream = upstream;
                bus->netdev = ndev;
+               ndev->sfp_bus = bus;
 
-               if (bus->sfp)
+               if (bus->sfp) {
                        ret = sfp_register_bus(bus);
+                       if (ret)
+                               sfp_upstream_clear(bus);
+               }
                rtnl_unlock();
        }
 
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
        rtnl_lock();
        if (bus->sfp)
                sfp_unregister_bus(bus);
-       bus->upstream = NULL;
-       bus->netdev = NULL;
+       sfp_upstream_clear(bus);
        rtnl_unlock();
 
        sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
 }
 EXPORT_SYMBOL_GPL(sfp_module_remove);
 
+static void sfp_socket_clear(struct sfp_bus *bus)
+{
+       bus->sfp_dev = NULL;
+       bus->sfp = NULL;
+       bus->socket_ops = NULL;
+}
+
 struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
                                    const struct sfp_socket_ops *ops)
 {
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
                bus->sfp = sfp;
                bus->socket_ops = ops;
 
-               if (bus->netdev)
+               if (bus->netdev) {
                        ret = sfp_register_bus(bus);
+                       if (ret)
+                               sfp_socket_clear(bus);
+               }
                rtnl_unlock();
        }
 
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
        rtnl_lock();
        if (bus->netdev)
                sfp_unregister_bus(bus);
-       bus->sfp_dev = NULL;
-       bus->sfp = NULL;
-       bus->socket_ops = NULL;
+       sfp_socket_clear(bus);
        rtnl_unlock();
 
        sfp_bus_put(bus);
index de51e8f70f44ea6663b330d2ae41024e99865490..ce61231e96ea5fe27f512fbd0d80d4609997e508 100644 (file)
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppoe_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
index a192a017cc68878360505b93df151de3d0b9b730..f5727baac84a5d10fd70837a75fcfa8194992f9a 100644 (file)
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                case XDP_TX:
                        get_page(alloc_frag->page);
                        alloc_frag->offset += buflen;
-                       if (tun_xdp_tx(tun->dev, &xdp))
+                       if (tun_xdp_tx(tun->dev, &xdp) < 0)
                                goto err_redirect;
                        rcu_read_unlock();
                        local_bh_enable();
index 3d4f7959dabb9c39e17754df4f72013c89743d5a..b1b3d8f7e67dd052eae618e33698c633751df60a 100644 (file)
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
                                     priv->presvd_phy_advertise);
 
                /* Restore BMCR */
+               if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
+                       priv->presvd_phy_bmcr |= BMCR_ANRESTART;
+
                asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
                                     priv->presvd_phy_bmcr);
 
-               mii_nway_restart(&dev->mii);
                priv->presvd_phy_advertise = 0;
                priv->presvd_phy_bmcr = 0;
        }
index b0e8b9613054137215e2f502f9deeab3bbad8f80..1eaec648bd1f716db3d06622cdfb7834e64e4e38 100644 (file)
@@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
 
        atomic_set(&ctx->stop, 1);
 
-       if (hrtimer_active(&ctx->tx_timer))
-               hrtimer_cancel(&ctx->tx_timer);
+       hrtimer_cancel(&ctx->tx_timer);
 
        tasklet_kill(&ctx->bh);
 
index 8dff87ec6d99c5dca122dcdb5d3697157564cfa2..ed10d49eb5e0b66068fe366950cee3de2de3257d 100644 (file)
@@ -64,6 +64,7 @@
 #define DEFAULT_RX_CSUM_ENABLE         (true)
 #define DEFAULT_TSO_CSUM_ENABLE                (true)
 #define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define DEFAULT_VLAN_RX_OFFLOAD                (true)
 #define TX_OVERHEAD                    (8)
 #define RXW_PADDING                    2
 
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;
 
-       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
 
        netdev->mtu = new_mtu;
 
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        buf |= FCT_TX_CTL_EN_;
        ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
 
-       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev,
+                                             dev->net->mtu + VLAN_ETH_HLEN);
 
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
        if (DEFAULT_TSO_CSUM_ENABLE)
                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
 
+       if (DEFAULT_VLAN_RX_OFFLOAD)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+       if (DEFAULT_VLAN_FILTER_ENABLE)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
        dev->net->hw_features = dev->net->features;
 
        ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
                                    struct sk_buff *skb,
                                    u32 rx_cmd_a, u32 rx_cmd_b)
 {
+       /* HW Checksum offload appears to be flawed if used when not stripping
+        * VLAN headers. Drop back to S/W checksums under these conditions.
+        */
        if (!(dev->net->features & NETIF_F_RXCSUM) ||
-           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+           ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+            !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
                skb->ip_summed = CHECKSUM_NONE;
        } else {
                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
        }
 }
 
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
+       if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (rx_cmd_a & RX_CMD_A_FVTG_))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      (rx_cmd_b & 0xffff));
+}
+
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
        int             status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        if (skb->len == size) {
                                lan78xx_rx_csum_offload(dev, skb,
                                                        rx_cmd_a, rx_cmd_b);
+                               lan78xx_rx_vlan_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
 
                                skb_trim(skb, skb->len - 4); /* remove fcs */
                                skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        skb_set_tail_pointer(skb2, size);
 
                        lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+                       lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
 
                        skb_trim(skb2, skb2->len - 4); /* remove fcs */
                        skb2->truesize = size + sizeof(struct sk_buff);
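
When hardware strips the VLAN header, the tag must be handed back to the stack as skb metadata or the frame silently loses its VLAN. That is what __vlan_hwaccel_put_tag() does in both RX paths above; minimal sketch (the 0xffff mask on rx_cmd_b follows the diff):

#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/types.h>

static void example_put_rx_vlan(struct sk_buff *skb, u32 rx_cmd_b)
{
        /* low 16 bits of rx_cmd_b carry the stripped TCI (PCP + VID) */
        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                               rx_cmd_b & 0xffff);
}
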
@@ -3313,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
        pkt_cnt = 0;
        count = 0;
        length = 0;
+       spin_lock_irqsave(&tqp->lock, flags);
        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
                if (skb_is_gso(skb)) {
                        if (pkt_cnt) {
@@ -3321,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                        }
                        count = 1;
                        length = skb->len - TX_OVERHEAD;
-                       skb2 = skb_dequeue(tqp);
+                       __skb_unlink(skb, tqp);
+                       spin_unlock_irqrestore(&tqp->lock, flags);
                        goto gso_skb;
                }
 
@@ -3330,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
                skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
                pkt_cnt++;
        }
+       spin_unlock_irqrestore(&tqp->lock, flags);
 
        /* copy to a single skb */
        skb = alloc_skb(skb_totallen, GFP_ATOMIC);
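
The tx_bh fix replaces lock-per-call skb_dequeue() with a single lock held across the whole scan, so the queue cannot change between inspecting an skb and unlinking it. The locked-walk idiom, sketched:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static struct sk_buff *example_pick_first_gso(struct sk_buff_head *q)
{
        struct sk_buff *skb, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        skb_queue_walk(q, skb) {
                if (skb_is_gso(skb)) {
                        /* safe only because we stop walking right here */
                        __skb_unlink(skb, q);
                        found = skb;
                        break;
                }
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return found;
}
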
index 8e8b51f171f4fa227340e80009ce5c2c059db053..38502809420b9c76ac5fdc89c59faf71dff41e9b 100644 (file)
@@ -1246,12 +1246,14 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x413c, 0x81b3, 8)},    /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 8)},    /* Dell Wireless 5811e */
        {QMI_FIXED_INTF(0x413c, 0x81b6, 10)},   /* Dell Wireless 5811e */
+       {QMI_FIXED_INTF(0x413c, 0x81d7, 1)},    /* Dell Wireless 5821e */
        {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
        {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},    /* HP lt4120 Snapdragon X5 LTE */
        {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0  Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
 
index 86f7196f9d91fbf55c791fff88687a43518d66d8..2a58607a6aea809b14e0aa03955cfa099118e607 100644 (file)
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&tp->pm_notifier);
 #endif
-       napi_disable(&tp->napi);
+       if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+               napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
index 5f565bd574da3bc7ce741e3b280a9ff5dece4352..48ba80a8ca5ce8e566931979edcff4bcfe47bc2e 100644 (file)
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
                   (netdev->flags & IFF_ALLMULTI)) {
                rx_creg &= 0xfffe;
                rx_creg |= 0x0002;
-               dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+               dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
        } else {
                /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
                rx_creg &= 0x00fc;
index 7a6a1fe793090b8e28f5ef075f5ebc2ad385b5eb..05553d2524469f97e4a02bb48f43f6820ad2b3e5 100644 (file)
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
 module_param(turbo_mode, bool, 0644);
 MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
 
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
 static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
                                            u32 *data, int in_pm)
 {
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
                return -EIO;
        }
 
+       /* phy workaround for gig link */
+       smsc75xx_phy_gig_workaround(dev);
+
        smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
                ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
                ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
        return -EIO;
 }
 
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+       struct mii_if_info *mii = &dev->mii;
+       int ret = 0, timeout = 0;
+       u32 buf, link_up = 0;
+
+       /* Set the phy in Gig loopback */
+       smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+       /* Wait for the link up */
+       do {
+               link_up = smsc75xx_link_ok_nopm(dev);
+               usleep_range(10000, 20000);
+               timeout++;
+       } while ((!link_up) && (timeout < 1000));
+
+       if (timeout >= 1000) {
+               netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+               return -EIO;
+       }
+
+       /* phy reset */
+       ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+               return ret;
+       }
+
+       buf |= PMT_CTL_PHY_RST;
+
+       ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+       if (ret < 0) {
+               netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+               return ret;
+       }
+
+       timeout = 0;
+       do {
+               usleep_range(10000, 20000);
+               ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+               if (ret < 0) {
+                       netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+                                   ret);
+                       return ret;
+               }
+               timeout++;
+       } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+       if (timeout >= 100) {
+               netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static int smsc75xx_reset(struct usbnet *dev)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
index b6c9a2af37328d1037c3b0ba761256092556167e..53085c63277b4ecfa9d8651543bfb5e545fa73ee 100644 (file)
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX          BIT(0)
+#define VIRTIO_XDP_REDIR       BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
-                                    bool *xdp_xmit)
+                                    unsigned int *xdp_xmit)
 {
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
-                                        bool *xdp_xmit)
+                                        unsigned int *xdp_xmit)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                      void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+                      void *buf, unsigned int len, void **ctx,
+                      unsigned int *xdp_xmit)
 {
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
        }
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+                          unsigned int *xdp_xmit)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct send_queue *sq;
        unsigned int received, qp;
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
 
        virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit) {
+       if (xdp_xmit & VIRTIO_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & VIRTIO_XDP_TX) {
                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                     smp_processor_id();
                sq = &vi->sq[qp];
                virtqueue_kick(sq->vq);
-               xdp_do_flush_map();
        }
 
        return received;
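
Splitting the old bool into VIRTIO_XDP_TX / VIRTIO_XDP_REDIR lets the poll path run each flush exactly once, and only when the corresponding action actually fired during this poll. Skeleton of the pattern (the TX-kick helper is hypothetical):

#include <linux/bits.h>
#include <linux/filter.h>

#define MY_XDP_TX       BIT(0)
#define MY_XDP_REDIR    BIT(1)

void kick_xdp_tx_queue(void);   /* hypothetical per-device TX kick */

static void flush_xdp_actions(unsigned int xdp_xmit)
{
        if (xdp_xmit & MY_XDP_REDIR)
                xdp_do_flush_map();     /* flush redirect maps once */

        if (xdp_xmit & MY_XDP_TX)
                kick_xdp_tx_queue();    /* one kick, not one per packet */
}
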
index aee0e60471f10d59c39ad39f8170eedea722455d..f6bb1d54d4bdec833b4104134d3744a7a11af312 100644 (file)
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
        flush = 0;
 
 out:
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
index e9c2fb318c03362d84031241a4191db9f4602c1a..836e0a47b94a0a192b210620d6652c41145cbab1 100644 (file)
@@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                           ath10k_mac_max_vht_nss(vht_mcs_mask)));
 
        if (changed & IEEE80211_RC_BW_CHANGED) {
-               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
-                          sta->addr, bw);
+               enum wmi_phy_mode mode;
+
+               mode = chan_to_phymode(&def);
+               ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+                               sta->addr, bw, mode);
+
+               err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+                               WMI_PEER_PHYMODE, mode);
+               if (err) {
+                       ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+                                       sta->addr, mode, err);
+                       goto exit;
+               }
 
                err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
                                                WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
                                    sta->addr);
        }
 
+exit:
        mutex_unlock(&ar->conf_mutex);
 }
 
index b48db54e986516271daab61f30277815a7a9bf6d..d68afb65402a069528b0dc3a01eab142c833898c 100644 (file)
@@ -6144,6 +6144,7 @@ enum wmi_peer_param {
        WMI_PEER_NSS        = 0x5,
        WMI_PEER_USE_4ADDR  = 0x6,
        WMI_PEER_DEBUG      = 0xa,
+       WMI_PEER_PHYMODE    = 0xd,
        WMI_PEER_DUMMY_VAR  = 0xff, /* dummy parameter for STA PS workaround */
 };
 
index 1279064a3b716c2ef6cf82d82c27ea664f1496b4..51a038022c8b80404b9bd841c6fefd3b866ffe66 100644 (file)
@@ -1,4 +1,4 @@
-/*
+/*
  * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
index 9d99eb42d9176f0f833048b3f87a906542c9e90c..6acba67bca07abd7d662466b4422295dff359a33 100644 (file)
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
        bool "PCIE bus interface support for FullMAC driver"
        depends on BRCMFMAC
        depends on PCI
-       depends on HAS_DMA
        select BRCMFMAC_PROTO_MSGBUF
        select FW_LOADER
        ---help---
index c99a191e8d693a3e6ef006826fcde5affb74a02d..a907d7b065fa8e0b7ab6a35dc2c265a6385d9c75 100644 (file)
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
        brcmf_dbg(TRACE, "Enter\n");
 
        if (bus) {
+               /* Stop watchdog task */
+               if (bus->watchdog_tsk) {
+                       send_sig(SIGTERM, bus->watchdog_tsk, 1);
+                       kthread_stop(bus->watchdog_tsk);
+                       bus->watchdog_tsk = NULL;
+               }
+
                /* De-register interrupt handler */
                brcmf_sdiod_intr_unregister(bus->sdiodev);
 
index 6e3cf9817730b53f751f31401425ffd1c1dc82a4..88f4c89f89ba85f5ff64085f8f29abf20722d5ea 100644 (file)
@@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
                                         MWIFIEX_FUNC_SHUTDOWN);
        }
 
-       if (adapter->workqueue)
-               flush_workqueue(adapter->workqueue);
-
-       mwifiex_usb_free(card);
-
        mwifiex_dbg(adapter, FATAL,
                    "%s: removing card\n", __func__);
        mwifiex_remove_card(adapter);
@@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
 
+       mwifiex_usb_free(card);
+
        mwifiex_usb_cleanup_tx_aggr(adapter);
 
        card->adapter = NULL;
index 9d2f9a776ef18e405c80e73c40c9eb3bc137b50d..b804abd464ae06365adbe108a5706412efe53f4b 100644 (file)
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
         */
        spin_lock_bh(&dev->con_mon_lock);
        avg_rssi = ewma_rssi_read(&dev->avg_rssi);
-       WARN_ON_ONCE(avg_rssi == 0);
+       spin_unlock_bh(&dev->con_mon_lock);
+       if (avg_rssi == 0)
+               return;
+
        avg_rssi = -avg_rssi;
        if (avg_rssi <= -70)
                val -= 0x20;
        else if (avg_rssi <= -60)
                val -= 0x10;
-       spin_unlock_bh(&dev->con_mon_lock);
 
        if (val != mt7601u_bbp_rr(dev, 66))
                mt7601u_bbp_wr(dev, 66, val);
index 025fa6018550895ae529c7222d9595a1fb621748..8d1492a90bd135c09213f05d52ff85682a80de71 100644 (file)
@@ -7,7 +7,7 @@ config QTNFMAC
 config QTNFMAC_PEARL_PCIE
        tristate "Quantenna QSR10g PCIe support"
        default n
-       depends on HAS_DMA && PCI && CFG80211
+       depends on PCI && CFG80211
        select QTNFMAC
        select FW_LOADER
        select CRC32
index 220e2b71020859163cc4affc71f505648561b151..ae0ca800684950e65ecc01916f4782af54a8e0eb 100644 (file)
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
        vif = qtnf_mac_get_base_vif(mac);
        if (!vif) {
                pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
-               ret = -EFAULT;
-               goto out;
+               return -EFAULT;
        }
 
        if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
index 39c817eddd78e9cf736fbbd440c6617867afad20..54c9f6ab0c8cadb483d10413783b45b401c6f6f4 100644 (file)
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
 
 }
 
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        del_timer_sync(&rtlpriv->works.watchdog_timer);
 
-       cancel_delayed_work(&rtlpriv->works.watchdog_wq);
-       cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
-       cancel_delayed_work(&rtlpriv->works.ps_work);
-       cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
-       cancel_delayed_work(&rtlpriv->works.fwevt_wq);
-       cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
+       if (ips_wq)
+               cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+       else
+               cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.ps_work);
+       cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
 }
 EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
 
index 912f205779c39e68387269825fdae5c4fcaa206f..a7ae40eaa3cd538f96622e4e9a53da3c2b13ccec 100644 (file)
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
 void rtl_deinit_rfkill(struct ieee80211_hw *hw);
 
 void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
index cfea57efa7f43c6bc1c6e99a45978053a1eed2ba..4bf7967590ca7be3b9b452d0b0ec65d98b829599 100644 (file)
@@ -130,7 +130,6 @@ found_alt:
                       firmware->size);
                rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
        }
-       rtlpriv->rtlhal.fwsize = firmware->size;
        release_firmware(firmware);
 }
 
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
                /* reset sec info */
                rtl_cam_reset_sec_info(hw);
 
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
        }
        rtlpriv->intf_ops->adapter_stop(hw);
 
index ae13bcfb3bf09cc142a81c9ab78c31ad80fd035e..5d1fda16fc8c4c966ff8e24ca8d49bd3b6ca38c6 100644 (file)
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
                ieee80211_unregister_hw(hw);
                rtlmac->mac80211_registered = 0;
        } else {
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
                rtlpriv->intf_ops->adapter_stop(hw);
        }
        rtlpriv->cfg->ops->disable_interrupt(hw);
index 71af24e2e05197a344dd549c94d6f8ecba7ce00f..479a4cfc245d349e105457845719ca79b7ebb10d 100644 (file)
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
 
        /*<1> Stop all timer */
-       rtl_deinit_deferred_work(hw);
+       rtl_deinit_deferred_work(hw, true);
 
        /*<2> Disable Interrupt */
        rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        enum rf_pwrstate rtstate;
 
-       cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+       cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
 
        mutex_lock(&rtlpriv->locks.ips_mutex);
        if (ppsc->inactiveps) {
index f9faffc498bcbd2d94cad365814955f9b1347759..2ac5004d7a401ab5d1255126c5c0a00a5e233705 100644 (file)
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
                ieee80211_unregister_hw(hw);
                rtlmac->mac80211_registered = 0;
        } else {
-               rtl_deinit_deferred_work(hw);
+               rtl_deinit_deferred_work(hw, false);
                rtlpriv->intf_ops->adapter_stop(hw);
        }
        /*deinit rfkill */
index 922ce0abf5cf105a5394285b07356ebcad055d78..a57daecf1d574fc1a6e25ca5eb043c1617fe2dcc 100644 (file)
@@ -1810,7 +1810,7 @@ static int talk_to_netback(struct xenbus_device *dev,
        err = xen_net_read_mac(dev, info->netdev->dev_addr);
        if (err) {
                xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
-               goto out;
+               goto out_unlocked;
        }
 
        rtnl_lock();
@@ -1925,6 +1925,7 @@ abort_transaction_no_dev_fatal:
        xennet_destroy_queues(info);
  out:
        rtnl_unlock();
+out_unlocked:
        device_unregister(&dev->dev);
        return err;
 }
@@ -1950,10 +1951,6 @@ static int xennet_connect(struct net_device *dev)
        /* talk_to_netback() sets the correct number of queues */
        num_queues = dev->real_num_tx_queues;
 
-       rtnl_lock();
-       netdev_update_features(dev);
-       rtnl_unlock();
-
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                err = register_netdev(dev);
                if (err) {
@@ -1963,6 +1960,10 @@ static int xennet_connect(struct net_device *dev)
                }
        }
 
+       rtnl_lock();
+       netdev_update_features(dev);
+       rtnl_unlock();
+
        /*
         * All public and private state should now be sane.  Get
         * ready to start sending and receiving packets and give the driver
index d5553c47014fade81a4f461903b3cb6c4372ccf5..5d823e965883b0f5f23db5ab39afc9f96a128267 100644 (file)
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
        struct sk_buff *skb = NULL;
 
        if (!urb->status) {
-               skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+               skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
                if (!skb) {
                        nfc_err(&phy->udev->dev, "failed to alloc memory\n");
                } else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
 
        if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
                /* request for response for sent packet directly */
-               rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+               rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
                if (rc)
                        goto error;
        } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
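
Both pn533 hunks are about allocation context: the URB completion handler runs in atomic (interrupt) context and must not sleep, while the frame-send path runs in process context, where a sleeping GFP_KERNEL allocation is legal and less likely to fail under memory pressure. Sketch:

#include <linux/skbuff.h>
#include <linux/usb.h>

static void example_complete(struct urb *urb)   /* atomic context */
{
        struct sk_buff *skb = alloc_skb(urb->actual_length, GFP_ATOMIC);

        if (!skb)
                return;         /* must not sleep or retry here */
        /* ... copy urb->transfer_buffer into skb, hand to the stack ... */
}

static int example_submit(struct urb *urb)      /* process context */
{
        return usb_submit_urb(urb, GFP_KERNEL); /* may sleep */
}
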
index 2e96b34bc936bf89f6a9a65d983e4bbf3a673fbd..fb667bf469c7e980411c2836d4a9b97d1f96a53d 100644 (file)
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
                        return -EIO;
                if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
                        return -EIO;
+               return 0;
        }
 
        if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
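
Background sketch, not part of the patch: the added return 0 closes a
fallthrough in which a successful read continued into checks meant for the
write path. The bug class in miniature, with hypothetical helpers:

    #include <linux/errno.h>

    static int do_read(void);               /* hypothetical */
    static int do_write(void);              /* hypothetical */

    static int rw_bytes(int is_write)
    {
            if (!is_write) {
                    if (do_read() != 0)
                            return -EIO;
                    return 0;       /* without this, control falls
                                     * through into the write handling */
            }
            return do_write();
    }
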
index 68940356cad3f100f4cfbdd325d42235ea3c5da4..8b1fd7f1a224eedebf08cddfe2258949c50a6bcf 100644 (file)
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+       if (pmem->pfn_flags & PFN_MAP)
+               blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;
 
        disk = alloc_disk_node(0, nid);
index 21710a7460c823bbc4f84134d7ecce70d3f993ba..46df030b2c3f74f33621dc7d4512b17fe40fd079 100644 (file)
@@ -1808,6 +1808,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                u32 max_segments =
                        (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
 
+               max_segments = min_not_zero(max_segments, ctrl->max_segments);
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
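
Background sketch, not part of the patch: min_not_zero() picks the smaller
of two values but treats 0 as "unset", so controllers that never fill in
ctrl->max_segments keep the value computed from max_hw_sectors:

    #include <linux/kernel.h>       /* min_not_zero() */

    static u32 pick_limit(u32 computed, u32 ctrl_limit)
    {
            /* ctrl_limit == 0 means "no limit": computed wins */
            return min_not_zero(computed, ctrl_limit);
    }
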
index b528a2f5826cbfe19b22aadd7e09e1ceff512cb6..41d45a1b5c628c143caac1886bd52f08a360f15e 100644 (file)
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
        /* re-enable the admin_q so anything new can fast fail */
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
+       /* resume the io queues so that things will fast fail */
+       nvme_start_queues(&ctrl->ctrl);
+
        nvme_fc_ctlr_inactive_on_rport(ctrl);
 }
 
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
         * waiting for io to terminate
         */
        nvme_fc_delete_association(ctrl);
-
-       /* resume the io queues so that things will fast fail */
-       nvme_start_queues(nctrl);
 }
 
 static void
index 231807cbc849869afcbc16fce2e3389539ce2684..0c4a33df3b2f3bb9d8a710556dab3f8c55ed2302 100644 (file)
@@ -170,6 +170,7 @@ struct nvme_ctrl {
        u64 cap;
        u32 page_size;
        u32 max_hw_sectors;
+       u32 max_segments;
        u16 oncs;
        u16 oacs;
        u16 nssa;
index fc33804662e7bd35cfbacd93a26101bf23b3f43d..ba943f211687c638cab9708dbb5011b7e3d53b2f 100644 (file)
 
 #define SGES_PER_PAGE  (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS  127
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -100,6 +107,8 @@ struct nvme_dev {
        struct nvme_ctrl ctrl;
        struct completion ioq_wait;
 
+       mempool_t *iod_mempool;
+
        /* shadow doorbell buffer support: */
        u32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
        iod->use_sgl = nvme_pci_use_sgls(dev, rq);
 
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
-               size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
-                               iod->use_sgl);
-
-               iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+               iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
                if (!iod->sg)
                        return BLK_STS_RESOURCE;
        } else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
        }
 
        if (iod->sg != iod->inline_sg)
-               kfree(iod->sg);
+               mempool_free(iod->sg, dev->iod_mempool);
 }
 
 #ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
                blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
        free_opal_dev(dev->ctrl.opal_dev);
+       mempool_destroy(dev->iod_mempool);
        kfree(dev);
 }
 
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
 
        nvme_get_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, false);
+       nvme_kill_queues(&dev->ctrl);
        if (!queue_work(nvme_wq, &dev->remove_work))
                nvme_put_ctrl(&dev->ctrl);
 }
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
        if (result)
                goto out;
 
+       /*
+        * Limit the max command size to prevent iod->sg allocations from
+        * going over a single page.
+        */
+       dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+       dev->ctrl.max_segments = NVME_MAX_SEGS;
+
        result = nvme_init_identify(&dev->ctrl);
        if (result)
                goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
        struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       nvme_kill_queues(&dev->ctrl);
        if (pci_get_drvdata(pdev))
                device_release_driver(&pdev->dev);
        nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        int node, result = -ENOMEM;
        struct nvme_dev *dev;
        unsigned long quirks = id->driver_data;
+       size_t alloc_size;
 
        node = dev_to_node(&pdev->dev);
        if (node == NUMA_NO_NODE)
@@ -2546,6 +2561,23 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
+       /*
+        * Double check that our mempool alloc size will cover the biggest
+        * command we support.
+        */
+       alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+                                               NVME_MAX_SEGS, true);
+       WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+       dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+                                               mempool_kfree,
+                                               (void *) alloc_size,
+                                               GFP_KERNEL, node);
+       if (!dev->iod_mempool) {
+               result = -ENOMEM;
+               goto release_pools;
+       }
+
        dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
        nvme_get_ctrl(&dev->ctrl);
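
Background sketch, not part of the patch: replacing a bare
kmalloc(..., GFP_ATOMIC) in the I/O path with a mempool is what guarantees
forward progress under memory pressure; even one reserved element is enough,
because the request holding it eventually completes and returns it to the
pool. The shape of the API, with assumed sizes:

    #include <linux/mempool.h>

    static mempool_t *make_iod_pool(size_t alloc_size, int nid)
    {
            /* pool_data carries the element size for mempool_kmalloc() */
            return mempool_create_node(1, mempool_kmalloc, mempool_kfree,
                                       (void *)alloc_size, GFP_KERNEL, nid);
    }

    /* mempool_alloc(pool, GFP_ATOMIC) falls back to the reserved element;
     * mempool_free() refills the reserve before freeing to the slab.    */
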
index c9424da0d23e3cbbdd0e2b5209d9eddca9f1591f..518c5b09038c1e9041a89590a6d79101995650d4 100644 (file)
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
        if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
                return;
 
-       if (nvme_rdma_queue_idx(queue) == 0) {
-               nvme_rdma_free_qe(queue->device->dev,
-                       &queue->ctrl->async_event_sqe,
-                       sizeof(struct nvme_command), DMA_TO_DEVICE);
-       }
-
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
 }
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
                set = &ctrl->tag_set;
                memset(set, 0, sizeof(*set));
                set->ops = &nvme_rdma_mq_ops;
-               set->queue_depth = nctrl->opts->queue_size;
+               set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
                set->numa_node = NUMA_NO_NODE;
                set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,15 @@ out:
 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.admin_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
        }
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                               sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
        nvme_rdma_free_queue(&ctrl->queues[0]);
 }
 
@@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
        ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
 
+       error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+       if (error)
+               goto out_free_queue;
+
        if (new) {
                ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
                if (IS_ERR(ctrl->ctrl.admin_tagset)) {
                        error = PTR_ERR(ctrl->ctrl.admin_tagset);
-                       goto out_free_queue;
+                       goto out_free_async_qe;
                }
 
                ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_stop_queue;
 
-       error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
-                       &ctrl->async_event_sqe, sizeof(struct nvme_command),
-                       DMA_TO_DEVICE);
-       if (error)
-               goto out_stop_queue;
-
        return 0;
 
 out_stop_queue:
@@ -811,6 +808,9 @@ out_cleanup_queue:
 out_free_tagset:
        if (new)
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+               sizeof(struct nvme_command), DMA_TO_DEVICE);
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
@@ -819,7 +819,6 @@ out_free_queue:
 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       nvme_rdma_stop_io_queues(ctrl);
        if (remove) {
                blk_cleanup_queue(ctrl->ctrl.connect_q);
                nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
+       kfree(ctrl->queues);
        kfree(ctrl);
 }
 
@@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        return;
 
 destroy_admin:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, false);
 requeue:
        dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, false);
        }
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
        if (ctrl->ctrl.queue_count > 1) {
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_rdma_stop_io_queues(ctrl);
                blk_mq_tagset_busy_iter(&ctrl->tag_set,
                                        nvme_cancel_request, &ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
                nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
 
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_cancel_request, &ctrl->ctrl);
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_free_ctrl;
        }
 
-       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
-                               0 /* no quirks, we're perfect! */);
-       if (ret)
-               goto out_free_ctrl;
-
        INIT_DELAYED_WORK(&ctrl->reconnect_work,
                        nvme_rdma_reconnect_ctrl_work);
        INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
                                GFP_KERNEL);
        if (!ctrl->queues)
-               goto out_uninit_ctrl;
+               goto out_free_ctrl;
+
+       ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+                               0 /* no quirks, we're perfect! */);
+       if (ret)
+               goto out_kfree_queues;
 
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
        WARN_ON_ONCE(!changed);
 
        ret = nvme_rdma_configure_admin_queue(ctrl, true);
        if (ret)
-               goto out_kfree_queues;
+               goto out_uninit_ctrl;
 
        /* sanity check icdoff */
        if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                goto out_remove_admin_queue;
        }
 
-       if (opts->queue_size > ctrl->ctrl.maxcmd) {
-               /* warn if maxcmd is lower than queue_size */
-               dev_warn(ctrl->ctrl.device,
-                       "queue_size %zu > ctrl maxcmd %u, clamping down\n",
-                       opts->queue_size, ctrl->ctrl.maxcmd);
-               opts->queue_size = ctrl->ctrl.maxcmd;
-       }
-
+       /* only warn if argument is too large here, will clamp later */
        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
-               /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
-               opts->queue_size = ctrl->ctrl.sqsize + 1;
+       }
+
+       /* warn if maxcmd is lower than sqsize+1 */
+       if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+               dev_warn(ctrl->ctrl.device,
+                       "sqsize %u > ctrl maxcmd %u, clamping down\n",
+                       ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+               ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
        }
 
        if (opts->nr_io_queues) {
@@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
        return &ctrl->ctrl;
 
 out_remove_admin_queue:
+       nvme_rdma_stop_queue(&ctrl->queues[0]);
        nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
-       kfree(ctrl->queues);
 out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
        nvme_put_ctrl(&ctrl->ctrl);
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
+out_kfree_queues:
+       kfree(ctrl->queues);
 out_free_ctrl:
        kfree(ctrl);
        return ERR_PTR(ret);
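
Background sketch, not part of the patch: the reordering here (allocate
ctrl->queues before nvme_init_ctrl(), and move the matching unwind label)
follows the standard rule that cleanup labels run in reverse order of setup,
so each goto releases exactly what exists at that point. In miniature, with
placeholder names:

    static int example_create(void)
    {
            void *a, *b;

            a = alloc_a();                  /* hypothetical */
            if (!a)
                    return -ENOMEM;
            b = alloc_b();                  /* hypothetical */
            if (!b)
                    goto free_a;
            if (init_c())                   /* hypothetical */
                    goto free_b;
            return 0;

    free_b:
            release_b(b);
    free_a:
            release_a(a);
            return -ENOMEM;
    }
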
index a03da764ecae8cb3ec9bf0bb9c68971669b895fc..74d4b785d2daac7d203108f06286221b5337f993 100644 (file)
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
        }
 
        ctrl->csts = NVME_CSTS_RDY;
+
+       /*
+        * Controllers that are not yet enabled should not really enforce the
+        * keep alive timeout, but we still want to track a timeout and cleanup
+        * in case a host died before it enabled the controller.  Hence, simply
+        * reset the keep alive timer when the controller is enabled.
+        */
+       mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
 }
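
Background sketch, not part of the patch: mod_delayed_work() is to delayed
work what mod_timer() is to timers, it queues the work if idle and otherwise
moves the existing expiry, which is exactly "reset the keep-alive timer":

    #include <linux/workqueue.h>

    static void reset_keepalive(struct delayed_work *ka, unsigned long kato_secs)
    {
            /* re-arms whether or not ka was already pending */
            mod_delayed_work(system_wq, ka, kato_secs * HZ);
    }
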
 
 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
index b5b0cdc21d01b4cd940e4fddb6f06976258e782d..514d1dfc563059684d4fbc6a3e8b7aa8bd07da64 100644 (file)
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
                        return cell;
        }
 
+       /* NULL cell_id only allowed for device tree; invalid otherwise */
+       if (!cell_id)
+               return ERR_PTR(-EINVAL);
+
        return nvmem_cell_get_from_list(cell_id);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_get);
index 848f549164cd0434ae8a28210d47b9ade2b42de3..466e3c8582f0fd62628b90872b2046971e064776 100644 (file)
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
  *   - the phandle lookup overhead reduction provided by the cache
  *     will likely be less
  */
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
 {
        unsigned long flags;
        u32 cache_entries;
@@ -134,8 +134,7 @@ out:
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
 {
        unsigned long flags;
 
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
 
        return 0;
 }
+#if !defined(CONFIG_MODULES)
 late_initcall_sync(of_free_phandle_cache);
 #endif
 
index 891d780c076a12d14dde8d50b87d940e9c83707e..216175d11d3dc2ca3fdfaa429306ecf50218a01a 100644 (file)
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
index 7baa53e5b1d74d469959341945e3bc239cf7d5c7..eda57ef12fd057b3d92c750dec983703e85f38e3 100644 (file)
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
                goto err_free_overlay_changeset;
        }
 
+       of_populate_phandle_cache();
+
        ret = __of_changeset_apply_notify(&ovcs->cset);
        if (ret)
                pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
 
        list_del(&ovcs->ovcs_list);
 
+       /*
+        * Disable the phandle cache.  Avoids a race condition that would
+        * arise from removing a cache entry when the associated node is
+        * deleted.
+        */
+       of_free_phandle_cache();
+
        ret_apply = 0;
        ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+
+       of_populate_phandle_cache();
+
        if (ret) {
                if (ret_apply)
                        devicetree_state_flags |= DTSF_REVERT_FAIL;
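
Background sketch, not part of the patch: this is a bracketing pattern, drop
a lookup cache before a mutation that can delete the objects it caches, then
rebuild it from whatever survives, so no cache entry can outlive its node.
Generic shape with placeholder names:

    cache_drop();           /* no stale hits possible ...          */
    err = mutate_tree();    /* ... while nodes are being deleted   */
    cache_rebuild();        /* repopulate from the surviving nodes */
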
index ab2f3fead6b1ceee55b0ced767dafdacb1202b35..31ff03dbeb83771be1ba57fde89ea6c32f63c2aa 100644 (file)
@@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
        }
 
        /* Scaling up? Scale voltage before frequency */
-       if (freq > old_freq) {
+       if (freq >= old_freq) {
                ret = _set_opp_voltage(dev, reg, new_supply);
                if (ret)
                        goto restore_voltage;
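
Background sketch, not part of the patch: with the old ">" test, a request
for the same frequency but a different target voltage matched neither the
scale-up nor the scale-down branch, so the voltage was never reprogrammed;
">=" routes the equal-frequency case through the pre-clock voltage update.
Simplified shape of the surrounding logic:

    if (freq >= old_freq)
            set_voltage(new);       /* now also runs when freq == old_freq */
    set_clock(freq);
    if (freq < old_freq)
            set_voltage(new);
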
index 65113b6eed1473aa00daf59b1dfd57b8abd6baaf..89ee6a2b6eb838f426d6d9d3773f70769d1a489f 100644 (file)
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
 {
        struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 
+       /*
+        * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
+        * system-wide suspend/resume confuses the platform firmware, so avoid
+        * doing that, unless the bridge has a driver that should take care of
+        * the PM handling.  According to Section 16.1.6 of ACPI 6.2, endpoint
+        * devices are expected to be in D3 before invoking the S3 entry path
+        * from the firmware, so they should not be affected by this issue.
+        */
+       if (pci_is_bridge(dev) && !dev->driver &&
+           acpi_target_system_state() != ACPI_STATE_S0)
+               return true;
+
        if (!adev || !acpi_device_power_manageable(adev))
                return false;
 
index 6bdb1dad805f8198a6879aeab21a5ff9051b1510..0e31f1392a53ca042519bbbe4bc37dab3bfe87c0 100644 (file)
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
        case PMU_TYPE_IOB:
                return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
        case PMU_TYPE_IOB_SLOW:
-               return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
+               return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
        case PMU_TYPE_MCB:
                return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
        case PMU_TYPE_MC:
index 76243caa08c630c064ebd674f566089bea1fc4ba..b5c880b50bb371f5fb5eeddcd0799cd9d7057289 100644 (file)
@@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
        unsigned long flags;
        unsigned int param;
        u32 reg, bit, width, arg;
-       int ret, i;
+       int ret = 0, i;
 
        info = &pctrl->soc->padinfo[pin];
 
index 35c17653c694767c8e13d1a8d02d7d91718d23b3..87618a4e90e451f2834214a337ce81e12de560fc 100644 (file)
@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
        const struct nsp_pin_function *func;
        const struct nsp_pin_group *grp;
 
-       if (grp_select > pinctrl->num_groups ||
-               func_select > pinctrl->num_functions)
+       if (grp_select >= pinctrl->num_groups ||
+           func_select >= pinctrl->num_functions)
                return -EINVAL;
 
        func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
                return PTR_ERR(pinctrl->base0);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       if (!res)
+               return -EINVAL;
        pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
                                              resource_size(res));
        if (!pinctrl->base1) {
index b601039d6c69a28d771eff622f0001d70bf84204..c4aa411f5935b7b0275c004a3924ffda3613630a 100644 (file)
@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
 }
 
 static int dt_to_map_one_config(struct pinctrl *p,
-                               struct pinctrl_dev *pctldev,
+                               struct pinctrl_dev *hog_pctldev,
                                const char *statename,
                                struct device_node *np_config)
 {
+       struct pinctrl_dev *pctldev = NULL;
        struct device_node *np_pctldev;
        const struct pinctrl_ops *ops;
        int ret;
@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
                        return -EPROBE_DEFER;
                }
                /* If we're creating a hog we can use the passed pctldev */
-               if (pctldev && (np_pctldev == p->dev->of_node))
+               if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
+                       pctldev = hog_pctldev;
                        break;
+               }
                pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
                if (pctldev)
                        break;
index ad6da1184c9f0b1117275df587f502d78abbe904..4c4740ffeb9ca0807f63d77271ad619407155c3b 100644 (file)
@@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = {
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 {
-       struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+       struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        int value, err;
 
        err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
@@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 
 static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
 {
-       struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+       struct mtk_pinctrl *hw = gpiochip_get_data(chip);
 
        mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
 }
@@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
        struct mtk_pinctrl *hw = gpiochip_get_data(chip);
        unsigned long eint_n;
 
+       if (!hw->eint)
+               return -ENOTSUPP;
+
        eint_n = offset;
 
        return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        unsigned long eint_n;
        u32 debounce;
 
-       if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+       if (!hw->eint ||
+           pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
                return -ENOTSUPP;
 
        debounce = pinconf_to_config_argument(config);
@@ -1504,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
        if (ret < 0)
                return ret;
 
-       ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
-                                    chip->ngpio);
-       if (ret < 0) {
-               gpiochip_remove(chip);
-               return ret;
+       /* Just for backward compatibility with old pinctrl nodes that lack
+        * the "gpio-ranges" property. Calling this directly from a
+        * DeviceTree-supported pinctrl driver is otherwise DEPRECATED.
+        * Please see Section 2.1 of
+        * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+        * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+        */
+       if (!of_find_property(np, "gpio-ranges", NULL)) {
+               ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+                                            chip->ngpio);
+               if (ret < 0) {
+                       gpiochip_remove(chip);
+                       return ret;
+               }
        }
 
        return 0;
@@ -1691,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
        mtk_desc.custom_conf_items = mtk_conf_items;
 #endif
 
-       hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw);
-       if (IS_ERR(hw->pctrl))
-               return PTR_ERR(hw->pctrl);
+       err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+                                            &hw->pctrl);
+       if (err)
+               return err;
 
        /* Setup groups descriptions per SoC types */
        err = mtk_build_groups(hw);
        if (err) {
                dev_err(&pdev->dev, "Failed to build groups\n");
-               return 0;
+               return err;
        }
 
        /* Setup functions descriptions per SoC types */
@@ -1709,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = mtk_build_gpiochip(hw, pdev->dev.of_node);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+       /* To make pinctrl_claim_hogs() work, we must not enable pinctrl
+        * until all groups and functions have been added.
+        */
+       err = pinctrl_enable(hw->pctrl);
+       if (err)
                return err;
-       }
 
        err = mtk_build_eint(hw, pdev);
        if (err)
                dev_warn(&pdev->dev,
                         "Failed to add EINT, but pinctrl still can work\n");
 
+       /* The gpiochip must be built after pinctrl_enable() is done */
+       err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+               return err;
+       }
+
        platform_set_drvdata(pdev, hw);
 
        return 0;
index b3799695d8db8264ce917232cb3b1439793fe845..16ff56f93501794edb33231b770afa3dc5d55b73 100644 (file)
@@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
                return -ENOMEM;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "Unable to get eint resource\n");
-               return -ENODEV;
-       }
-
        pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pctl->eint->base))
                return PTR_ERR(pctl->eint->base);
index a1d7156d0a43ad49ac6312ab67b67a7312ad9e10..6a1b6058b9910269c60c448cbca3350bce399af6 100644 (file)
@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
-               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
        }
 
index b3153c095199d3bed84d7b846432fa3e783c08f0..e5647dac0818d46353629a543733fe0af804210b 100644 (file)
@@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs)
 
        mux_bytes = pcs->width / BITS_PER_BYTE;
 
-       if (!pcs->saved_vals)
+       if (!pcs->saved_vals) {
                pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+               if (!pcs->saved_vals)
+                       return -ENOMEM;
+       }
 
        switch (pcs->width) {
        case 64:
@@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
        if (!pcs)
                return -EINVAL;
 
-       if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
-               pcs_save_context(pcs);
+       if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+               int ret;
+
+               ret = pcs_save_context(pcs);
+               if (ret < 0)
+                       return ret;
+       }
 
        return pinctrl_force_sleep(pcs->pctl);
 }
index b02caf31671186d97ea194e612b94adeb70335f4..eeb58b3bbc9a0cef4b47f65c4b8855f683e9f7e0 100644 (file)
 #include "core.h"
 #include "sh_pfc.h"
 
-#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
-
 #define CPU_ALL_PORT(fn, sfx)                                          \
-       PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS),                          \
-       PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-       PORT_GP_CFG_6(4,  fn, sfx, CFG_FLAGS),                          \
-       PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
+       PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_28(1, fn, sfx),                                         \
+       PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),          \
+       PORT_GP_6(4,  fn, sfx),                                         \
+       PORT_GP_15(5, fn, sfx)
 /*
  * F_() : just information
  * FM() : macro for FN_xxx / xxx_MARK
index 767c485af59b2ee0583242b7dbf31581f76cb91a..01b0e2bb33190c78fb3818e34d5aebf4f60b2832 100644 (file)
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
        case PTP_PF_PHYSYNC:
                if (chan != 0)
                        return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
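
Background sketch, not part of the patch: the missing break made every valid
PTP_PF_PHYSYNC request fall through into default: and be rejected. The bug
class in miniature:

    #include <linux/errno.h>

    static int check_pinfunc(int func, int chan)
    {
            switch (func) {
            case 1:                         /* stands in for PTP_PF_PHYSYNC */
                    if (chan != 0)
                            return -EINVAL;
                    break;                  /* the statement that was missing */
            default:
                    return -EINVAL;
            }
            return 0;
    }
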
@@ -221,7 +222,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                }
                pct = &sysoff->ts[0];
                for (i = 0; i < sysoff->n_samples; i++) {
-                       getnstimeofday64(&ts);
+                       ktime_get_real_ts64(&ts);
                        pct->sec = ts.tv_sec;
                        pct->nsec = ts.tv_nsec;
                        pct++;
@@ -230,7 +231,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        pct->nsec = ts.tv_nsec;
                        pct++;
                }
-               getnstimeofday64(&ts);
+               ktime_get_real_ts64(&ts);
                pct->sec = ts.tv_sec;
                pct->nsec = ts.tv_nsec;
                if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
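
Background sketch, not part of the patch: ktime_get_real_ts64() returns the
same wall-clock value getnstimeofday64() did; the rename is part of the
y2038 cleanup that standardizes on 64-bit timespec64 interfaces:

    #include <linux/timekeeping.h>

    static void sample_wallclock(struct timespec64 *ts)
    {
            ktime_get_real_ts64(ts);        /* 64-bit seconds, y2038-safe */
    }
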
index 1468a1642b4978f5048ccace2af1846baf9c51e9..e8652c148c5223d24c67089539b3bb4e861e42d5 100644 (file)
@@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
                pr_err("ioremap ptp registers failed\n");
                goto no_ioremap;
        }
-       getnstimeofday64(&now);
+       ktime_get_real_ts64(&now);
        ptp_qoriq_settime(&qoriq_ptp->caps, &now);
 
        tmr_ctrl =
index 6d4012dd69221a1ebc4b72866824be8a95f29468..bac1eeb3d31204d9e99a93e1e682972b5f7177bb 100644 (file)
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                        return err;
 
                /* full-function RTCs won't have such missing fields */
-               if (rtc_valid_tm(&alarm->time) == 0)
+               if (rtc_valid_tm(&alarm->time) == 0) {
+                       rtc_add_offset(rtc, &alarm->time);
                        return 0;
+               }
 
                /* get the "after" timestamp, to detect wrapped fields */
                err = rtc_read_time(rtc, &now);
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
        if (err)
                return err;
 
-       rtc_subtract_offset(rtc, &alarm->time);
        scheduled = rtc_tm_to_time64(&alarm->time);
 
        /* Make sure we're not setting alarms in the past */
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
+       rtc_subtract_offset(rtc, &alarm->time);
+
        if (!rtc->ops)
                err = -ENODEV;
        else if (!rtc->ops->set_alarm)
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 
        mutex_unlock(&rtc->ops_lock);
 
-       rtc_add_offset(rtc, &alarm->time);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
index 097a4d4e2aba1e947ceaae3c3a7651a56927dac5..1925aaf09093713326553740db6db4358eb9fb51 100644 (file)
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
        }
 
        retval = rtc_register_device(mrst_rtc.rtc);
-       if (retval) {
-               retval = PTR_ERR(mrst_rtc.rtc);
+       if (retval)
                goto cleanup0;
-       }
 
        dev_dbg(dev, "initialised\n");
        return 0;
index 73cce3ecb97fefbccc66266a4fd29f08e453079e..a9f60d0ee02ea2e941edfc7969377b3482e5d04e 100644 (file)
 
 #define DASD_DIAG_MOD          "dasd_diag_mod"
 
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -1222,80 +1231,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
                device->hosts_dentry = pde;
 }
 
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
-{
-       struct dasd_ccw_req *cqr;
-
-       /* Sanity checks */
-       BUG_ON(datasize > PAGE_SIZE ||
-            (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
-       cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
-       if (cqr == NULL)
-               return ERR_PTR(-ENOMEM);
-       cqr->cpaddr = NULL;
-       if (cplength > 0) {
-               cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
-                                     GFP_ATOMIC | GFP_DMA);
-               if (cqr->cpaddr == NULL) {
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->data = NULL;
-       if (datasize > 0) {
-               cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
-               if (cqr->data == NULL) {
-                       kfree(cqr->cpaddr);
-                       kfree(cqr);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
-       cqr->magic =  magic;
-       set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
-       dasd_get_device(device);
-       return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
-                                         int datasize,
-                                         struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+                                         struct dasd_device *device,
+                                         struct dasd_ccw_req *cqr)
 {
        unsigned long flags;
-       struct dasd_ccw_req *cqr;
-       char *data;
-       int size;
+       char *data, *chunk;
+       int size = 0;
 
-       size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
        if (cplength > 0)
                size += cplength * sizeof(struct ccw1);
        if (datasize > 0)
                size += datasize;
+       if (!cqr)
+               size += (sizeof(*cqr) + 7L) & -8L;
+
        spin_lock_irqsave(&device->mem_lock, flags);
-       cqr = (struct dasd_ccw_req *)
-               dasd_alloc_chunk(&device->ccw_chunks, size);
+       data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
        spin_unlock_irqrestore(&device->mem_lock, flags);
-       if (cqr == NULL)
+       if (!chunk)
                return ERR_PTR(-ENOMEM);
-       memset(cqr, 0, sizeof(struct dasd_ccw_req));
-       data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
-       cqr->cpaddr = NULL;
+       if (!cqr) {
+               cqr = (void *) data;
+               data += (sizeof(*cqr) + 7L) & -8L;
+       }
+       memset(cqr, 0, sizeof(*cqr));
+       cqr->mem_chunk = chunk;
        if (cplength > 0) {
-               cqr->cpaddr = (struct ccw1 *) data;
-               data += cplength*sizeof(struct ccw1);
-               memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+               cqr->cpaddr = data;
+               data += cplength * sizeof(struct ccw1);
+               memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
        }
-       cqr->data = NULL;
        if (datasize > 0) {
                cqr->data = data;
                memset(cqr->data, 0, datasize);
@@ -1307,33 +1273,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 }
 EXPORT_SYMBOL(dasd_smalloc_request);
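
Background sketch, not part of the patch: the reworked allocator carves one
chunk into up to three regions, an optional request header (skipped when the
caller passes a cqr embedded in the blk-mq PDU), the CCW array, and the data
area. The "(x + 7L) & -8L" idiom rounds the header size up to a multiple of
8 so the CCWs that follow stay 8-byte aligned:

    #include <stddef.h>

    static size_t align8(size_t x)
    {
            return (x + 7) & ~(size_t)7;    /* same as (x + 7L) & -8L */
    }

    /* chunk layout: [header, 8-aligned, optional][cplength ccws][datasize] */
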
 
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
-       struct ccw1 *ccw;
-
-       /* Clear any idals used for the request. */
-       ccw = cqr->cpaddr;
-       do {
-               clear_normalized_cda(ccw);
-       } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
-       kfree(cqr->cpaddr);
-       kfree(cqr->data);
-       kfree(cqr);
-       dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&device->mem_lock, flags);
-       dasd_free_chunk(&device->ccw_chunks, cqr);
+       dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
        spin_unlock_irqrestore(&device->mem_lock, flags);
        dasd_put_device(device);
 }
@@ -1885,6 +1830,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
        }
 }
 
+static void __dasd_process_cqr(struct dasd_device *device,
+                              struct dasd_ccw_req *cqr)
+{
+       char errorstring[ERRORLENGTH];
+
+       switch (cqr->status) {
+       case DASD_CQR_SUCCESS:
+               cqr->status = DASD_CQR_DONE;
+               break;
+       case DASD_CQR_ERROR:
+               cqr->status = DASD_CQR_NEED_ERP;
+               break;
+       case DASD_CQR_CLEARED:
+               cqr->status = DASD_CQR_TERMINATED;
+               break;
+       default:
+               /* internal error 12 - wrong cqr status */
+               snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
+               dev_err(&device->cdev->dev,
+                       "An error occurred in the DASD device driver, "
+                       "reason=%s\n", errorstring);
+               BUG();
+       }
+       if (cqr->callback)
+               cqr->callback(cqr, cqr->callback_data);
+}
+
 /*
  * the cqrs from the final queue are returned to the upper layer
  * by setting a dasd_block state and calling the callback function
@@ -1895,40 +1867,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        struct dasd_block *block;
-       void (*callback)(struct dasd_ccw_req *, void *data);
-       void *callback_data;
-       char errorstring[ERRORLENGTH];
 
        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
                block = cqr->block;
-               callback = cqr->callback;
-               callback_data = cqr->callback_data;
-               if (block)
+               if (!block) {
+                       __dasd_process_cqr(device, cqr);
+               } else {
                        spin_lock_bh(&block->queue_lock);
-               switch (cqr->status) {
-               case DASD_CQR_SUCCESS:
-                       cqr->status = DASD_CQR_DONE;
-                       break;
-               case DASD_CQR_ERROR:
-                       cqr->status = DASD_CQR_NEED_ERP;
-                       break;
-               case DASD_CQR_CLEARED:
-                       cqr->status = DASD_CQR_TERMINATED;
-                       break;
-               default:
-                       /* internal error 12 - wrong cqr status*/
-                       snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
-                       dev_err(&device->cdev->dev,
-                               "An error occurred in the DASD device driver, "
-                               "reason=%s\n", errorstring);
-                       BUG();
-               }
-               if (cqr->callback != NULL)
-                       (callback)(cqr, callback_data);
-               if (block)
+                       __dasd_process_cqr(device, cqr);
                        spin_unlock_bh(&block->queue_lock);
+               }
        }
 }
 
@@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
        cqr->callback_data = req;
        cqr->status = DASD_CQR_FILLED;
        cqr->dq = dq;
-       *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
 
        blk_mq_start_request(req);
        spin_lock(&block->queue_lock);
@@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
        unsigned long flags;
        int rc = 0;
 
-       cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+       cqr = blk_mq_rq_to_pdu(req);
        if (!cqr)
                return BLK_EH_DONE;
 
@@ -3174,9 +3123,9 @@ static int dasd_alloc_queue(struct dasd_block *block)
        int rc;
 
        block->tag_set.ops = &dasd_mq_ops;
-       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
-       block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
-       block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+       block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+       block->tag_set.nr_hw_queues = nr_hw_queues;
+       block->tag_set.queue_depth = queue_depth;
        block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 
        rc = blk_mq_alloc_tag_set(&block->tag_set);
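
Background sketch, not part of the patch: growing .cmd_size from a pointer
to the full struct dasd_ccw_req makes blk-mq allocate each request's cqr
inline with the request itself, so blk_mq_rq_to_pdu() returns the cqr
directly and the separate per-request pointer stashing disappears.
Placeholder struct below:

    #include <linux/blk-mq.h>

    struct my_cmd { int state; };           /* hypothetical PDU layout */

    static struct my_cmd *req_to_cmd(struct request *req)
    {
            /* blk-mq reserved .cmd_size bytes directly behind req */
            return blk_mq_rq_to_pdu(req);
    }
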
@@ -4038,7 +3987,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
        struct ccw1 *ccw;
        unsigned long *idaw;
 
-       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+       cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+                                  NULL);
 
        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed*/
index 5e963fe0e38d4c2125c43ae801ca7e9b28d98d07..e36a114354fc368e2141aae5c080c7f61b674da8 100644 (file)
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
        int rc;
        unsigned long flags;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
        }
-       dasd_kfree_request(cqr, cqr->memdev);
+       dasd_sfree_request(cqr, cqr->memdev);
        return rc;
 }
 
index 131f1989f6f3dff0345250c71943f5ac338af19c..e1fe02477ea8fca951232dabe7f89754c8f287ff 100644 (file)
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
        /* Build the request */
        datasize = sizeof(struct dasd_diag_req) +
                count*sizeof(struct dasd_diag_bio);
-       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
index be208e7adcb46087e7fb2436fadf8a737d7c472e..bbf95b78ef5d9e4c5903e466e7de3f71b615c9ce 100644 (file)
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
        }
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
                                   0, /* use rcd_buf as data area */
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                              "Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_features)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
                                "allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
                                  sizeof(struct dasd_psf_ssc_data),
-                                 device);
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
 
        cplength = 8;
        datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
         */
        itcw_size = itcw_calc_size(0, count, 0);
 
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
        cplength += count;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                 startdev);
+                                  startdev, NULL);
        if (IS_ERR(cqr))
                return cqr;
 
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
        }
        /* Allocate the format ccw request. */
        fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, NULL);
        if (IS_ERR(fcp))
                return fcp;
 
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
        }
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
-                                  startdev);
+                                  startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
 
        /* Allocate the ccw request. */
        itcw_size = itcw_calc_size(0, ctidaw, 0);
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
        /* Allocate the ccw request. */
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
-                                  datasize, startdev);
+                                  datasize, startdev, blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
                return -EACCES;
 
        useglobal = 0;
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
 
        useglobal = 0;
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
-                                  sizeof(struct dasd_snid_data), device);
+                                  sizeof(struct dasd_snid_data), device,
+                                  NULL);
        if (IS_ERR(cqr)) {
                mutex_lock(&dasd_reserve_mutex);
                useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_perf_stats_t)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                            "Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
        psf1 = psf_data[1];
 
        /* setup CCWs for PSF + RSSD */
-       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                        "Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   (sizeof(struct dasd_psf_prssd_data) +
                                    sizeof(struct dasd_rssd_messages)),
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                   sizeof(struct dasd_psf_prssd_data) + 1,
-                                  device);
+                                  device, NULL);
        if (IS_ERR(cqr)) {
                DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
                                "Could not allocate read message buffer request");
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
        int rc;
 
        cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
-                                 sizeof(struct dasd_psf_cuir_response),
-                                 device);
+                                  sizeof(struct dasd_psf_cuir_response),
+                                  device, NULL);
 
        if (IS_ERR(cqr)) {
                DBF_DEV_EVENT(DBF_WARNING, device, "%s",
index 0af8c5295b650b1132e5946b123b558a08e91ccc..6ef8714dc6935047ec0f48d8cc3f6e34dd77c19c 100644 (file)
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * snss request now.
                 */
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
 
 /*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
        if (rc)
                goto out;
 
-       cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
-                                  SNSS_DATA_SIZE, device);
+       cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+                                  SNSS_DATA_SIZE, device, NULL);
        if (IS_ERR(cqr)) {
                rc = -ENOMEM;
                cqr = NULL;
@@ -505,7 +505,7 @@ out:
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
        if (cqr)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 
        return rc;
 }
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
-               dasd_kfree_request(cqr, device);
+               dasd_sfree_request(cqr, device);
 }
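
Note that the EER path now allocates its SNSS request with dasd_smalloc_request(), so every free site must use the matching dasd_sfree_request(); the former kmalloc-based variant and the pool-based free are not interchangeable. The pairing, as used in the hunks above:

    cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
                               SNSS_DATA_SIZE, device, NULL);
    ...
    dasd_sfree_request(cqr, device);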
 
 /*
index a6b132f7e869eb4eb804b3fa8407cd064c92b699..56007a3e7f110358e27ad74563f24e428cbae473 100644 (file)
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
        datasize = sizeof(struct DE_fba_data) +
                nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
 
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
 
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
                datasize += (count - 1)*sizeof(struct LO_fba_data);
        }
        /* Allocate the ccw request. */
-       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+       cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+                                  blk_mq_rq_to_pdu(req));
        if (IS_ERR(cqr))
                return cqr;
        ccw = cqr->cpaddr;
index 96709b1a7bf8d8af0f4e0db7748cd5ac8e5a8650..de6b96036aa40fb104e84c6c9e58ba89beebb232 100644 (file)
@@ -158,40 +158,33 @@ do { \
 
 struct dasd_ccw_req {
        unsigned int magic;             /* Eye catcher */
+       int intrc;                      /* internal error, e.g. from start_IO */
        struct list_head devlist;       /* for dasd_device request queue */
        struct list_head blocklist;     /* for dasd_block request queue */
-
-       /* Where to execute what... */
        struct dasd_block *block;       /* the originating block device */
        struct dasd_device *memdev;     /* the device used to allocate this */
        struct dasd_device *startdev;   /* device the request is started on */
        struct dasd_device *basedev;    /* base device if no block->base */
        void *cpaddr;                   /* address of ccw or tcw */
+       short retries;                  /* A retry counter */
        unsigned char cpmode;           /* 0 = cmd mode, 1 = itcw */
        char status;                    /* status of this request */
-       short retries;                  /* A retry counter */
+       char lpm;                       /* logical path mask */
        unsigned long flags;            /* flags of this request */
        struct dasd_queue *dq;
-
-       /* ... and how */
        unsigned long starttime;        /* jiffies time of request start */
        unsigned long expires;          /* expiration period in jiffies */
-       char lpm;                       /* logical path mask */
        void *data;                     /* pointer to data area */
-
-       /* these are important for recovering erroneous requests          */
-       int intrc;                      /* internal error, e.g. from start_IO */
        struct irb irb;                 /* device status in case of an error */
        struct dasd_ccw_req *refers;    /* ERP-chain queueing. */
        void *function;                 /* originating ERP action */
+       void *mem_chunk;
 
-       /* these are for statistics only */
        unsigned long buildclk;         /* TOD-clock of request generation */
        unsigned long startclk;         /* TOD-clock of request start */
        unsigned long stopclk;          /* TOD-clock of request interrupt */
        unsigned long endclk;           /* TOD-clock of request termination */
 
-        /* Callback that is called after reaching final status. */
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
 };
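
The reordered struct drops the old grouping comments and adds one new member, mem_chunk, which remembers the start of the allocation a request was carved from so that the free path can return the whole chunk. A rough sketch of how such a combined allocation might be laid out (illustrative only: a plain kzalloc() stands in for the driver's per-device chunk allocator, and the exact layout is an assumption, not lifted from dasd.c):

    struct dasd_ccw_req *cqr;
    char *chunk;

    /* One contiguous chunk: request header, channel program, data area. */
    chunk = kzalloc(sizeof(*cqr) + cplength * sizeof(struct ccw1) + datasize,
                    GFP_DMA);
    if (!chunk)
            return ERR_PTR(-ENOMEM);
    cqr = (struct dasd_ccw_req *)chunk;
    cqr->mem_chunk = chunk;            /* what the free path gives back */
    cqr->cpaddr = chunk + sizeof(*cqr);
    cqr->data = chunk + sizeof(*cqr) + cplength * sizeof(struct ccw1);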
@@ -235,14 +228,6 @@ struct dasd_ccw_req {
 #define DASD_CQR_SUPPRESS_IL   6       /* Suppress 'Incorrect Length' error */
 #define DASD_CQR_SUPPRESS_CR   7       /* Suppress 'Command Reject' error */
 
-/*
- * There is no reliable way to determine the number of available CPUs on
- * LPAR but there is no big performance difference between 1 and the
- * maximum CPU number.
- * 64 is a good trade off performance wise.
- */
-#define DASD_NR_HW_QUEUES 64
-#define DASD_MAX_LCU_DEV 256
 #define DASD_REQ_PER_DEV 4
 
 /* Signature for error recovery functions. */
@@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations;
 extern struct kmem_cache *dasd_page_cache;
 
 struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
 void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
 void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
 
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
-       return set_normalized_cda(ccw, cda);
-}
-
 struct dasd_device *dasd_alloc_device(void);
 void dasd_free_device(struct dasd_device *);
 
index a070ef0efe65d0079cc10245b1ed8b79b8e8fba9..f230516abb96d31b4eabb2689a7230905857c48f 100644 (file)
@@ -5,6 +5,7 @@
 
 # The following is required for define_trace.h to find ./trace.h
 CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
 
 obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
        fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
index dce92b2a895d6ff3bbe38104ed08ea32c7979432..dbe7c7ac9ac8c8c4456f142b14c740d3bdc0c5e6 100644 (file)
 #define CCWCHAIN_LEN_MAX       256
 
 struct pfn_array {
+       /* Starting guest physical I/O address. */
        unsigned long           pa_iova;
+       /* Array that stores PFNs of the pages that need to be pinned. */
        unsigned long           *pa_iova_pfn;
+       /* Array that receives PFNs of the pages pinned. */
        unsigned long           *pa_pfn;
+       /* Number of pages pinned from @pa_iova. */
        int                     pa_nr;
 };
 
@@ -46,70 +50,33 @@ struct ccwchain {
 };
 
 /*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
  * @pa: pfn_array on which to perform the operation
  * @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
  *
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
  *
  * Usage of pfn_array:
- * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- *                  by caller.
- * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated by
- *                  caller.
- * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
- *                  caller.
- *                  number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); every field in
+ * this structure will be filled in by this function.
  *
  * Returns:
  *   Number of pages pinned on success.
- *   If @pa->pa_nr is 0 or negative, returns 0.
+ *   If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ *   returns -EINVAL.
  *   If no pages were pinned, returns -errno.
  */
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
-       int i, ret;
-
-       if (pa->pa_nr <= 0) {
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
-       for (i = 1; i < pa->pa_nr; i++)
-               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
-       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
-                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
-       if (ret > 0 && ret != pa->pa_nr) {
-               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
-               pa->pa_nr = 0;
-               return 0;
-       }
-
-       return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
-       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
-       pa->pa_nr = 0;
-       kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
 static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
 {
-       int ret = 0;
+       int i, ret = 0;
 
        if (!len)
                return 0;
 
-       if (pa->pa_nr)
+       if (pa->pa_nr || pa->pa_iova_pfn)
                return -EINVAL;
 
        pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
 
-       ret = pfn_array_pin(pa, mdev);
+       pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+       for (i = 1; i < pa->pa_nr; i++)
+               pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
 
-       if (ret > 0)
-               return ret;
-       else if (!ret)
+       ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+                            IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+       if (ret < 0) {
+               goto err_out;
+       } else if (ret > 0 && ret != pa->pa_nr) {
+               vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                ret = -EINVAL;
+               goto err_out;
+       }
 
+       return ret;
+
+err_out:
+       pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
+       pa->pa_iova_pfn = NULL;
 
        return ret;
 }
 
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+       vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+       pa->pa_nr = 0;
+       kfree(pa->pa_iova_pfn);
+}
+
 static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
 {
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
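
pfn_array_pin() is folded into pfn_array_alloc_pin(), which now owns the whole allocate/populate/pin sequence and, on any failure, unwinds to a clean state (pa_nr back to 0, pa_iova_pfn freed and NULLed) so the structure can be retried. Condensed from the hunk above (error labels elided):

    pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
    for (i = 1; i < pa->pa_nr; i++)
            pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

    ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
                         IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
    if (ret < 0)
            goto err_out;                     /* free + reset fields */
    if (ret > 0 && ret != pa->pa_nr) {
            vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
            ret = -EINVAL;                    /* a partial pin is an error */
            goto err_out;
    }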
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
  * This is the chain length not considering any TICs.
  * You need to do a new round for each TIC target.
  *
+ * The program is also validated for absence of not yet supported
+ * indirect data addressing scenarios.
+ *
  * Returns: the length of the ccw chain or -errno.
  */
 static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
        do {
                cnt++;
 
+               /*
+                * As we don't want to fail direct addressing even if the
+                * orb specified one of the unsupported formats, we defer
+                * checking for IDAWs in unsupported formats to here.
+                */
+               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+                       return -EOPNOTSUPP;
+
                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;
 
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
-       int idaw_nr;
+       int ret;
 
        ccw = chain->ch_ccw + idx;
 
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
         * needed when translating a direct ccw to a idal ccw.
         */
        pat = chain->ch_pat + idx;
-       if (pfn_array_table_init(pat, 1))
-               return -ENOMEM;
-       idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
-                                     ccw->cda, ccw->count);
-       if (idaw_nr < 0)
-               return idaw_nr;
+       ret = pfn_array_table_init(pat, 1);
+       if (ret)
+               goto out_init;
+
+       ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+       if (ret < 0)
+               goto out_init;
 
        /* Translate this direct ccw to a idal ccw. */
-       idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+       idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
-               pfn_array_table_unpin_free(pat, cp->mdev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unpin;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
        pfn_array_table_idal_create_words(pat, idaws);
 
        return 0;
+
+out_unpin:
+       pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
+       return ret;
 }
 
 static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
-               return ret;
+               goto out_init;
 
        /* Translate idal ccw to use new allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ out_free_idaws:
        kfree(idaws);
 out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+       ccw->cda = 0;
        return ret;
 }
 
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        /*
         * XXX:
         * Only support prefetch enable mode now.
-        * Only support 64bit addressing idal.
-        * Only support 4k IDAW.
         */
-       if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+       if (!orb->cmd.pfch)
                return -EOPNOTSUPP;
 
        INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);
+       /* It is safe to force: if the bit is not set but IDALs are used,
+        * ccwchain_calc_length() returns an error.
+        */
+       cp->orb.cmd.c64 = 1;
 
        return ret;
 }
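
With the ORB sanity check in cp_init() reduced to prefetch-only, the unsupported IDAW formats are rejected lazily in ccwchain_calc_length(), so guests that use only direct addressing keep working even if their ORB advertises an unsupported format. The deferred test is the one added above:

    if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
            return -EOPNOTSUPP;

Forcing cp->orb.cmd.c64 = 1 afterwards is then safe: any chain that actually contained an IDAL in an unsupported format has already failed with -EOPNOTSUPP.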
index ea6a2d0b2894decac95c3421c544183ee89c3383..770fa9cfc31041dd84a78a00f0f4135bef5a79ed 100644 (file)
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 {
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
+       int rc = -EAGAIN;
 
        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
 
        if (cio_update_schib(sch)) {
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+               rc = 0;
                goto out_unlock;
        }
 
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
                private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
                                 VFIO_CCW_STATE_STANDBY;
        }
+       rc = 0;
 
 out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);
 
-       return 0;
+       return rc;
 }
 
 static struct css_device_id vfio_ccw_sch_ids[] = {
index 3c800642134e4330d62bb8c0053df62618840ff3..797a82731159a5f9f584810f924adc3467b1e702 100644 (file)
@@ -13,6 +13,9 @@
 #include "ioasm.h"
 #include "vfio_ccw_private.h"
 
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
 static int fsm_io_helper(struct vfio_ccw_private *private)
 {
        struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
         */
        cio_disable_subchannel(sch);
 }
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+       return p->sch->schid;
+}
 
 /*
  * Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
        union scsw *scsw = &private->scsw;
        struct ccw_io_region *io_region = &private->io_region;
        struct mdev_device *mdev = private->mdev;
+       char *errstr = "request";
 
        private->state = VFIO_CCW_STATE_BOXED;
 
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Don't try to build a cp if transport mode is specified. */
                if (orb->tm.b) {
                        io_region->ret_code = -EOPNOTSUPP;
+                       errstr = "transport mode";
                        goto err_out;
                }
                io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
                                              orb);
-               if (io_region->ret_code)
+               if (io_region->ret_code) {
+                       errstr = "cp init";
                        goto err_out;
+               }
 
                io_region->ret_code = cp_prefetch(&private->cp);
                if (io_region->ret_code) {
+                       errstr = "cp prefetch";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
                /* Start channel program and wait for I/O interrupt. */
                io_region->ret_code = fsm_io_helper(private);
                if (io_region->ret_code) {
+                       errstr = "cp fsm_io_helper";
                        cp_free(&private->cp);
                        goto err_out;
                }
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 
 err_out:
        private->state = VFIO_CCW_STATE_IDLE;
+       trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+                              io_region->ret_code, errstr);
 }
 
 /*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644 (file)
index 0000000..b1da53d
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ *            Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+       TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+       TP_ARGS(fctl, schid, errno, errstr),
+
+       TP_STRUCT__entry(
+               __field(int, fctl)
+               __field_struct(struct subchannel_id, schid)
+               __field(int, errno)
+               __field(char*, errstr)
+       ),
+
+       TP_fast_assign(
+               __entry->fctl = fctl;
+               __entry->schid = schid;
+               __entry->errno = errno;
+               __entry->errstr = errstr;
+       ),
+
+       TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+                 __entry->schid.cssid,
+                 __entry->schid.ssid,
+                 __entry->schid.sch_no,
+                 __entry->fctl,
+                 __entry->errno,
+                 __entry->errstr)
+);
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
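
The header follows the standard kernel tracepoint recipe: exactly one translation unit defines CREATE_TRACE_POINTS before including it (vfio_ccw_fsm.c in the hunk further up), and since TRACE_INCLUDE_PATH is '.', define_trace.h needs that file's directory on the include path, which is what the Makefile hunk's CFLAGS_vfio_ccw_fsm.o := -I$(src) arranges. The consumer side then reduces to:

    /* In exactly one .c file of the driver: */
    #define CREATE_TRACE_POINTS
    #include "vfio_ccw_trace.h"

    /* Fire the event wherever an I/O request terminates: */
    trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
                           io_region->ret_code, errstr);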
index 2a5fec55bf60f6f30fd684e1c8c5c9a594aa8e75..a246a618f9a497047e4a81614f38da1eb295ef0b 100644 (file)
@@ -829,6 +829,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+                                         unsigned int elements)
+{
+       unsigned int i;
+
+       for (i = 0; i < elements; i++)
+               memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+       buf->element[14].sflags = 0;
+       buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -     find number of SBALEs to cover range.
  * @start:                             Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
index 8e1474f1ffacfb22b773b02aa1bff6ff91c61ce9..d01ac29fd986d82b84b7215c5268c37e94aaadaa 100644 (file)
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-               struct qeth_qdio_out_buffer *buf,
-               enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        struct qaob *aob;
        struct qeth_qdio_out_buffer *buffer;
        enum iucv_tx_notify notification;
+       unsigned int i;
 
        aob = (struct qaob *) phys_to_virt(phys_aob_addr);
        QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        qeth_notify_skbs(buffer->q, buffer, notification);
 
        buffer->aob = NULL;
-       qeth_clear_output_buffer(buffer->q, buffer,
-                                QETH_QDIO_BUF_HANDLED_DELAYED);
+       /* Free dangling allocations. The attached skbs are handled by
+        * qeth_cleanup_handled_pending().
+        */
+       for (i = 0;
+            i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+            i++) {
+               if (aob->sba[i] && buffer->is_header[i])
+                       kmem_cache_free(qeth_core_header_cache,
+                                       (void *) aob->sba[i]);
+       }
+       atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-       /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
 
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                        QETH_CARD_TEXT(queue->card, 5, "aob");
                        QETH_CARD_TEXT_(queue->card, 5, "%lx",
                                        virt_to_phys(buffer->aob));
+
+                       /* prepare the queue slot for re-use: */
+                       qeth_scrub_qdio_buffer(buffer->buffer,
+                                              QETH_MAX_BUFFER_ELEMENTS(card));
                        if (qeth_init_qdio_out_buf(queue, bidx)) {
                                QETH_CARD_TEXT(card, 2, "outofbuf");
                                qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                goto out;
        }
 
-       ccw_device_get_id(CARD_RDEV(card), &id);
+       ccw_device_get_id(CARD_DDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION2;
        request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
                          NETIF_F_IPV6_CSUM)
 /**
- * qeth_recover_features() - Restore device features after recovery
- * @dev:       the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev:       a net_device
  */
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
 {
-       netdev_features_t features = dev->features;
        struct qeth_card *card = dev->ml_priv;
+       netdev_features_t features;
 
+       rtnl_lock();
+       features = dev->features;
        /* force-off any feature that needs an IPA sequence.
         * netdev_update_features() will restart them.
         */
        dev->features &= ~QETH_HW_FEATURES;
        netdev_update_features(dev);
-
-       if (features == dev->features)
-               return;
-       dev_warn(&card->gdev->dev,
-                "Device recovery failed to restore all offload features\n");
+       if (features != dev->features)
+               dev_warn(&card->gdev->dev,
+                        "Device recovery failed to restore all offload features\n");
+       rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
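
The renamed qeth_enable_hw_features() now takes the rtnl lock itself, turning it into a self-contained helper that the l2/l3 set-online paths (below) can call unconditionally rather than only inside their recovery branches. Its core is the usual force-off/re-enable idiom for offloads that need an IPA command sequence:

    dev->features &= ~QETH_HW_FEATURES;  /* force-off IPA-backed offloads   */
    netdev_update_features(dev);         /* re-negotiates and restores them */

If any feature fails to come back, the mismatch against the saved feature mask triggers the recovery warning.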
index a7cb37da6a21313eda8d03119135f1475d35f47d..2487f0aeb165c1afae905540d1ff547f7fab4f54 100644 (file)
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 
 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
 
+       /* avoid racing against concurrent state change: */
+       if (!mutex_trylock(&card->conf_mutex))
+               return -EAGAIN;
+
        if (!qeth_card_hw_is_reachable(card)) {
                ether_addr_copy(dev->dev_addr, addr->sa_data);
-               return 0;
+               goto out_unlock;
        }
 
        /* don't register the same address twice */
        if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
            (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
+               goto out_unlock;
 
        /* add the new address, switch over, drop the old */
        rc = qeth_l2_send_setmac(card, addr->sa_data);
        if (rc)
-               return rc;
+               goto out_unlock;
        ether_addr_copy(old_addr, dev->dev_addr);
        ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
                qeth_l2_remove_mac(card, old_addr);
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-       return 0;
+
+out_unlock:
+       mutex_unlock(&card->conf_mutex);
+       return rc;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_off(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                if (recovery_mode &&
                    card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                }
                /* this also sets saved unicast addresses */
                qeth_l2_set_rx_mode(card->dev);
-               rtnl_lock();
-               qeth_recover_features(card->dev);
-               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
index e7fa479adf47e0dd41bfacaed8fd347bc40b5581..5905dc63e2569baf761611ad25bf3b91786a3235 100644 (file)
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_on(card->dev);
        else
                netif_carrier_off(card->dev);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                rtnl_lock();
                if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                else
                        dev_open(card->dev);
                qeth_l3_set_rx_mode(card->dev);
-               qeth_recover_features(card->dev);
                rtnl_unlock();
        }
        qeth_trace_features(card);
index a9831bd37a73d52462489d025c1631a576ca2fbc..a57f3a7d47488e5aac06d24e47b14eb90fc9615c 100644 (file)
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
        u32 lun_count, nexus;
        u32 i, bus, target;
        u8 expose_flag, attribs;
-       u8 devtype;
 
        lun_count = aac_get_safw_phys_lun_count(dev);
 
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
                        continue;
 
                if (expose_flag != 0) {
-                       devtype = AAC_DEVTYPE_RAID_MEMBER;
-                       goto update_devtype;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_RAID_MEMBER;
+                       continue;
                }
 
                if (nexus != 0 && (attribs & 8)) {
-                       devtype = AAC_DEVTYPE_NATIVE_RAW;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_NATIVE_RAW;
                        dev->hba_map[bus][target].rmw_nexus =
                                        nexus;
                } else
-                       devtype = AAC_DEVTYPE_ARC_RAW;
+                       dev->hba_map[bus][target].devtype =
+                               AAC_DEVTYPE_ARC_RAW;
 
                dev->hba_map[bus][target].scan_counter = dev->scan_counter;
 
                aac_set_safw_target_qd(dev, bus, target);
-
-update_devtype:
-               dev->hba_map[bus][target].devtype = devtype;
        }
 }
 
index 0a9b8b387bd2e70e87310ef7908012a46f32942f..02d65dce74e504230ceb3080b58972d8d5dff950 100644 (file)
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
                ioa_cfg->hrrq[i].allow_interrupts = 0;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
 
        /* Set interrupt mask to stop all new interrupts */
        if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
                ioa_cfg->hrrq[i].allow_interrupts = 1;
                spin_unlock(&ioa_cfg->hrrq[i]._lock);
        }
-       wmb();
        if (ioa_cfg->sis64) {
                /* Set the adapter to the correct endian mode. */
                writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
index 0fea2e2326becbf4993dd7cc216e36dad529d678..1027b0cb7fa3634baf0bd870ffdc93e9286cac8e 100644 (file)
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
 void qlt_schedule_sess_for_deletion(struct fc_port *sess)
 {
        struct qla_tgt *tgt = sess->tgt;
-       struct qla_hw_data *ha = sess->vha->hw;
        unsigned long flags;
 
        if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
                        return;
        }
 
-       spin_lock_irqsave(&ha->tgt.sess_lock, flags);
        if (sess->deleted == QLA_SESS_DELETED)
                sess->logout_on_delete = 0;
 
+       spin_lock_irqsave(&sess->vha->work_lock, flags);
        if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
-               spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+               spin_unlock_irqrestore(&sess->vha->work_lock, flags);
                return;
        }
        sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
-       spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+       spin_unlock_irqrestore(&sess->vha->work_lock, flags);
 
        sess->disc_state = DSC_DELETE_PEND;
 
index 24d7496cd9e23cfc2a97126fc22b5f4c25a253b0..364e71861bfd5c2c17caf1e93cdeae669e95b971 100644 (file)
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
        int k = sdebug_add_host;
 
        stop_all_queued();
-       free_all_queued();
        for (; k; k--)
                sdebug_remove_adapter();
+       free_all_queued();
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
index 1da3d71e9f61f784e8131093bd5378d94bb98745..13948102ca298cf1a20d45d49781aa4dee55d851 100644 (file)
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
 
        /* the blk_end_sync_io() doesn't check the error */
        if (inflight)
-               blk_mq_complete_request(req);
+               __blk_complete_request(req);
        return BLK_EH_DONE;
 }
 
index 53ae52dbff84afd2021e80b7c1329cb7c53117c2..cd2fdac000c9e23fbf1df8cca453f30379b22d66 100644 (file)
@@ -51,6 +51,7 @@ static int sg_version_num = 30536;    /* 2 digits for each component */
 #include <linux/atomic.h>
 #include <linux/ratelimit.h>
 #include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
        sdev_prefix_printk(prefix, (sdp)->device,               \
                           (sdp)->disk->disk_name, fmt, ##a)
 
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+       if (filp->f_cred != current_real_cred()) {
+               pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+                       caller, task_tgid_vnr(current), current->comm);
+               return -EPERM;
+       }
+       if (uaccess_kernel()) {
+               pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+                       caller, task_tgid_vnr(current), current->comm);
+               return -EACCES;
+       }
+       return 0;
+}
+
 static int sg_allow_access(struct file *filp, unsigned char *cmd)
 {
        struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
        struct sg_header *old_hdr = NULL;
        int retval = 0;
 
+       /*
+        * This could cause a response to be stranded. Close the associated
+        * file descriptor to free up any resources being held.
+        */
+       retval = sg_check_file_access(filp, __func__);
+       if (retval)
+               return retval;
+
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
        SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
        struct sg_header old_hdr;
        sg_io_hdr_t *hp;
        unsigned char cmnd[SG_MAX_CDB_SIZE];
+       int retval;
 
-       if (unlikely(uaccess_kernel()))
-               return -EINVAL;
+       retval = sg_check_file_access(filp, __func__);
+       if (retval)
+               return retval;
 
        if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                return -ENXIO;
index 36f59a1be7e9a60be61c2b1ba8f7468dfbd8c6c9..61389bdc7926690100fc0a38fc59e8b6a73853ab 100644 (file)
@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
 static int scsifront_sdev_configure(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateConnected);
+               if (err) {
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+                       return err;
+               }
+       }
 
        return 0;
 }
@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
 static void scsifront_sdev_destroy(struct scsi_device *sdev)
 {
        struct vscsifrnt_info *info = shost_priv(sdev->host);
+       int err;
 
-       if (info && current == info->curr)
-               xenbus_printf(XBT_NIL, info->dev->nodename,
+       if (info && current == info->curr) {
+               err = xenbus_printf(XBT_NIL, info->dev->nodename,
                              info->dev_state_path, "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing dev_state_path", __func__);
+       }
 }
 
 static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 
                        if (scsi_add_device(info->host, chn, tgt, lun)) {
                                dev_err(&dev->dev, "scsi_add_device\n");
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateClosed);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
                        }
                        break;
                case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
                        }
                        break;
                case VSCSIFRONT_OP_READD_LUN:
-                       if (device_state == XenbusStateConnected)
-                               xenbus_printf(XBT_NIL, dev->nodename,
+                       if (device_state == XenbusStateConnected) {
+                               err = xenbus_printf(XBT_NIL, dev->nodename,
                                              info->dev_state_path,
                                              "%d", XenbusStateConnected);
+                               if (err)
+                                       xenbus_dev_error(dev, err,
+                                               "%s: writing dev_state_path", __func__);
+                       }
                        break;
                default:
                        break;
index f4e3bd40c72e60c0448c98456f7b53f6be7936bd..6ef18cf8f24387e324cf455ae98c30f2b27c95d3 100644 (file)
 
 #define GPC_M4_PU_PDN_FLG              0x1bc
 
-
-#define PGC_MIPI                       4
-#define PGC_PCIE                       5
-#define PGC_USB_HSIC                   8
+/*
+ * The PGC offset values in the Reference Manual
+ * (Rev. 1, 01/2018 and older) GPC chapter's
+ * GPC_PGC memory map are incorrect; the offset
+ * values below are taken from the design RTL.
+ */
+#define PGC_MIPI                       16
+#define PGC_PCIE                       17
+#define PGC_USB_HSIC                   20
 #define GPC_PGC_CTRL(n)                        (0x800 + (n) * 0x40)
 #define GPC_PGC_SR(n)                  (GPC_PGC_CTRL(n) + 0xc)
 
index 9dc02f390ba314bf8cfbb1b86223af1859af2507..5856e792d09c8d317b01627c2d03a97eeaebff37 100644 (file)
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
 
 config QCOM_COMMAND_DB
        bool "Qualcomm Command DB"
-       depends on (ARCH_QCOM && OF) || COMPILE_TEST
+       depends on ARCH_QCOM || COMPILE_TEST
+       depends on OF_RESERVED_MEM
        help
          Command DB queries shared memory by key string for shared system
          resources. Platform drivers that require to set state of a shared
index 95120acc4d806da630f49737cca1eede3286edae..50d03d8b4f9a55f50d52f328039afe0c27991740 100644 (file)
@@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
 
 static bool has_cpg_mstp;
 
-static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
 {
        struct generic_pm_domain *genpd = &pd->genpd;
        const char *name = pd->genpd.name;
        struct dev_power_governor *gov = &simple_qos_governor;
+       int error;
 
        if (pd->flags & PD_CPU) {
                /*
@@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
        rcar_sysc_power_up(&pd->ch);
 
 finalize:
-       pm_genpd_init(genpd, gov, false);
+       error = pm_genpd_init(genpd, gov, false);
+       if (error)
+               pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+       return error;
 }
 
 static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void)
        pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
        iowrite32(syscier, base + SYSCIER);
 
+       /*
+        * First, create all PM domains
+        */
        for (i = 0; i < info->num_areas; i++) {
                const struct rcar_sysc_area *area = &info->areas[i];
                struct rcar_sysc_pd *pd;
@@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void)
                pd->ch.isr_bit = area->isr_bit;
                pd->flags = area->flags;
 
-               rcar_sysc_pd_setup(pd);
-               if (area->parent >= 0)
-                       pm_genpd_add_subdomain(domains->domains[area->parent],
-                                              &pd->genpd);
+               error = rcar_sysc_pd_setup(pd);
+               if (error)
+                       goto out_put;
 
                domains->domains[area->isr_bit] = &pd->genpd;
        }
 
+       /*
+        * Second, link all PM domains to their parents
+        */
+       for (i = 0; i < info->num_areas; i++) {
+               const struct rcar_sysc_area *area = &info->areas[i];
+
+               if (!area->name || area->parent < 0)
+                       continue;
+
+               error = pm_genpd_add_subdomain(domains->domains[area->parent],
+                                              domains->domains[area->isr_bit]);
+               if (error)
+                       pr_warn("Failed to add PM subdomain %s to parent %u\n",
+                               area->name, area->parent);
+       }
+
        error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
 
 out_put:
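
Initialisation is now two passes: the first loop creates and registers every PM domain (with pm_genpd_init() failures finally propagated), and only the second loop wires up parent/child links, so a parent domain is guaranteed to exist before pm_genpd_add_subdomain() runs. Schematically (create_domain()/link_to_parent() are shorthand for the calls above, not real functions):

    for (i = 0; i < info->num_areas; i++)
            create_domain(&info->areas[i]);    /* rcar_sysc_pd_setup()      */

    for (i = 0; i < info->num_areas; i++)
            link_to_parent(&info->areas[i]);   /* pm_genpd_add_subdomain()  */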
index e8c4403297082898c54d0aacf09c778a399aa2d0..31db510018a9462ead01b016f02c4952c6871c96 100644 (file)
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
        struct page **tmp = pages;
 
        if (!pages)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
index ea194aa01a642e0c691c9cb8b78e9d8ef43cfa79..257b0daff01f21317cf1e8f1a251efdbd9dc1189 100644 (file)
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
        /* Make sure D/A update mode is direct update */
        outb(0, dev->iobase + DAQP_AUX_REG);
 
-       for (i = 0; i > insn->n; i++) {
+       for (i = 0; i < insn->n; i++) {
                unsigned int val = data[i];
                int ret;
 
index 45c05527a57a327a7490acfe889581cfe0bb41ba..faf4b4158cfa2c174ea2f68c3829f7933d64c35f 100644 (file)
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf,  int len)
                return _FAIL;
 
 
-       if (len > MAX_IE_SZ)
+       if (len < 0 || len > MAX_IE_SZ)
                return _FAIL;
 
        pbss_network->IELength = len;
index 7947edb239a13b7d7752cf2e2a82f0814424306e..88ba5b2fea6acdb47863ba959f61eb05318f75a7 100644 (file)
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
                return;
 
        pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
-       pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7));
+       pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
 
        pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
        pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
index 012fb618840b05e910e6d68fba38e044dfc26ab0..a45f0eb69d3f2fdb1b14df330a5dbf8cb0ddc24d 100644 (file)
@@ -88,6 +88,7 @@
 #define RTL_USB_MAX_RX_COUNT                   100
 #define QBSS_LOAD_SIZE                         5
 #define MAX_WMMELE_LENGTH                      64
+#define ASPM_L1_LATENCY                                7
 
 #define TOTAL_CAM_ENTRY                                32
 
index 3aa981fbc8f56c4215344de1d20269cfc1ae9fc3..e45ed08a51668fbe5ba03abc849746471a1a48e1 100644 (file)
@@ -11,6 +11,7 @@ config TYPEC_TCPCI
 
 config TYPEC_RT1711H
        tristate "Richtek RT1711H Type-C chip driver"
+       depends on I2C
        select TYPEC_TCPCI
        help
          Richtek RT1711H Type-C chip driver that works with
index 01ac306131c1f163c6eb6043651a412e3e71dc76..10db5656fd5dcb8e95769a922223b8e88cf23983 100644 (file)
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
                 * Check for overflow of 8byte PRI READ_KEYS payload and
                 * next reservation key list descriptor.
                 */
-               if ((add_len + 8) > (cmd->data_length - 8))
-                       break;
-
-               put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
-               off += 8;
+               if (off + 8 <= cmd->data_length) {
+                       put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+                       off += 8;
+               }
+               /*
+                * SPC5r17: 6.16.2 READ KEYS service action
+                * The ADDITIONAL LENGTH field indicates the number of bytes in
+                * the Reservation key list. The contents of the ADDITIONAL
+                * LENGTH field are not altered based on the allocation length.
+                */
                add_len += 8;
        }
        spin_unlock(&dev->t10_pr.registration_lock);
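
The rework keeps the two READ KEYS quantities separate, as SPC requires: ADDITIONAL LENGTH must report the size of the complete key list no matter how short the initiator's allocation length was, while the payload itself simply stops once the buffer is full. Hence only the write is guarded and the length is counted unconditionally:

    if (off + 8 <= cmd->data_length) {          /* room left in payload? */
            put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
            off += 8;
    }
    add_len += 8;                               /* always counted */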
index 7f96dfa32b9cdf1cbf167fe1b0581e3b94f1a08b..d8dc3d22051f7810efa5faafba0cc71e3ad43040 100644 (file)
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
 }
 
 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
-                            bool bidi)
+                            bool bidi, uint32_t read_len)
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
        for_each_sg(data_sg, sg, data_nents, i) {
                int sg_remaining = sg->length;
                to = kmap_atomic(sg_page(sg)) + sg->offset;
-               while (sg_remaining > 0) {
+               while (sg_remaining > 0 && read_len > 0) {
                        if (block_remaining == 0) {
                                if (from)
                                        kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
                        }
                        copy_bytes = min_t(size_t, sg_remaining,
                                        block_remaining);
+                       if (read_len < copy_bytes)
+                               copy_bytes = read_len;
                        offset = DATA_BLOCK_SIZE - block_remaining;
                        tcmu_flush_dcache_range(from, copy_bytes);
                        memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 
                        sg_remaining -= copy_bytes;
                        block_remaining -= copy_bytes;
+                       read_len -= copy_bytes;
                }
                kunmap_atomic(to - sg->offset);
+               if (read_len == 0)
+                       break;
        }
        if (from)
                kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
 {
        struct se_cmd *se_cmd = cmd->se_cmd;
        struct tcmu_dev *udev = cmd->tcmu_dev;
+       bool read_len_valid = false;
+       uint32_t read_len = se_cmd->data_length;
 
        /*
         * cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
                        cmd->se_cmd);
                entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
-       } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+               goto done;
+       }
+
+       if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+           (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+               read_len_valid = true;
+               if (entry->rsp.read_len < read_len)
+                       read_len = entry->rsp.read_len;
+       }
+
+       if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
                transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
-       } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+               if (!read_len_valid)
+                       goto done;
+               else
+                       se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+       }
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                /* Get Data-In buffer before clean up */
-               gather_data_area(udev, cmd, true);
+               gather_data_area(udev, cmd, true, read_len);
        } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
-               gather_data_area(udev, cmd, false);
+               gather_data_area(udev, cmd, false, read_len);
        } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
                /* TODO: */
        } else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
                        se_cmd->data_direction);
        }
 
-       target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+       if (read_len_valid) {
+               pr_debug("read_len = %d\n", read_len);
+               target_complete_cmd_with_length(cmd->se_cmd,
+                                       entry->rsp.scsi_status, read_len);
+       } else
+               target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
 
 out:
        cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
        /* Initialise the mailbox of the ring buffer */
        mb = udev->mb_addr;
        mb->version = TCMU_MAILBOX_VERSION;
-       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+       mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
        mb->cmdr_off = CMDR_OFF;
        mb->cmdr_size = udev->cmdr_size;
 
index 6281266b8ec0a15721da5196b641b707b8a5c973..a923ebdeb73c80bf845af7acc90dcc20a9c2ce1b 100644 (file)
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
                goto err_free_acl;
        }
        ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+       if (!ret) {
+               /* Notify userspace about the change */
+               kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+       }
        mutex_unlock(&tb->lock);
 
 err_free_acl:
index cbe98bc2b998276fd95b2d8086a6fabcf2351bf7..43174220170924e094567ad6271703bd881e2a6f 100644 (file)
@@ -124,6 +124,8 @@ struct n_tty_data {
        struct mutex output_lock;
 };
 
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
 static inline size_t read_cnt(struct n_tty_data *ldata)
 {
        return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
 
 static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
 {
+       smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
        return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
 }
 
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
 static void reset_buffer_flags(struct n_tty_data *ldata)
 {
        ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
-       ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
        ldata->commit_head = 0;
-       ldata->echo_mark = 0;
        ldata->line_start = 0;
 
        ldata->erasing = 0;
@@ -617,12 +618,19 @@ static size_t __process_echoes(struct tty_struct *tty)
        old_space = space = tty_write_room(tty);
 
        tail = ldata->echo_tail;
-       while (ldata->echo_commit != tail) {
+       while (MASK(ldata->echo_commit) != MASK(tail)) {
                c = echo_buf(ldata, tail);
                if (c == ECHO_OP_START) {
                        unsigned char op;
                        int no_space_left = 0;
 
+                       /*
+                        * Since add_echo_byte() is called without holding
+                        * output_lock, we might see only a portion of a
+                        * multi-byte operation.
+                        */
+                       if (MASK(ldata->echo_commit) == MASK(tail + 1))
+                               goto not_yet_stored;
                        /*
                         * If the buffer byte is the start of a multi-byte
                         * operation, get the next byte, which is either the
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
                                unsigned int num_chars, num_bs;
 
                        case ECHO_OP_ERASE_TAB:
+                               if (MASK(ldata->echo_commit) == MASK(tail + 2))
+                                       goto not_yet_stored;
                                num_chars = echo_buf(ldata, tail + 2);
 
                                /*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
        /* If the echo buffer is nearly full (so that the possibility exists
         * of echo overrun before the next commit), then discard enough
         * data at the tail to prevent a subsequent overrun */
-       while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+       while (ldata->echo_commit > tail &&
+              ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
                if (echo_buf(ldata, tail) == ECHO_OP_START) {
                        if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
                                tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
                        tail++;
        }
 
+ not_yet_stored:
        ldata->echo_tail = tail;
        return old_space - space;
 }
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
        size_t nr, old, echoed;
        size_t head;
 
+       mutex_lock(&ldata->output_lock);
        head = ldata->echo_head;
        ldata->echo_mark = head;
        old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
         * is over the threshold (and try again each time another
         * block is accumulated) */
        nr = head - ldata->echo_tail;
-       if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+       if (nr < ECHO_COMMIT_WATERMARK ||
+           (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+               mutex_unlock(&ldata->output_lock);
                return;
+       }
 
-       mutex_lock(&ldata->output_lock);
        ldata->echo_commit = head;
        echoed = __process_echoes(tty);
        mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
 
 static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
 {
-       *echo_buf_addr(ldata, ldata->echo_head++) = c;
+       *echo_buf_addr(ldata, ldata->echo_head) = c;
+       smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+       ldata->echo_head++;
 }
 
 /**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
        }
 
        seen_alnums = 0;
-       while (ldata->read_head != ldata->canon_head) {
+       while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
                head = ldata->read_head;
 
                /* erase a single possibly multibyte character */
                do {
                        head--;
                        c = read_buf(ldata, head);
-               } while (is_continuation(c, tty) && head != ldata->canon_head);
+               } while (is_continuation(c, tty) &&
+                        MASK(head) != MASK(ldata->canon_head));
 
                /* do not partially erase */
                if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                                 * This info is used to go back the correct
                                 * number of columns.
                                 */
-                               while (tail != ldata->canon_head) {
+                               while (MASK(tail) != MASK(ldata->canon_head)) {
                                        tail--;
                                        c = read_buf(ldata, tail);
                                        if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
                        finish_erasing(ldata);
                        echo_char(c, tty);
                        echo_char_raw('\n', ldata);
-                       while (tail != ldata->read_head) {
+                       while (MASK(tail) != MASK(ldata->read_head)) {
                                echo_char(read_buf(ldata, tail), tty);
                                tail++;
                        }
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
        struct n_tty_data *ldata;
 
        /* Currently a malloc failure here can panic */
-       ldata = vmalloc(sizeof(*ldata));
+       ldata = vzalloc(sizeof(*ldata));
        if (!ldata)
-               goto err;
+               return -ENOMEM;
 
        ldata->overrun_time = jiffies;
        mutex_init(&ldata->atomic_read_lock);
        mutex_init(&ldata->output_lock);
 
        tty->disc_data = ldata;
-       reset_buffer_flags(tty->disc_data);
-       ldata->column = 0;
-       ldata->canon_column = 0;
-       ldata->num_overrun = 0;
-       ldata->no_room = 0;
-       ldata->lnext = 0;
        tty->closing = 0;
        /* indicate buffer work may resume */
        clear_bit(TTY_LDISC_HALTED, &tty->flags);
        n_tty_set_termios(tty, NULL);
        tty_unthrottle(tty);
-
        return 0;
-err:
-       return -ENOMEM;
 }
 
 static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
        tail = ldata->read_tail;
        nr = head - tail;
        /* Skip EOF-chars.. */
-       while (head != tail) {
+       while (MASK(head) != MASK(tail)) {
                if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
                    read_buf(ldata, tail) == __DISABLED_CHAR)
                        nr--;
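
The n_tty hunks above all lean on the same two idioms: head/tail counters that grow without bound and are reduced with MASK() only at comparison or access time, and an smp_wmb()/smp_rmb() pair so a lockless reader never observes a published echo_head before the byte it guards. A minimal userspace sketch of that single-producer/single-consumer scheme (illustrative names, not the driver's own):

#include <stdatomic.h>
#include <stddef.h>

#define BUF_SIZE 4096			/* must be a power of two */
#define MASK(x)  ((x) & (BUF_SIZE - 1))

struct ring {
	unsigned char buf[BUF_SIZE];
	_Atomic size_t head;		/* advanced by the producer only */
	size_t tail;			/* owned by the consumer */
};

/* Producer: store the byte first, then publish the new head (release). */
static void ring_put(struct ring *r, unsigned char c)
{
	size_t head = atomic_load_explicit(&r->head, memory_order_relaxed);

	r->buf[MASK(head)] = c;
	atomic_store_explicit(&r->head, head + 1, memory_order_release);
}

/* Consumer: acquire head first, so the stored byte is guaranteed visible. */
static int ring_get(struct ring *r, unsigned char *c)
{
	size_t head = atomic_load_explicit(&r->head, memory_order_acquire);

	/* Compare masked indices, mirroring the MASK() compares above. */
	if (MASK(head) == MASK(r->tail))
		return 0;		/* nothing to consume */
	*c = r->buf[MASK(r->tail)];
	r->tail++;
	return 1;
}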
index df93b727e984ee3d185fa0f5a42cad09d63a3f65..9e59f4788589c879358ce12507362baec459533d 100644 (file)
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
 static void __exit serdev_exit(void)
 {
        bus_unregister(&serdev_bus_type);
+       ida_destroy(&ctrl_ida);
 }
 module_exit(serdev_exit);
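
For reference, ida_destroy() frees whatever internal memory an IDA still holds, which is why it belongs in the module's teardown path. The usual lifecycle, as a hedged sketch (the example_* names are made up):

static DEFINE_IDA(example_ida);

static int example_alloc_id(void)
{
	/* IDs handed out during normal operation... */
	return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

static void __exit example_exit(void)
{
	/* ...must all be released by now; drop the IDA's internal memory. */
	ida_destroy(&example_ida);
}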
 
index 3296a05cda2db8d53b1d869123ed8e2aa884a248..f80a300b5d68f6e8ad61b7daf2544234da7e1662 100644 (file)
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
        /* multi-io cards handled by parport_serial */
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
-       { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
-       { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 
        /* Moxa Smartio MUE boards handled by 8250_moxa */
        { PCI_VDEVICE(MOXA, 0x1024), },
index 1eb1a376a0419d4084cd7a72e1e2c7eb769798f7..15eb6c829d39c5b108adfca034fa50769763c0bc 100644 (file)
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons)      /* return 0 on success */
        if (!*vc->vc_uni_pagedir_loc)
                con_set_default_unimap(vc);
 
-       vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+       vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
        if (!vc->vc_screenbuf)
                goto err_free;
 
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
 
        if (new_screen_size > (4 << 20))
                return -EINVAL;
-       newscreen = kmalloc(new_screen_size, GFP_USER);
+       newscreen = kzalloc(new_screen_size, GFP_USER);
        if (!newscreen)
                return -ENOMEM;
 
index e8f4ac9400ea842a8fe631fe23519e04f565f4e6..5d421d7e8904fc633f9f25c0faf6d69ec1d8f498 100644 (file)
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", idev->info->name);
+       int ret;
+
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               dev_err(dev, "the device has been unregistered\n");
+               goto out;
+       }
+
+       ret = sprintf(buf, "%s\n", idev->info->name);
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 static DEVICE_ATTR_RO(name);
 
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
 {
        struct uio_device *idev = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", idev->info->version);
+       int ret;
+
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               dev_err(dev, "the device has been unregistered\n");
+               goto out;
+       }
+
+       ret = sprintf(buf, "%s\n", idev->info->version);
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 static DEVICE_ATTR_RO(version);
 
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
 static irqreturn_t uio_interrupt(int irq, void *dev_id)
 {
        struct uio_device *idev = (struct uio_device *)dev_id;
-       irqreturn_t ret = idev->info->handler(irq, idev->info);
+       irqreturn_t ret;
 
+       mutex_lock(&idev->info_lock);
+
+       ret = idev->info->handler(irq, idev->info);
        if (ret == IRQ_HANDLED)
                uio_event_notify(idev->info);
 
+       mutex_unlock(&idev->info_lock);
        return ret;
 }
 
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep)
        struct uio_device *idev;
        struct uio_listener *listener;
        int ret = 0;
-       unsigned long flags;
 
        mutex_lock(&minor_lock);
        idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep)
        listener->event_count = atomic_read(&idev->event);
        filep->private_data = listener;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               mutex_unlock(&idev->info_lock);
+               ret = -EINVAL;
+               goto err_alloc_listener;
+       }
+
        if (idev->info && idev->info->open)
                ret = idev->info->open(idev->info, inode);
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
        if (ret)
                goto err_infoopen;
 
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep)
        int ret = 0;
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (idev->info && idev->info->release)
                ret = idev->info->release(idev->info, inode);
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        module_put(idev->owner);
        kfree(listener);
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        __poll_t ret = 0;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (!idev->info || !idev->info->irq)
                ret = -EIO;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        if (ret)
                return ret;
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval = 0;
        s32 event_count;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
        if (!idev->info || !idev->info->irq)
                retval = -EIO;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        if (retval)
                return retval;
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
        struct uio_device *idev = listener->dev;
        ssize_t retval;
        s32 irq_on;
-       unsigned long flags;
 
-       spin_lock_irqsave(&idev->info_lock, flags);
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               retval = -EINVAL;
+               goto out;
+       }
+
        if (!idev->info || !idev->info->irq) {
                retval = -EIO;
                goto out;
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
        retval = idev->info->irqcontrol(idev->info, irq_on);
 
 out:
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
        return retval ? retval : sizeof(s32);
 }
 
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
        struct page *page;
        unsigned long offset;
        void *addr;
+       int ret = 0;
+       int mi;
 
-       int mi = uio_find_mem_index(vmf->vma);
-       if (mi < 0)
-               return VM_FAULT_SIGBUS;
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
+
+       mi = uio_find_mem_index(vmf->vma);
+       if (mi < 0) {
+               ret = VM_FAULT_SIGBUS;
+               goto out;
+       }
 
        /*
         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
                page = vmalloc_to_page(addr);
        get_page(page);
        vmf->page = page;
-       return 0;
+
+out:
+       mutex_unlock(&idev->info_lock);
+
+       return ret;
 }
 
 static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
        struct uio_mem *mem;
+
        if (mi < 0)
                return -EINVAL;
        mem = idev->info->mem + mi;
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
 
        vma->vm_private_data = idev;
 
+       mutex_lock(&idev->info_lock);
+       if (!idev->info) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        mi = uio_find_mem_index(vma);
-       if (mi < 0)
-               return -EINVAL;
+       if (mi < 0) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        requested_pages = vma_pages(vma);
        actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
                        + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
-       if (requested_pages > actual_pages)
-               return -EINVAL;
+       if (requested_pages > actual_pages) {
+               ret = -EINVAL;
+               goto out;
+       }
 
        if (idev->info->mmap) {
                ret = idev->info->mmap(idev->info, vma);
-               return ret;
+               goto out;
        }
 
        switch (idev->info->mem[mi].memtype) {
                case UIO_MEM_PHYS:
-                       return uio_mmap_physical(vma);
+                       ret = uio_mmap_physical(vma);
+                       break;
                case UIO_MEM_LOGICAL:
                case UIO_MEM_VIRTUAL:
-                       return uio_mmap_logical(vma);
+                       ret = uio_mmap_logical(vma);
+                       break;
                default:
-                       return -EINVAL;
+                       ret = -EINVAL;
        }
+
+out:
+       mutex_unlock(&idev->info_lock);
+       return ret;
 }
 
 static const struct file_operations uio_fops = {
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner,
 
        idev->owner = owner;
        idev->info = info;
-       spin_lock_init(&idev->info_lock);
+       mutex_init(&idev->info_lock);
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);
 
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner,
                 * FDs at the time of unregister and therefore may not be
                 * freed until they are released.
                 */
-               ret = request_irq(info->irq, uio_interrupt,
-                                 info->irq_flags, info->name, idev);
+               ret = request_threaded_irq(info->irq, NULL, uio_interrupt,
+                                          info->irq_flags, info->name, idev);
+
                if (ret)
                        goto err_request_irq;
        }
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
 void uio_unregister_device(struct uio_info *info)
 {
        struct uio_device *idev;
-       unsigned long flags;
 
        if (!info || !info->uio_dev)
                return;
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info)
 
        uio_free_minor(idev);
 
+       mutex_lock(&idev->info_lock);
        uio_dev_del_attributes(idev);
 
        if (info->irq && info->irq != UIO_IRQ_CUSTOM)
                free_irq(info->irq, idev);
 
-       spin_lock_irqsave(&idev->info_lock, flags);
        idev->info = NULL;
-       spin_unlock_irqrestore(&idev->info_lock, flags);
+       mutex_unlock(&idev->info_lock);
 
        device_unregister(&idev->dev);
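
Every converted uio entry point above follows the same guard: take info_lock, bail out if idev->info has already been cleared by uio_unregister_device(), and only then dereference it. Condensed into one hedged sketch (uio_show_guarded is a made-up name):

static ssize_t uio_show_guarded(struct uio_device *idev, char *buf)
{
	ssize_t ret;

	mutex_lock(&idev->info_lock);
	if (!idev->info) {
		/* Raced with uio_unregister_device(). */
		ret = -EINVAL;
		goto out;
	}
	ret = sprintf(buf, "%s\n", idev->info->name);
out:
	mutex_unlock(&idev->info_lock);
	return ret;
}

The switch from a spinlock to a mutex is what allows callbacks such as info->open() and the now-threaded IRQ handler to sleep while the lock is held.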
 
index af45aa3222b5ce1c99ccb4de586c988d3b3a0b9f..4638d9b066bea7ad2b35f6c966ee7acec428facf 100644 (file)
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
 
        hcd->power_budget = ci->platdata->power_budget;
        hcd->tpl_support = ci->platdata->tpl_support;
-       if (ci->phy || ci->usb_phy)
+       if (ci->phy || ci->usb_phy) {
                hcd->skip_phy_initialization = 1;
+               if (ci->usb_phy)
+                       hcd->usb_phy = ci->usb_phy;
+       }
 
        ehci = hcd_to_ehci(hcd);
        ehci->caps = ci->hw_bank.cap;
index 7b366a6c0b493f2eb8bec4959830d222223f3cb2..998b32d0167e9970c4aa096520bdd7c9011f9137 100644 (file)
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
        .driver_info = SINGLE_RX_URB,
        },
+       { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
        { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
index c55def2f1320f92c6c0c652fc94c7056165ee467..097057d2eacf7bcb18316473c6f0def8a5744b62 100644 (file)
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Corsair K70 RGB */
        { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
+       /* Corsair Strafe */
+       { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+         USB_QUIRK_DELAY_CTRL_MSG },
+
        /* Corsair Strafe RGB */
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
          USB_QUIRK_DELAY_CTRL_MSG },
index 4a56ac772a3c35360d3834f5542aad22d5b1b720..71b3b08ad516c9fb3bfbd403da1687b5825e7e14 100644 (file)
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
  * @frame_list_sz:      Frame list size
  * @desc_gen_cache:     Kmem cache for generic descriptors
  * @desc_hsisoc_cache:  Kmem cache for hs isochronous descriptors
+ * @unaligned_cache:    Kmem cache for DMA mode to handle non-aligned buffers
  *
  * These are for peripheral mode:
  *
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
        u32 frame_list_sz;
        struct kmem_cache *desc_gen_cache;
        struct kmem_cache *desc_hsisoc_cache;
+       struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
 
 #endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
 
index f0d9ccf1d665ad37b2f23786806bde8c16da11ea..a0f82cca2d9a8e70f0760acbc3b19ced08c7d7dc 100644 (file)
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
        u32 index;
        u32 maxsize = 0;
        u32 mask = 0;
+       u8 pid = 0;
 
        maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
 
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
                         ((len << DEV_DMA_NBYTES_SHIFT) & mask));
 
        if (hs_ep->dir_in) {
-               desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+               if (len)
+                       pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+               else
+                       pid = 1;
+               desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
                                 DEV_DMA_ISOC_PID_MASK) |
                                ((len % hs_ep->ep.maxpacket) ?
                                 DEV_DMA_SHORT : 0) |
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
        struct dwc2_dma_desc *desc;
 
        if (list_empty(&hs_ep->queue)) {
+               hs_ep->target_frame = TARGET_FRAME_INITIAL;
                dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
                return;
        }
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
         */
        tmp = dwc2_hsotg_read_frameno(hsotg);
 
-       dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
        if (using_desc_dma(hsotg)) {
                if (ep->target_frame == TARGET_FRAME_INITIAL) {
                        /* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
 
                tmp = dwc2_hsotg_read_frameno(hsotg);
                if (using_desc_dma(hsotg)) {
-                       dwc2_hsotg_complete_request(hsotg, hs_ep,
-                                                   get_ep_head(hs_ep), 0);
-
                        hs_ep->target_frame = tmp;
                        dwc2_gadget_incr_frame_num(hs_ep);
                        dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
        }
 
        ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-       if (ret)
+       if (ret) {
+               dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+                                          hsotg->ctrl_req);
                return ret;
-
+       }
        dwc2_hsotg_dump(hsotg);
 
        return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
 int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
 {
        usb_del_gadget_udc(&hsotg->gadget);
+       dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
 
        return 0;
 }
index edaf0b6af4f0491ba192d346c29a792a06a85751..b1104be3429c2285677d2101004080e9a30af769 100644 (file)
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
        }
 
        if (hsotg->params.host_dma) {
-               dwc2_writel((u32)chan->xfer_dma,
-                           hsotg->regs + HCDMA(chan->hc_num));
+               dma_addr_t dma_addr;
+
+               if (chan->align_buf) {
+                       if (dbg_hc(chan))
+                               dev_vdbg(hsotg->dev, "align_buf\n");
+                       dma_addr = chan->align_buf;
+               } else {
+                       dma_addr = chan->xfer_dma;
+               }
+               dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
                if (dbg_hc(chan))
                        dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
-                                (unsigned long)chan->xfer_dma, chan->hc_num);
+                                (unsigned long)dma_addr, chan->hc_num);
        }
 
        /* Start the split */
@@ -2625,6 +2634,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
        }
 }
 
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+                                           struct dwc2_qh *qh,
+                                           struct dwc2_host_chan *chan)
+{
+       if (!hsotg->unaligned_cache ||
+           chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+               return -ENOMEM;
+
+       if (!qh->dw_align_buf) {
+               qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+                                                   GFP_ATOMIC | GFP_DMA);
+               if (!qh->dw_align_buf)
+                       return -ENOMEM;
+       }
+
+       qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+                                             DWC2_KMEM_UNALIGNED_BUF_SIZE,
+                                             DMA_FROM_DEVICE);
+
+       if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+               dev_err(hsotg->dev, "can't map align_buf\n");
+               chan->align_buf = 0;
+               return -EINVAL;
+       }
+
+       chan->align_buf = qh->dw_align_buf_dma;
+       return 0;
+}
+
 #define DWC2_USB_DMA_ALIGN 4
 
 struct dma_aligned_buffer {
@@ -2802,6 +2840,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
        /* Set the transfer attributes */
        dwc2_hc_init_xfer(hsotg, chan, qtd);
 
+       /* For non-dword aligned buffers */
+       if (hsotg->params.host_dma && qh->do_split &&
+           chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+               dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+               if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+                       dev_err(hsotg->dev,
+                               "Failed to allocate memory to handle non-aligned buffer\n");
+                       /* Add channel back to free list */
+                       chan->align_buf = 0;
+                       chan->multi_count = 0;
+                       list_add_tail(&chan->hc_list_entry,
+                                     &hsotg->free_hc_list);
+                       qtd->in_process = 0;
+                       qh->channel = NULL;
+                       return -ENOMEM;
+               }
+       } else {
+               /*
+                * We assume that DMA is always aligned in the non-split
+                * case and in the split-out case. Warn if not.
+                */
+               WARN_ON_ONCE(hsotg->params.host_dma &&
+                            (chan->xfer_dma & 0x3));
+               chan->align_buf = 0;
+       }
+
        if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
            chan->ep_type == USB_ENDPOINT_XFER_ISOC)
                /*
@@ -5246,6 +5310,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
                }
        }
 
+       if (hsotg->params.host_dma) {
+               /*
+                * Create a kmem cache to handle non-aligned buffers
+                * in Buffer DMA mode.
+                */
+               hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+                                               DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+                                               SLAB_CACHE_DMA, NULL);
+               if (!hsotg->unaligned_cache)
+                       dev_err(hsotg->dev,
+                               "unable to create dwc2 unaligned cache\n");
+       }
+
        hsotg->otg_port = 1;
        hsotg->frame_list = NULL;
        hsotg->frame_list_dma = 0;
@@ -5280,8 +5357,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
        return 0;
 
 error4:
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 error3:
        dwc2_hcd_release(hsotg);
 error2:
@@ -5322,8 +5400,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
        usb_remove_hcd(hcd);
        hsotg->priv = NULL;
 
-       kmem_cache_destroy(hsotg->desc_gen_cache);
+       kmem_cache_destroy(hsotg->unaligned_cache);
        kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+       kmem_cache_destroy(hsotg->desc_gen_cache);
 
        dwc2_hcd_release(hsotg);
        usb_put_hcd(hcd);
@@ -5435,7 +5514,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
        dwc2_writel(hprt0, hsotg->regs + HPRT0);
 
        /* Wait for the HPRT0.PrtSusp register field to be set */
-       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+       if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
                dev_warn(hsotg->dev, "Suspend wasn't generated\n");
 
        /*
@@ -5616,6 +5695,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
                return ret;
        }
 
+       dwc2_hcd_rem_wakeup(hsotg);
+
        hsotg->hibernated = 0;
        hsotg->bus_suspended = 0;
        hsotg->lx_state = DWC2_L0;
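
The split-transfer fix is a classic bounce buffer: allocate a DMA-safe scratch buffer from the dedicated cache, point the channel's DMA address at it, and copy back into the caller's buffer on completion. Stripped to its essentials (a sketch; the dwc2_bounce_* names are made up):

struct dwc2_bounce {
	void *cpu;
	dma_addr_t dma;
};

static int dwc2_bounce_map(struct device *dev, struct kmem_cache *cache,
			   struct dwc2_bounce *b, size_t size)
{
	b->cpu = kmem_cache_alloc(cache, GFP_ATOMIC | GFP_DMA);
	if (!b->cpu)
		return -ENOMEM;

	b->dma = dma_map_single(dev, b->cpu, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, b->dma)) {
		kmem_cache_free(cache, b->cpu);
		return -EINVAL;
	}
	return 0;	/* hardware now DMAs into b->dma, not the URB buffer */
}

static void dwc2_bounce_copy_back(struct device *dev, struct dwc2_bounce *b,
				  void *dst, size_t len, size_t size)
{
	dma_unmap_single(dev, b->dma, size, DMA_FROM_DEVICE);
	memcpy(dst, b->cpu, len);	/* hand the received bytes back */
}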
index 7db1ee7e7a7781c12100d413abe7e4d62828e951..5502a501f5166640a2926132e4d4e1ee2f450724 100644 (file)
@@ -76,6 +76,8 @@ struct dwc2_qh;
  *                      (micro)frame
  * @xfer_buf:           Pointer to current transfer buffer position
  * @xfer_dma:           DMA address of xfer_buf
+ * @align_buf:          In Buffer DMA mode this will be used if xfer_buf is not
+ *                      DWORD aligned
  * @xfer_len:           Total number of bytes to transfer
  * @xfer_count:         Number of bytes transferred so far
  * @start_pkt_count:    Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
 
        u8 *xfer_buf;
        dma_addr_t xfer_dma;
+       dma_addr_t align_buf;
        u32 xfer_len;
        u32 xfer_count;
        u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
  *                           speed.  Note that this is in "schedule slice" which
  *                           is tightly packed.
  * @ntd:                Actual number of transfer descriptors in a list
+ * @dw_align_buf:       Used instead of original buffer if its physical address
+ *                      is not dword-aligned
+ * @dw_align_buf_dma:   DMA address for dw_align_buf
  * @qtd_list:           List of QTDs for this QH
  * @channel:            Host channel currently processing transfers for this QH
  * @qh_list_entry:      Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
        struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
        u32 ls_start_schedule_slice;
        u16 ntd;
+       u8 *dw_align_buf;
+       dma_addr_t dw_align_buf_dma;
        struct list_head qtd_list;
        struct dwc2_host_chan *channel;
        struct list_head qh_list_entry;
index fbea5e3fb9479bc4ff4ef250026df2583969ec67..ed7f05cf490637ba554e8be2fabea46bc118bb8c 100644 (file)
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
        len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
                                          DWC2_HC_XFER_COMPLETE, NULL);
-       if (!len) {
+       if (!len && !qtd->isoc_split_offset) {
                qtd->complete_split = 0;
-               qtd->isoc_split_offset = 0;
                return 0;
        }
 
        frame_desc->actual_length += len;
 
+       if (chan->align_buf) {
+               dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+               dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+                                DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+               memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+                      chan->qh->dw_align_buf, len);
+       }
+
        qtd->isoc_split_offset += len;
 
        hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
index d7c3d6c776d86a8edf5c41832a7856cd6fca29eb..301ced1618f873203b534ffa77cf7ef59a773f84 100644 (file)
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
        /* Get the map and adjust if this is a multi_tt hub */
        map = qh->dwc_tt->periodic_bitmaps;
        if (qh->dwc_tt->usb_tt->multi)
-               map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+               map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
 
        return map;
 }
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 
        if (qh->desc_list)
                dwc2_hcd_qh_free_ddma(hsotg, qh);
+       else if (hsotg->unaligned_cache && qh->dw_align_buf)
+               kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
        kfree(qh);
 }
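
The one-line dwc2_get_ls_map() change is an off-by-one: TT port numbers are 1-based, while the per-port bitmap array is indexed from 0. The corrected indexing in isolation (a sketch with made-up names):

/* Hub/TT ports count from 1; per-port arrays are indexed from 0. */
static unsigned long *tt_map_for_port(unsigned long *maps,
				      unsigned int elems_per_port,
				      unsigned int ttport)
{
	return maps + elems_per_port * (ttport - 1);
}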
 
index ea91310113b9abd2a233a17bcd973d7a1ed1e09e..103807587dc640a747d75339f703a3aea0e8e992 100644 (file)
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
        if (!dwc->clks)
                return -ENOMEM;
 
-       dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
        dwc->dev = dev;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
        if (IS_ERR(dwc->reset))
                return PTR_ERR(dwc->reset);
 
-       ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
-       if (ret == -EPROBE_DEFER)
-               return ret;
-       /*
-        * Clocks are optional, but new DT platforms should support all clocks
-        * as required by the DT-binding.
-        */
-       if (ret)
-               dwc->num_clks = 0;
+       if (dev->of_node) {
+               dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+               ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+               /*
+                * Clocks are optional, but new DT platforms should support all
+                * clocks as required by the DT-binding.
+                */
+               if (ret)
+                       dwc->num_clks = 0;
+       }
 
        ret = reset_control_deassert(dwc->reset);
        if (ret)
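
Only DT platforms describe the dwc3 clocks, so the bundle is acquired only when an OF node exists; PCI-enumerated instances keep num_clks at zero, and the later clk_bulk_* calls become no-ops. The shape of that logic, as a hedged sketch (example_get_clks is a made-up name):

static int example_get_clks(struct device *dev, struct clk_bulk_data *clks,
			    int *num_clks, int max_clks)
{
	int ret;

	*num_clks = 0;
	if (!dev->of_node)	/* e.g. PCI-enumerated: no DT clocks */
		return 0;

	*num_clks = max_clks;
	ret = clk_bulk_get(dev, *num_clks, clks);
	if (ret == -EPROBE_DEFER)
		return ret;
	if (ret)		/* clocks are optional on older DTs */
		*num_clks = 0;
	return 0;
}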
index 6b3ccd542bd76f6c40308a9df550ac2674673e45..dbeff5e6ad1461eea71a4cf9e562755cd50b1b17 100644 (file)
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
 
        reset_control_put(simple->resets);
 
-       pm_runtime_put_sync(dev);
        pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+       pm_runtime_set_suspended(dev);
 
        return 0;
 }
index c961a94d136b5248a5e242a6ab3370b22f3fa360..f57e7c94b8e5e0154ef430e3a0b3973d6ff84bd9 100644 (file)
@@ -34,6 +34,7 @@
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP              0x34ee
 
 #define PCI_INTEL_BXT_DSM_GUID         "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
 #define PCI_INTEL_BXT_FUNC_PMU_PWR     4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
        {  }    /* Terminating Entry */
 };
index b0e67ab2f98cd09ba54eaabfdab1680e30783e63..a6d0203e40b6e048bfb736133321ef6c857c09ca 100644 (file)
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
        qcom->dwc3 = of_find_device_by_node(dwc3_np);
        if (!qcom->dwc3) {
                dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+               ret = -ENODEV;
                goto depopulate;
        }
 
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
        return ret;
 }
 
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
        int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
 
        return ret;
 }
-#endif
 
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_suspend(qcom);
 }
 
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
 {
        struct dwc3_qcom *qcom = dev_get_drvdata(dev);
 
        return dwc3_qcom_resume(qcom);
 }
-#endif
 
 static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
index f242c2bcea810c0dee04f067ceea0dc716912058..d2fa071c21b17a1c0c03fbcf1db416d311e82a12 100644 (file)
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                 */
                if (w_value && !f->get_alt)
                        break;
+
+               spin_lock(&cdev->lock);
                value = f->set_alt(f, w_index, w_value);
                if (value == USB_GADGET_DELAYED_STATUS) {
                        DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                        DBG(cdev, "delayed_status count %d\n",
                                        cdev->delayed_status);
                }
+               spin_unlock(&cdev->lock);
                break;
        case USB_REQ_GET_INTERFACE:
                if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
index dce9d12c7981afb1733be479e9daa43977bd218a..33e2030503fa5b47cff5de52c43ac080b5507702 100644 (file)
@@ -215,6 +215,7 @@ struct ffs_io_data {
 
        struct mm_struct *mm;
        struct work_struct work;
+       struct work_struct cancellation_work;
 
        struct usb_ep *ep;
        struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+                                                  cancellation_work);
+
+       ENTER();
+
+       usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
        struct ffs_io_data *io_data = kiocb->private;
-       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+       struct ffs_data *ffs = io_data->ffs;
        int value;
 
        ENTER();
 
-       spin_lock_irq(&epfile->ffs->eps_lock);
-
-       if (likely(io_data && io_data->ep && io_data->req))
-               value = usb_ep_dequeue(io_data->ep, io_data->req);
-       else
+       if (likely(io_data && io_data->ep && io_data->req)) {
+               INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+               queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+               value = -EINPROGRESS;
+       } else {
                value = -EINVAL;
-
-       spin_unlock_irq(&epfile->ffs->eps_lock);
+       }
 
        return value;
 }
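
The cancel path now hands the actual usb_ep_dequeue() call off to the function's io_completion_wq instead of calling it under eps_lock, and reports -EINPROGRESS to the caller. The deferral pattern in isolation (io_ctx and friends are made-up names):

struct io_ctx {
	struct work_struct cancel_work;
	struct usb_ep *ep;
	struct usb_request *req;
};

static void io_cancel_worker(struct work_struct *work)
{
	struct io_ctx *io = container_of(work, struct io_ctx, cancel_work);

	usb_ep_dequeue(io->ep, io->req);	/* runs in process context */
}

static int io_cancel(struct io_ctx *io, struct workqueue_struct *wq)
{
	INIT_WORK(&io->cancel_work, io_cancel_worker);
	queue_work(wq, &io->cancel_work);
	return -EINPROGRESS;	/* completion will arrive asynchronously */
}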
index f0cdf89b850371e693db8fefc505efd90b72cd0d..83ba8a2eb6af9f95fe84a95e44904a20ada3c5ac 100644 (file)
@@ -2,6 +2,7 @@
 config USB_ASPEED_VHUB
        tristate "Aspeed vHub UDC driver"
        depends on ARCH_ASPEED || COMPILE_TEST
+       depends on USB_LIBCOMPOSITE
        help
          USB peripheral controller for the Aspeed AST2500 family
          SoCs supporting the "vHub" functionality and USB2.0
index 1fbfd89d0a0f00945abec540a75c58582a3d1943..387f124a83340b5f27eb9cfd2e4bb0323fcbd954 100644 (file)
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
        return 0;
 }
 
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 {
        struct xhci_dbc         *dbc = xhci->dbc;
 
        if (dbc->state == DS_DISABLED)
-               return;
+               return -1;
 
        writel(0, &dbc->regs->control);
        xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
+
+       return 0;
 }
 
 static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
 
 static void xhci_dbc_stop(struct xhci_hcd *xhci)
 {
+       int ret;
        unsigned long           flags;
        struct xhci_dbc         *dbc = xhci->dbc;
        struct dbc_port         *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
                xhci_dbc_tty_unregister_device(xhci);
 
        spin_lock_irqsave(&dbc->lock, flags);
-       xhci_do_dbc_stop(xhci);
+       ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);
 
-       pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+       if (!ret)
+               pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
 }
 
 static void
index acbd3d7b8828693f79a51f19ad70fb1aa4ade050..ef350c33dc4a8615a0188af0cea13ae29a876532 100644 (file)
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
        if (!ep->stream_info)
                return NULL;
 
-       if (stream_id > ep->stream_info->num_streams)
+       if (stream_id >= ep->stream_info->num_streams)
                return NULL;
        return ep->stream_info->stream_rings[stream_id];
 }
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 
        dev = xhci->devs[slot_id];
 
-       trace_xhci_free_virt_device(dev);
-
        xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
        if (!dev)
                return;
 
+       trace_xhci_free_virt_device(dev);
+
        if (dev->tt_info)
                old_active_eps = dev->tt_info->active_eps;
 
index a8c1d073cba05e3b070e73722d02b32eaf112e69..4b463e5202a421705be74610a136239dc3a9c423 100644 (file)
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
        unsigned long mask;
        unsigned int port;
        bool idle, enable;
-       int err;
+       int err = 0;
 
        memset(&rsp, 0, sizeof(rsp));
 
@@ -1223,10 +1223,10 @@ disable_rpm:
        pm_runtime_disable(&pdev->dev);
        usb_put_hcd(tegra->hcd);
 disable_xusbc:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
 disable_xusba:
-       if (!&pdev->dev.pm_domain)
+       if (!pdev->dev.pm_domain)
                tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
 put_padctl:
        tegra_xusb_padctl_put(tegra->padctl);
index 410544ffe78f68fa73e7328e8e105028a9116008..88b427434bd82536c653a911bb393c4bdb87814b 100644 (file)
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
        TP_ARGS(ring, trb)
 );
 
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev),
+       TP_STRUCT__entry(
+               __field(void *, vdev)
+               __field(unsigned long long, out_ctx)
+               __field(unsigned long long, in_ctx)
+               __field(u8, fake_port)
+               __field(u8, real_port)
+               __field(u16, current_mel)
+       ),
+       TP_fast_assign(
+               __entry->vdev = vdev;
+               __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+               __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+               __entry->fake_port = (u8) vdev->fake_port;
+               __entry->real_port = (u8) vdev->real_port;
+               __entry->current_mel = (u16) vdev->current_mel;
+               ),
+       TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+               __entry->vdev, __entry->in_ctx, __entry->out_ctx,
+               __entry->fake_port, __entry->real_port, __entry->current_mel
+       )
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+       TP_PROTO(struct xhci_virt_device *vdev),
+       TP_ARGS(vdev)
+);
+
 DECLARE_EVENT_CLASS(xhci_log_virt_dev,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
        TP_ARGS(vdev)
 );
 
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
-       TP_PROTO(struct xhci_virt_device *vdev),
-       TP_ARGS(vdev)
-);
-
 DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
        TP_PROTO(struct xhci_virt_device *vdev),
        TP_ARGS(vdev)
index 8c8da2d657fa1008c1e612e6f30d3e9716d534b6..2f4850f25e82f0f009b8f614b593fc74eb5247aa 100644 (file)
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
        spin_unlock_irqrestore(&xhci->lock, flags);
 }
 
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+       struct xhci_port        **ports;
+       int                     port_index;
+       u32                     status;
+       u32                     portsc;
+
+       status = readl(&xhci->op_regs->status);
+       if (status & STS_EINT)
+               return true;
+       /*
+        * Checking STS_EINT is not enough as there is a lag between a change
+        * bit being set and the Port Status Change Event that it generated
+        * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+        */
+
+       port_index = xhci->usb2_rhub.num_ports;
+       ports = xhci->usb2_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       port_index = xhci->usb3_rhub.num_ports;
+       ports = xhci->usb3_rhub.ports;
+       while (port_index--) {
+               portsc = readl(ports[port_index]->addr);
+               if (portsc & PORT_CHANGE_MASK ||
+                   (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+                       return true;
+       }
+       return false;
+}
+
 /*
  * Stop HC (not bus-specific)
  *
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-       u32                     command, temp = 0, status;
+       u32                     command, temp = 0;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                command = readl(&xhci->op_regs->command);
                command |= CMD_CRS;
                writel(command, &xhci->op_regs->command);
+               /*
+                * Some controllers take 55+ ms to complete the controller
+                * restore, so set the timeout to 100 ms. The xHCI
+                * specification doesn't mention any timeout value.
+                */
                if (xhci_handshake(&xhci->op_regs->status,
-                             STS_RESTORE, 0, 10 * 1000)) {
+                             STS_RESTORE, 0, 100 * 1000)) {
                        xhci_warn(xhci, "WARN: xHC restore state timeout\n");
                        spin_unlock_irq(&xhci->lock);
                        return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
  done:
        if (retval == 0) {
                /* Resume root hubs only when have pending events. */
-               status = readl(&xhci->op_regs->status);
-               if (status & STS_EINT) {
+               if (xhci_pending_portevent(xhci)) {
                        usb_hcd_resume_root_hub(xhci->shared_hcd);
                        usb_hcd_resume_root_hub(hcd);
                }
index 939e2f86b595eecbf1f1ecac7dcd7f39965d238d..841e89ffe2e9d88f6f81255340da58144916ca59 100644 (file)
@@ -382,6 +382,10 @@ struct xhci_op_regs {
 #define PORT_PLC       (1 << 22)
 /* port configure error change - port failed to configure its link partner */
 #define PORT_CEC       (1 << 23)
+#define PORT_CHANGE_MASK       (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+                                PORT_RC | PORT_PLC | PORT_CEC)
+
 /* Cold Attach Status - xHC can set this bit to report device attached during
  * Sx state. Warm port reset should be performed to clear this bit and move port
  * to connected state.
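
PORT_CHANGE_MASK simply ORs together every latched change bit in PORTSC, which is what lets xhci_pending_portevent() test each port with a single read. A per-port predicate built on it (sketch):

static bool port_has_pending_event(u32 portsc)
{
	/* Any latched change bit, or a link state of Resume, needs the hub. */
	return (portsc & PORT_CHANGE_MASK) ||
	       (portsc & PORT_PLS_MASK) == XDEV_RESUME;
}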
index 8abb6cbbd98a17d6b6ff95d0ad88268832780b87..3be40eaa1ac9b2caf493a8fd21e8980982b5d9a1 100644 (file)
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
                          loff_t *ppos)
 {
        struct usb_yurex *dev;
-       int retval = 0;
-       int bytes_read = 0;
+       int len = 0;
        char in_buffer[20];
        unsigned long flags;
 
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 
        mutex_lock(&dev->io_mutex);
        if (!dev->interface) {          /* already disconnected */
-               retval = -ENODEV;
-               goto exit;
+               mutex_unlock(&dev->io_mutex);
+               return -ENODEV;
        }
 
        spin_lock_irqsave(&dev->lock, flags);
-       bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+       len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
        spin_unlock_irqrestore(&dev->lock, flags);
-
-       if (*ppos < bytes_read) {
-               if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
-                       retval = -EFAULT;
-               else {
-                       retval = bytes_read - *ppos;
-                       *ppos += bytes_read;
-               }
-       }
-
-exit:
        mutex_unlock(&dev->io_mutex);
-       return retval;
+
+       return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
 static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
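
simple_read_from_buffer() folds the *ppos bookkeeping, the short-copy case, and the copy_to_user() error handling into one call, which is the whole yurex simplification. A minimal read() method using it (illustrative only; example_read is a made-up name):

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char kbuf[20];
	int len;

	len = scnprintf(kbuf, sizeof(kbuf), "%lld\n", 42LL);

	/* Copies at most count bytes from kbuf + *ppos and advances *ppos. */
	return simple_read_from_buffer(buf, count, ppos, kbuf, len);
}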
index bdd7a5ad3bf1c0060bcef8a3a1dc640b4a45cbcc..3bb1fff02bedd0e076581baabeff440e4552e3ce 100644 (file)
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
        r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
                            USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                            value, index, buf, bufsize, DEFAULT_TIMEOUT);
-       if (r < bufsize) {
+       if (r < (int)bufsize) {
                if (r >= 0) {
                        dev_err(&dev->dev,
                                "short control message received (%d < %u)\n",
index eb6c26cbe5792b0e535c77b9e2e245b700071458..626a29d9aa58d7e13770f048ae8c705dcfab2fea 100644 (file)
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
        { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
        { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+       { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+       { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
        { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
        { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
        { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+       { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+       { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
        { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
        { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+       { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
        { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+       { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+       { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+       { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+       { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
        { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+       { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+       { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+       { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
        { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
        { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
        { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
index 5169624d8b11386ecb07d50234850ca99397150f..38d43c4b7ce547700e007f6f406d17f2c65d3ca4 100644 (file)
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
                             3, /* get pins */
                             USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
                             0, 0, data, 1, 2000);
-       if (rc >= 0)
+       if (rc == 1)
                *value = *data;
+       else if (rc >= 0)
+               rc = -EIO;
 
        kfree(data);
        return rc;
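
usb_control_msg() returns the number of bytes actually transferred, so rc >= 0 alone cannot prove the single status byte arrived; the fix treats any short transfer as -EIO. The same guard as a standalone sketch (read_one_byte is a made-up name):

static int read_one_byte(struct usb_device *udev, u8 request, u8 *value)
{
	u8 *data;
	int rc;

	data = kmalloc(1, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			     USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
			     0, 0, data, 1, 2000);
	if (rc == 1)
		*value = *data;
	else if (rc >= 0)
		rc = -EIO;	/* short transfer: no usable data */

	kfree(data);
	return rc < 0 ? rc : 0;
}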
index fdceb46d9fc61a0c5eea2f113abd494dc4cc693b..b580b4c7fa488bbf80f1b5628e4e46bb8e28807b 100644 (file)
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
        }
 
        dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+       if (urb->actual_length < 1)
+               goto out;
+
        dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
                mos7840_port->MsrLsr, mos7840_port->port_num);
        data = urb->transfer_buffer;
index 8a201dd53d36b352b3d7fb68cf6486c08ddb96ff..150f43668bec1c8757115126ad9ffd20fe1255ed 100644 (file)
@@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
        u64 ts_nsec = local_clock();
        unsigned long rem_nsec;
 
+       mutex_lock(&port->logbuffer_lock);
        if (!port->logbuffer[port->logbuffer_head]) {
                port->logbuffer[port->logbuffer_head] =
                                kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
-               if (!port->logbuffer[port->logbuffer_head])
+               if (!port->logbuffer[port->logbuffer_head]) {
+                       mutex_unlock(&port->logbuffer_lock);
                        return;
+               }
        }
 
        vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
 
-       mutex_lock(&port->logbuffer_lock);
-
        if (tcpm_log_full(port)) {
                port->logbuffer_head = max(port->logbuffer_head - 1, 0);
                strcpy(tmpbuffer, "overflow");
@@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
 
        tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
 
+       port->supply_voltage = mv;
+       port->current_limit = max_ma;
+
        if (port->tcpc->set_current_limit)
                ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
 
@@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
        tcpm_set_attached_state(port, false);
        port->try_src_count = 0;
        port->try_snk_count = 0;
-       port->supply_voltage = 0;
-       port->current_limit = 0;
        port->usb_type = POWER_SUPPLY_USB_TYPE_C;
 
        power_supply_changed(port->psy);
@@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port)
                    tcpm_port_is_sink(port) &&
                    time_is_after_jiffies(port->delayed_runtime)) {
                        tcpm_set_state(port, SNK_DISCOVERY,
-                                      port->delayed_runtime - jiffies);
+                                      jiffies_to_msecs(port->delayed_runtime -
+                                                       jiffies));
                        break;
                }
                tcpm_set_state(port, unattached_state(port), 0);
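
The last hunk is a plain unit-conversion fix: tcpm_set_state() expects its delay in milliseconds, while port->delayed_runtime is a jiffies timestamp, so the remaining time has to go through jiffies_to_msecs() before being passed down. A small sketch of the conversion, with a hypothetical helper:

#include <linux/jiffies.h>

/* Sketch: converting a jiffies deadline into the millisecond delay an
 * API expects. 'deadline' is a future timestamp in jiffies. */
static unsigned int delay_ms_until(unsigned long deadline)
{
        if (time_is_before_jiffies(deadline))
                return 0;                       /* already due */
        return jiffies_to_msecs(deadline - jiffies);
}
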
index bd5cca5632b395def6384ec233d8ba5926e81c93..8d0a6fe748bdc50ca99800c3d6ba5680a4e9f0bd 100644 (file)
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
        }
 
        if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+               typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+               switch (con->status.partner_type) {
+               case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+                       typec_set_data_role(con->port, TYPEC_HOST);
+                       break;
+               case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+                       typec_set_data_role(con->port, TYPEC_DEVICE);
+                       break;
+               default:
+                       break;
+               }
+
                if (con->status.connected)
                        ucsi_register_partner(con);
                else
index 44eb4e1ea817b2e38eab36cee60021368508a342..a18112a83faed2df09e49c0a5a93d2fce0823c5f 100644 (file)
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
+       /* This will make sure we can use ioremap_nocache() */
+       status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+       if (ACPI_FAILURE(status))
+               return -ENOMEM;
+
        /*
         * NOTE: The memory region for the data structures is used also in an
         * operation region, which means ACPI has already reserved it. Therefore
index 24ee2605b9f043c9c1128d73bd44a4aa47322a37..42dc1d3d71cf05a7c91c5316ee832b62b15bf75e 100644 (file)
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
        def_bool y if !S390
 
 config VFIO_PCI_IGD
-       depends on VFIO_PCI
-       def_bool y if X86
+       bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+       depends on VFIO_PCI && X86
+       default y
+       help
+         Support for Intel IGD specific extensions to enable direct
+         assignment to virtual machines.  This includes exposing an IGD
+         specific firmware table and read-only copies of the host bridge
+         and LPC bridge config space.
+
+         To enable Intel IGD assignment through vfio-pci, say Y.
index 2c75b33db4ac19768ea77415685b4fac700dc4d3..3e5b17710a4f1fa47eb4f1333c17c96e1eae2cdd 100644 (file)
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
        struct page *page[1];
        struct vm_area_struct *vma;
        struct vm_area_struct *vmas[1];
+       unsigned int flags = 0;
        int ret;
 
+       if (prot & IOMMU_WRITE)
+               flags |= FOLL_WRITE;
+
+       down_read(&mm->mmap_sem);
        if (mm == current->mm) {
-               ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
-                                             page, vmas);
+               ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
        } else {
-               unsigned int flags = 0;
-
-               if (prot & IOMMU_WRITE)
-                       flags |= FOLL_WRITE;
-
-               down_read(&mm->mmap_sem);
                ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
                                            vmas, NULL);
                /*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
                        ret = -EOPNOTSUPP;
                        put_page(page[0]);
                }
-               up_read(&mm->mmap_sem);
        }
+       up_read(&mm->mmap_sem);
 
        if (ret == 1) {
                *pfn = page_to_pfn(page[0]);
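
The vaddr_get_pfn() change unifies two things: the FOLL_WRITE flag computation is now shared by both branches, and mmap_sem is held for read across the current-mm path as well, matching what the flags-based get_user_pages_longterm() expects. A condensed sketch of the resulting shape (signatures as of this kernel; error handling trimmed):

#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

/* Sketch of the unified pinning pattern: compute FOLL_* flags once and
 * hold mmap_sem across both the current-mm and remote-mm paths. */
static int pin_one_page(struct mm_struct *mm, unsigned long vaddr,
                        int prot, struct page **page)
{
        struct vm_area_struct *vmas[1];
        unsigned int flags = 0;
        long ret;

        if (prot & IOMMU_WRITE)
                flags |= FOLL_WRITE;    /* pages must be writable */

        down_read(&mm->mmap_sem);
        if (mm == current->mm)
                ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
        else
                ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags,
                                            page, vmas, NULL);
        up_read(&mm->mmap_sem);

        return ret == 1 ? 0 : (ret < 0 ? (int)ret : -EFAULT);
}
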
index 686dc670fd294b3077cf363241338ab871b26244..29756d88799b630f2c73ca097b56b092a14a7d5a 100644 (file)
@@ -1226,7 +1226,8 @@ err_used:
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-       sockfd_put(sock);
+       if (sock)
+               sockfd_put(sock);
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
index 451e833f593175886fd4fa6ae066860dad4cf95e..48b154276179f0269c7444e38d6717ac493a06d5 100644 (file)
@@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND)    += pvcalls-front.o
 xen-evtchn-y                           := evtchn.o
 xen-gntdev-y                           := gntdev.o
 xen-gntalloc-y                         := gntalloc.o
-xen-privcmd-y                          := privcmd.o
+xen-privcmd-y                          := privcmd.o privcmd-buf.o
index 762378f1811cc9069dc6171edb55aaa3610b82fa..08e4af04d6f2c32850a049a83721933a82883b8c 100644 (file)
@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
                xen_irq_info_cleanup(info);
        }
 
-       BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
        xen_free_irq(irq);
 }
 
index 2473b0a9e6e41d5d51b47e318d7e3b26d81ec5f6..ba9f3eec2bd00f6f39eb952ed5815e7b45c9735e 100644 (file)
@@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
 
        return 0;
 }
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 
 /**
  * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
        }
        free_xenballooned_pages(nr_pages, pages);
 }
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
 
 /* Handling of paged out grant targets (GNTST_eagain) */
 #define MAX_DELAY 256
index 8835065029d34a150a91662bb4562b4a41be50ca..c93d8ef8df3483bbc393b2101c189120f844b634 100644 (file)
@@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                return;
        }
 
-       if (sysrq_key != '\0')
-               xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+       if (sysrq_key != '\0') {
+               err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+               if (err) {
+                       pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+                              __func__, err);
+                       xenbus_transaction_end(xbt, 1);
+                       return;
+               }
+       }
 
        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
@@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void)
                        continue;
                snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
                         shutdown_handlers[idx].command);
-               xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+               if (err) {
+                       pr_err("%s: Error %d writing %s\n", __func__,
+                               err, node);
+                       return err;
+               }
        }
 
        return 0;
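
Both xen/manage.c hunks enforce the same rule: when a write inside a xenbus transaction fails, the transaction must be explicitly aborted (second argument 1 to xenbus_transaction_end()) before bailing out, otherwise the transaction leaks. A reduced sketch of the checked pattern, with a hypothetical wrapper:

#include <xen/xenbus.h>

/* Sketch: abort a xenbus transaction when a write inside it fails,
 * commit it otherwise. */
static int write_ack(struct xenbus_transaction xbt)
{
        int err;

        err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
        if (err) {
                pr_err("error %d writing control/sysrq\n", err);
                xenbus_transaction_end(xbt, 1); /* abort */
                return err;
        }
        return xenbus_transaction_end(xbt, 0); /* commit */
}
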
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644 (file)
index 0000000..df1ed37
--- /dev/null
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+                       "the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+       struct mutex lock;
+       struct list_head list;
+       unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+       struct privcmd_buf_private *file_priv;
+       struct list_head list;
+       unsigned int users;
+       unsigned int n_pages;
+       struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv;
+
+       file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+       if (!file_priv)
+               return -ENOMEM;
+
+       mutex_init(&file_priv->lock);
+       INIT_LIST_HEAD(&file_priv->list);
+
+       file->private_data = file_priv;
+
+       return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+       unsigned int i;
+
+       vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+       list_del(&vma_priv->list);
+
+       for (i = 0; i < vma_priv->n_pages; i++)
+               if (vma_priv->pages[i])
+                       __free_page(vma_priv->pages[i]);
+
+       kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       while (!list_empty(&file_priv->list)) {
+               vma_priv = list_first_entry(&file_priv->list,
+                                           struct privcmd_buf_vma_private,
+                                           list);
+               privcmd_buf_vmapriv_free(vma_priv);
+       }
+
+       mutex_unlock(&file_priv->lock);
+
+       kfree(file_priv);
+
+       return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+       if (!vma_priv)
+               return;
+
+       mutex_lock(&vma_priv->file_priv->lock);
+       vma_priv->users++;
+       mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+       struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+       struct privcmd_buf_private *file_priv;
+
+       if (!vma_priv)
+               return;
+
+       file_priv = vma_priv->file_priv;
+
+       mutex_lock(&file_priv->lock);
+
+       vma_priv->users--;
+       if (!vma_priv->users)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+       pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+                vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+                vmf->pgoff, (void *)vmf->address);
+
+       return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+       .open = privcmd_buf_vma_open,
+       .close = privcmd_buf_vma_close,
+       .fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct privcmd_buf_private *file_priv = file->private_data;
+       struct privcmd_buf_vma_private *vma_priv;
+       unsigned long count = vma_pages(vma);
+       unsigned int i;
+       int ret = 0;
+
+       if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+           file_priv->allocated + count > limit)
+               return -EINVAL;
+
+       vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+                          GFP_KERNEL);
+       if (!vma_priv)
+               return -ENOMEM;
+
+       vma_priv->n_pages = count;
+       count = 0;
+       for (i = 0; i < vma_priv->n_pages; i++) {
+               vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               if (!vma_priv->pages[i])
+                       break;
+               count++;
+       }
+
+       mutex_lock(&file_priv->lock);
+
+       file_priv->allocated += count;
+
+       vma_priv->file_priv = file_priv;
+       vma_priv->users = 1;
+
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+       vma->vm_ops = &privcmd_buf_vm_ops;
+       vma->vm_private_data = vma_priv;
+
+       list_add(&vma_priv->list, &file_priv->list);
+
+       if (vma_priv->n_pages != count)
+               ret = -ENOMEM;
+       else
+               for (i = 0; i < vma_priv->n_pages; i++) {
+                       ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+                                            vma_priv->pages[i]);
+                       if (ret)
+                               break;
+               }
+
+       if (ret)
+               privcmd_buf_vmapriv_free(vma_priv);
+
+       mutex_unlock(&file_priv->lock);
+
+       return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+       .owner = THIS_MODULE,
+       .open = privcmd_buf_open,
+       .release = privcmd_buf_release,
+       .mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "xen/hypercall",
+       .fops = &xen_privcmdbuf_fops,
+};
index 8ae0349d9f0ae47036ed2b6b8e968230c1fdfb41..7e6e682104dc4e9a77d8149e2f4500ded81b41b8 100644 (file)
@@ -1007,12 +1007,21 @@ static int __init privcmd_init(void)
                pr_err("Could not register Xen privcmd device\n");
                return err;
        }
+
+       err = misc_register(&xen_privcmdbuf_dev);
+       if (err != 0) {
+               pr_err("Could not register Xen hypercall-buf device\n");
+               misc_deregister(&privcmd_dev);
+               return err;
+       }
+
        return 0;
 }
 
 static void __exit privcmd_exit(void)
 {
        misc_deregister(&privcmd_dev);
+       misc_deregister(&xen_privcmdbuf_dev);
 }
 
 module_init(privcmd_init);
index 14facaeed36fda1a1492aaa2a88a72bc8855c450..0dd9f8f67ee30efc849a7bdf2085036c0c0e84ab 100644 (file)
@@ -1,3 +1,6 @@
 #include <linux/fs.h>
 
 extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
index 7bc88fd43cfc84d05873893ef4ddec8307e76c2a..e2f3e8b0fba9ff160a7c82a37e64cf5fe0b3c8f0 100644 (file)
@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
 {
        struct v2p_entry *entry;
        unsigned long flags;
+       int err;
 
        if (try) {
                spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                        scsiback_del_translation_entry(info, vir);
                }
        } else if (!try) {
-               xenbus_printf(XBT_NIL, info->dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
        }
 }
 
@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
        val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
        if (IS_ERR(val)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
        strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
        err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
                           &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
        if (XENBUS_EXIST_ERR(err)) {
-               xenbus_printf(XBT_NIL, dev->nodename, state,
+               err = xenbus_printf(XBT_NIL, dev->nodename, state,
                              "%d", XenbusStateClosed);
+               if (err)
+                       xenbus_dev_error(info->dev, err,
+                               "%s: writing %s", __func__, state);
                return;
        }
 
index e1d20124ec0e8698a1e8a5940537ff45f2e57d2c..210df9da1283744078a2c7fb6b5ae050ddf811de 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,7 +5,6 @@
  *     Implements an efficient asynchronous io interface.
  *
  *     Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
- *     Copyright 2018 Christoph Hellwig.
  *
  *     See ../COPYING for licensing terms.
  */
@@ -165,22 +164,10 @@ struct fsync_iocb {
        bool                    datasync;
 };
 
-struct poll_iocb {
-       struct file             *file;
-       __poll_t                events;
-       struct wait_queue_head  *head;
-
-       union {
-               struct wait_queue_entry wait;
-               struct work_struct      work;
-       };
-};
-
 struct aio_kiocb {
        union {
                struct kiocb            rw;
                struct fsync_iocb       fsync;
-               struct poll_iocb        poll;
        };
 
        struct kioctx           *ki_ctx;
@@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
                        iocb->aio_rw_flags))
                return -EINVAL;
+
        req->file = fget(iocb->aio_fildes);
        if (unlikely(!req->file))
                return -EBADF;
@@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
        return 0;
 }
 
-/* need to use list_del_init so we can check if item was present */
-static inline bool __aio_poll_remove(struct poll_iocb *req)
-{
-       if (list_empty(&req->wait.entry))
-               return false;
-       list_del_init(&req->wait.entry);
-       return true;
-}
-
-static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
-       fput(iocb->poll.file);
-       aio_complete(iocb, mangle_poll(mask), 0);
-}
-
-static void aio_poll_work(struct work_struct *work)
-{
-       struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
-
-       if (!list_empty_careful(&iocb->ki_list))
-               aio_remove_iocb(iocb);
-       __aio_poll_complete(iocb, iocb->poll.events);
-}
-
-static int aio_poll_cancel(struct kiocb *iocb)
-{
-       struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
-       struct poll_iocb *req = &aiocb->poll;
-       struct wait_queue_head *head = req->head;
-       bool found = false;
-
-       spin_lock(&head->lock);
-       found = __aio_poll_remove(req);
-       spin_unlock(&head->lock);
-
-       if (found) {
-               req->events = 0;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-       return 0;
-}
-
-static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
-               void *key)
-{
-       struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
-       struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
-       struct file *file = req->file;
-       __poll_t mask = key_to_poll(key);
-
-       assert_spin_locked(&req->head->lock);
-
-       /* for instances that support it check for an event match first: */
-       if (mask && !(mask & req->events))
-               return 0;
-
-       mask = file->f_op->poll_mask(file, req->events) & req->events;
-       if (!mask)
-               return 0;
-
-       __aio_poll_remove(req);
-
-       /*
-        * Try completing without a context switch if we can acquire ctx_lock
-        * without spinning.  Otherwise we need to defer to a workqueue to
-        * avoid a deadlock due to the lock order.
-        */
-       if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
-               list_del_init(&iocb->ki_list);
-               spin_unlock(&iocb->ki_ctx->ctx_lock);
-
-               __aio_poll_complete(iocb, mask);
-       } else {
-               req->events = mask;
-               INIT_WORK(&req->work, aio_poll_work);
-               schedule_work(&req->work);
-       }
-
-       return 1;
-}
-
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
-{
-       struct kioctx *ctx = aiocb->ki_ctx;
-       struct poll_iocb *req = &aiocb->poll;
-       __poll_t mask;
-
-       /* reject any unknown events outside the normal event mask. */
-       if ((u16)iocb->aio_buf != iocb->aio_buf)
-               return -EINVAL;
-       /* reject fields that are not defined for poll */
-       if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
-               return -EINVAL;
-
-       req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
-       req->file = fget(iocb->aio_fildes);
-       if (unlikely(!req->file))
-               return -EBADF;
-       if (!file_has_poll_mask(req->file))
-               goto out_fail;
-
-       req->head = req->file->f_op->get_poll_head(req->file, req->events);
-       if (!req->head)
-               goto out_fail;
-       if (IS_ERR(req->head)) {
-               mask = EPOLLERR;
-               goto done;
-       }
-
-       init_waitqueue_func_entry(&req->wait, aio_poll_wake);
-       aiocb->ki_cancel = aio_poll_cancel;
-
-       spin_lock_irq(&ctx->ctx_lock);
-       spin_lock(&req->head->lock);
-       mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
-       if (!mask) {
-               __add_wait_queue(req->head, &req->wait);
-               list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
-       }
-       spin_unlock(&req->head->lock);
-       spin_unlock_irq(&ctx->ctx_lock);
-done:
-       if (mask)
-               __aio_poll_complete(aiocb, mask);
-       return 0;
-out_fail:
-       fput(req->file);
-       return -EINVAL; /* same as no support for IOCB_CMD_POLL */
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         bool compat)
 {
@@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
        case IOCB_CMD_FDSYNC:
                ret = aio_fsync(&req->fsync, &iocb, true);
                break;
-       case IOCB_CMD_POLL:
-               ret = aio_poll(req, &iocb);
-               break;
        default:
                pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
                ret = -EINVAL;
index 43fedde15c26203548c08c866aa7fbf5cfc27dd0..1f85d35ec8b7b7f6a3866962bce468e3ae7d56f6 100644 (file)
@@ -2,6 +2,6 @@
 # Makefile for the linux autofs-filesystem routines.
 #
 
-obj-$(CONFIG_AUTOFS_FS) += autofs.o
+obj-$(CONFIG_AUTOFS_FS) += autofs4.o
 
-autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
+autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
index ea4ca1445ab78808644408de99430d8bbd6e1fd9..86eafda4a65226ef292f8713c2a86dee48e831ff 100644 (file)
@@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
                                cmd);
                        goto out;
                }
+       } else {
+               unsigned int inr = _IOC_NR(cmd);
+
+               if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
+                   inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
+                   inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
+                       err = -EINVAL;
+                       goto out;
+               }
        }
 
        err = 0;
@@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp,
        dev_t devid;
        int err, fd;
 
-       /* param->path has already been checked */
+       /* param->path has been checked in validate_dev_ioctl() */
+
        if (!param->openmount.devid)
                return -EINVAL;
 
@@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp,
        dev_t devid;
        int err = -ENOENT;
 
-       if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* param->path has been checked in validate_dev_ioctl() */
 
        devid = sbi->sb->s_dev;
 
@@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
        unsigned int devid, magic;
        int err = -ENOENT;
 
-       if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
-               err = -EINVAL;
-               goto out;
-       }
+       /* param->path has been checked in validate_dev_ioctl() */
 
        name = param->path;
        type = param->ismountpoint.in.type;
index cc9447e1903f7a16d023067c0098c4123e764351..79ae07d9592f55cc06a10086cf45453250637d30 100644 (file)
@@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = {
        .kill_sb        = autofs_kill_sb,
 };
 MODULE_ALIAS_FS("autofs");
-MODULE_ALIAS("autofs4");
+MODULE_ALIAS("autofs");
 
 static int __init init_autofs_fs(void)
 {
index 0ac456b52bddb62e9c817d61bccc886c7c8cde85..816cc921cf36f766ca4521145b1b911a33fc7a13 100644 (file)
@@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file)
                goto out_free_ph;
        }
 
-       len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
-                           ELF_MIN_ALIGN - 1);
-       bss = eppnt->p_memsz + eppnt->p_vaddr;
+       len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
+       bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
        if (bss > len) {
                error = vm_brk(len, bss - len);
                if (error)
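
The load_elf_library() fix replaces a round-down (ELF_PAGESTART) of the file-backed extent and an unaligned bss end with round-ups of both, so the bss > len comparison and the vm_brk() range are page-aligned and consistent. A worked example of the arithmetic (ELF_PAGEALIGN as defined in fs/binfmt_elf.c, with an assumed 4 KiB ELF_MIN_ALIGN):

/* Illustrative only; the kernel takes ELF_MIN_ALIGN from asm/elf.h. */
#define ELF_MIN_ALIGN   4096UL
#define ELF_PAGEALIGN(x) (((x) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))

/* With p_vaddr = 0x1000, p_filesz = 0x1234, p_memsz = 0x5678:
 *   len = ELF_PAGEALIGN(0x1000 + 0x1234) = 0x3000  (file-backed extent)
 *   bss = ELF_PAGEALIGN(0x1000 + 0x5678) = 0x7000  (end of zeroed bss)
 * bss > len, so vm_brk(len, bss - len) maps 0x4000 bytes of zero pages. */
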
index cce6087d6880fa4c1673dbc8aab0026fc62391f4..e55843f536bcaa03b6b2aa8e1c3ffb67b946df88 100644 (file)
@@ -4542,8 +4542,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        offset_in_extent = em_start - em->start;
                em_end = extent_map_end(em);
                em_len = em_end - em_start;
-               disko = em->block_start + offset_in_extent;
                flags = 0;
+               if (em->block_start < EXTENT_MAP_LAST_BYTE)
+                       disko = em->block_start + offset_in_extent;
+               else
+                       disko = 0;
 
                /*
                 * bump off for our next call to get_extent
index e9482f0db9d08ffd79a117f0d6f08b6eb94cae99..eba61bcb9bb3cdd9759837b539c257aaaed1edef 100644 (file)
@@ -9005,13 +9005,14 @@ again:
 
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
-out_unlock:
        if (!ret2) {
                btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
        }
+
+out_unlock:
        unlock_page(page);
 out:
        btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
@@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
+       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
 
@@ -9639,7 +9641,8 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret = btrfs_end_transaction(trans);
+       ret2 = btrfs_end_transaction(trans);
+       ret = ret ? ret : ret2;
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
index c2837a32d689de9a7d5d3bfc96d7d861cd221dfb..b077544b523245c05c6ec53710d4f9d45d1eb641 100644 (file)
@@ -3327,11 +3327,13 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
                if (pg) {
                        unlock_page(pg);
                        put_page(pg);
+                       cmp->src_pages[i] = NULL;
                }
                pg = cmp->dst_pages[i];
                if (pg) {
                        unlock_page(pg);
                        put_page(pg);
+                       cmp->dst_pages[i] = NULL;
                }
        }
 }
@@ -3577,7 +3579,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
                                              dst, dst_loff, &cmp);
                if (ret)
-                       goto out_unlock;
+                       goto out_free;
 
                loff += BTRFS_MAX_DEDUPE_LEN;
                dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3587,16 +3589,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                ret = btrfs_extent_same_range(src, loff, tail_len, dst,
                                              dst_loff, &cmp);
 
+out_free:
+       kvfree(cmp.src_pages);
+       kvfree(cmp.dst_pages);
+
 out_unlock:
        if (same_inode)
                inode_unlock(src);
        else
                btrfs_double_inode_unlock(src, dst);
 
-out_free:
-       kvfree(cmp.src_pages);
-       kvfree(cmp.dst_pages);
-
        return ret;
 }
 
index 1874a6d2e6f5422c809759d0ca29e9bb973826bb..c25dc47210a397560e929f55fc3feea8f26798dd 100644 (file)
@@ -2680,8 +2680,10 @@ out:
                free_extent_buffer(scratch_leaf);
        }
 
-       if (done && !ret)
+       if (done && !ret) {
                ret = 1;
+               fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+       }
        return ret;
 }
 
@@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
 
        if (!init_flags) {
                /* we're resuming qgroup rescan at mount time */
-               if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN))
+               if (!(fs_info->qgroup_flags &
+                     BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup is not enabled");
-               else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+                       ret = -EINVAL;
+               } else if (!(fs_info->qgroup_flags &
+                            BTRFS_QGROUP_STATUS_FLAG_ON)) {
                        btrfs_warn(fs_info,
                        "qgroup rescan init failed, qgroup rescan is not queued");
-               return -EINVAL;
+                       ret = -EINVAL;
+               }
+
+               if (ret)
+                       return ret;
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
index 5723060364776d1fd3e3e1e09bdfc09d4bb7eadb..6702896cdb8f7bcdb93a393f5ee3482498376445 100644 (file)
@@ -1151,11 +1151,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                return ret;
        }
 
-       if (sctx->is_dev_replace && !is_metadata && !have_csum) {
-               sblocks_for_recheck = NULL;
-               goto nodatasum_case;
-       }
-
        /*
         * read all mirrors one after the other. This includes to
         * re-read the extent or metadata block that failed (that was
@@ -1268,13 +1263,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
                goto out;
        }
 
-       if (!is_metadata && !have_csum) {
+       /*
+        * NOTE: Even for nodatasum case, it's still possible that it's a
+        * compressed data extent, thus scrub_fixup_nodatasum(), which writes
+        * inode page cache onto disk, could cause serious data corruption.
+        *
+        * So here we could only read from disk, and hope our recovery could
+        * reach disk before the newer write.
+        */
+       if (0 && !is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;
 
                WARN_ON(sctx->is_dev_replace);
 
-nodatasum_case:
-
                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COWed, that it might be modified
index e034ad9e23b48b42826de6bed1a8f59d6e926a20..1da162928d1a9b305ab36c2d99386afb2f060326 100644 (file)
@@ -1146,6 +1146,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 {
        int ret;
 
+       mutex_lock(&uuid_mutex);
        mutex_lock(&fs_devices->device_list_mutex);
        if (fs_devices->opened) {
                fs_devices->opened++;
@@ -1155,6 +1156,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                ret = open_fs_devices(fs_devices, flags, holder);
        }
        mutex_unlock(&fs_devices->device_list_mutex);
+       mutex_unlock(&uuid_mutex);
 
        return ret;
 }
index ee764ac352ab7b855165b797c1daf579fbaa45e1..a866be999216a81bcfa90dfcb17cc11177442731 100644 (file)
@@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
+               dput(dn);
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
index 116146022aa1fa82d334790f7e2d7ff46b052bf3..bfe99950581527bcc494acb6419436e6373aa923 100644 (file)
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
        seq_putc(m, '\n');
 }
 
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+       struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+       struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+       seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+       seq_puts(m, "\t\tCapabilities: ");
+       if (iface->rdma_capable)
+               seq_puts(m, "rdma ");
+       if (iface->rss_capable)
+               seq_puts(m, "rss ");
+       seq_putc(m, '\n');
+       if (iface->sockaddr.ss_family == AF_INET)
+               seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+       else if (iface->sockaddr.ss_family == AF_INET6)
+               seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ skip_rdma:
                                              mid_entry->mid);
                        }
                        spin_unlock(&GlobalMid_Lock);
+
+                       spin_lock(&ses->iface_lock);
+                       if (ses->iface_count)
+                               seq_printf(m, "\n\tServer interfaces: %zu\n",
+                                          ses->iface_count);
+                       for (j = 0; j < ses->iface_count; j++) {
+                               seq_printf(m, "\t%d)\n", j);
+                               cifs_dump_iface(m, &ses->iface_list[j]);
+                       }
+                       spin_unlock(&ses->iface_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
index 937251cc61c046916228f150916c8c0a82a442a5..ee2a8ec70056f7451695cb75bfe1e00a95280ff0 100644 (file)
@@ -37,7 +37,6 @@
 #include <crypto/aead.h>
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
-                       int start,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash)
 {
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
+       int is_smb2 = server->vals->header_preamble_size == 0;
 
-       for (i = start; i < n_vec; i++) {
+       /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+       if (is_smb2) {
+               if (iov[0].iov_len <= 4)
+                       return -EIO;
+               i = 0;
+       } else {
+               if (n_vec < 2 || iov[0].iov_len != 4)
+                       return -EIO;
+               i = 1; /* skip rfc1002 length */
+       }
+
+       for (; i < n_vec; i++) {
                if (iov[i].iov_len == 0)
                        continue;
                if (iov[i].iov_base == NULL) {
                        cifs_dbg(VFS, "null iovec entry\n");
                        return -EIO;
                }
-               if (i == 1 && iov[1].iov_len <= 4)
-                       break; /* nothing to sign or corrupt header */
+
                rc = crypto_shash_update(shash,
                                         iov[i].iov_base, iov[i].iov_len);
                if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
                return rc;
        }
 
-       return __cifs_calc_signature(rqst, 1, server, signature,
+       return __cifs_calc_signature(rqst, server, signature,
                                     &server->secmech.sdescmd5->shash);
 }
 
index 1efa2e65bc1a8971f01811ac699a82cb7c1f1727..c923c785402757c36d25528c5e77e53909b227dc 100644 (file)
@@ -33,6 +33,9 @@
 
 #define CIFS_MAGIC_NUMBER 0xFF534D42      /* the first four bytes of SMB PDUs */
 
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
 /*
  * The sizes of various internal tables and strings
  */
@@ -312,6 +315,10 @@ struct smb_version_operations {
        /* send echo request */
        int (*echo)(struct TCP_Server_Info *);
        /* create directory */
+       int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+                       umode_t mode, struct cifs_tcon *tcon,
+                       const char *full_path,
+                       struct cifs_sb_info *cifs_sb);
        int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
                     struct cifs_sb_info *);
        /* set info on created directory */
@@ -416,7 +423,7 @@ struct smb_version_operations {
        void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
                                 bool *);
        /* create lease context buffer for CREATE request */
-       char * (*create_lease_buf)(u8 *, u8);
+       char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
        /* parse lease context buffer and return oplock/epoch info */
        __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
        ssize_t (*copychunk_range)(const unsigned int,
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 
 #endif
 
+struct cifs_server_iface {
+       size_t speed;
+       unsigned int rdma_capable : 1;
+       unsigned int rss_capable : 1;
+       struct sockaddr_storage sockaddr;
+};
+
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
@@ -875,6 +889,20 @@ struct cifs_ses {
 #ifdef CONFIG_CIFS_SMB311
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 #endif /* 3.1.1 */
+
+       /*
+        * Network interfaces available on the server this session is
+        * connected to.
+        *
+        * Other channels can be opened by connecting and binding this
+        * session to interfaces from this list.
+        *
+        * iface_lock should be taken when accessing any of these fields
+        */
+       spinlock_t iface_lock;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       unsigned long iface_last_update; /* jiffies */
 };
 
 static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
        return ses->server->vals->cap_unix & ses->capabilities;
 }
 
+struct cached_fid {
+       bool is_valid:1;        /* Do we have a useable root fid */
+       struct cifs_fid *fid;
+       struct mutex fid_mutex;
+       struct cifs_tcon *tcon;
+       struct work_struct lease_break;
+};
+
 /*
  * there is one of these for each connection to a resource on a particular
  * session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
        struct fscache_cookie *fscache; /* cookie for share */
 #endif
        struct list_head pending_opens; /* list of incomplete opens */
-       bool valid_root_fid:1;  /* Do we have a useable root fid */
-       struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
-       struct cifs_fid *prfid; /* handle to the directory at top of share */
+       struct cached_fid crfid; /* Cached root fid */
        /* BB add field for back pointer to sb struct(s)? */
 };
 
@@ -1382,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server,
 /* one of these for every pending CIFS request to the server */
 struct mid_q_entry {
        struct list_head qhead; /* mids waiting on reply from this server */
+       struct kref refcount;
        struct TCP_Server_Info *server; /* server corresponding to this mid */
        __u64 mid;              /* multiplex id */
        __u32 pid;              /* process id */
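
The new refcount in struct mid_q_entry lets the demultiplex thread and the issuing thread share a mid safely: the find_mid() paths below take a reference under GlobalMid_Lock, and the receive path drops it once the response has been handled. A generic sketch of that kref discipline (structure and names simplified):

#include <linux/kref.h>
#include <linux/slab.h>

struct mid_entry {
        struct kref refcount;
        /* ... request state ... */
};

static void mid_release(struct kref *kref)
{
        kfree(container_of(kref, struct mid_entry, refcount));
}

/* Lookup path: called with the list lock held. */
static struct mid_entry *mid_get(struct mid_entry *mid)
{
        kref_get(&mid->refcount);
        return mid;
}

/* Consumer path: drop the lookup reference when finished; the last
 * put frees the entry via the release callback. */
static void mid_put(struct mid_entry *mid)
{
        kref_put(&mid->refcount, mid_release);
}
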
index 4e0d183c3d1016918d9934420af6e626d128d077..1890f534c88b168b8476a64fd165cce64f905887 100644 (file)
@@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                        struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
 extern void cifs_delete_mid(struct mid_q_entry *mid);
+extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
 extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                struct mid_q_entry *mid);
@@ -112,10 +113,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */, const int flags,
                        struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
-                         struct kvec *pkvec, int nvec_to_send,
-                         int *pbuftype, const int flags,
-                         struct kvec *presp);
 extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct cifs_tcon *ptcon,
                        struct smb_hdr *in_buf ,
@@ -544,7 +541,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
                           struct cifs_sb_info *cifs_sb,
                           const unsigned char *path, char *pbuf,
                           unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
                        struct TCP_Server_Info *server, char *signature,
                        struct shash_desc *shash);
 enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +549,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
 void cifs_aio_ctx_release(struct kref *refcount);
 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
 
 int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
                    struct sdesc **sdesc);
index 42329b25877db2b3de349b0ce5723f70bebad92b..93408eab92e78988bcf79b715ac77049db643e7f 100644 (file)
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
        }
        spin_unlock(&tcon->open_file_lock);
 
-       mutex_lock(&tcon->prfid_mutex);
-       tcon->valid_root_fid = false;
-       memset(tcon->prfid, 0, sizeof(struct cifs_fid));
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_lock(&tcon->crfid.fid_mutex);
+       tcon->crfid.is_valid = false;
+       memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+       mutex_unlock(&tcon->crfid.fid_mutex);
 
        /*
         * BB Add call to invalidate_inodes(sb) for all superblocks mounted
@@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
         * greater than cifs socket timeout which is 7 seconds
         */
        while (server->tcpStatus == CifsNeedReconnect) {
-               wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+               rc = wait_event_interruptible_timeout(server->response_q,
+                                                     (server->tcpStatus != CifsNeedReconnect),
+                                                     10 * HZ);
+               if (rc < 0) {
+                       cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+                                " received by the process\n", __func__);
+                       return -ERESTARTSYS;
+               }
 
                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
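
wait_event_interruptible_timeout() returns a negative value when a signal interrupts the wait, 0 on timeout, and the remaining jiffies otherwise; the old code discarded the result, so a signalled process simply kept cycling through the 10-second reconnect wait. A reduced sketch of the checked pattern (wait queue and predicate are stand-ins for the server state):

#include <linux/wait.h>

/* Sketch: waiting for reconnect while honouring signals. */
static int wait_for_reconnect(wait_queue_head_t *wq, bool (*done)(void))
{
        long rc;

        while (!done()) {
                rc = wait_event_interruptible_timeout(*wq, done(), 10 * HZ);
                if (rc < 0)
                        return -ERESTARTSYS;    /* signal: give up cleanly */
                /* rc == 0 means timeout; re-check the state and retry */
        }
        return 0;
}
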
index 96645a7d8f27144a885863578d33e2b757afeec6..5df2c0698cda7a5ae093db0e3886b275bc0565cb 100644 (file)
@@ -57,9 +57,6 @@
 #include "smb2proto.h"
 #include "smbdirect.h"
 
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
 extern mempool_t *cifs_req_poolp;
 extern bool disable_legacy_dialects;
 
@@ -927,6 +924,7 @@ next_pdu:
                                server->pdu_size = next_offset;
                }
 
+               mid_entry = NULL;
                if (server->ops->is_transform_hdr &&
                    server->ops->receive_transform &&
                    server->ops->is_transform_hdr(buf)) {
@@ -941,8 +939,11 @@ next_pdu:
                                length = mid_entry->receive(server, mid_entry);
                }
 
-               if (length < 0)
+               if (length < 0) {
+                       if (mid_entry)
+                               cifs_mid_q_entry_release(mid_entry);
                        continue;
+               }
 
                if (server->large_buf)
                        buf = server->bigbuf;
@@ -959,6 +960,8 @@ next_pdu:
 
                        if (!mid_entry->multiRsp || mid_entry->multiEnd)
                                mid_entry->callback(mid_entry);
+
+                       cifs_mid_q_entry_release(mid_entry);
                } else if (server->ops->is_oplock_break &&
                           server->ops->is_oplock_break(buf, server)) {
                        cifs_dbg(FYI, "Received oplock break\n");
@@ -3029,8 +3032,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 
 #ifdef CONFIG_CIFS_SMB311
        if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
-               if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+               if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
                        tcon->posix_extensions = true;
+                       printk_once(KERN_WARNING
+                               "SMB3.11 POSIX Extensions are experimental\n");
+               }
        }
 #endif /* 311 */
 
index f4697f548a394dbf5c42f731bf13bd529c9aaea0..a2cfb33e85c1f8cb25a2d32a52bb5d60c93b79f1 100644 (file)
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                goto mkdir_out;
        }
 
+       server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+       if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+               rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+                                             cifs_sb);
+               d_drop(direntry); /* for time being always refresh inode info */
+               goto mkdir_out;
+       }
+#endif /* SMB311 */
+
        if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
                                le64_to_cpu(tcon->fsUnixInfo.Capability))) {
                rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                        goto mkdir_out;
        }
 
-       server = tcon->ses->server;
-
        if (!server->ops->mkdir) {
                rc = -ENOSYS;
                goto mkdir_out;
index af29ade195c002c0323d855edb391a155f1620f7..53e8362cbc4a953218d3fbd50f1c7133e5435cc9 100644 (file)
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->smb_ses_list);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                mutex_init(&ret_buf->session_mutex);
+               spin_lock_init(&ret_buf->iface_lock);
        }
        return ret_buf;
 }
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kzfree(buf_to_free->auth_key.response);
+       kfree(buf_to_free->iface_list);
        kzfree(buf_to_free);
 }
 
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
                INIT_LIST_HEAD(&ret_buf->openFileList);
                INIT_LIST_HEAD(&ret_buf->tcon_list);
                spin_lock_init(&ret_buf->open_file_lock);
-               mutex_init(&ret_buf->prfid_mutex);
-               ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+               mutex_init(&ret_buf->crfid.fid_mutex);
+               ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+                                            GFP_KERNEL);
 #ifdef CONFIG_CIFS_STATS
                spin_lock_init(&ret_buf->stat_lock);
 #endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
        atomic_dec(&tconInfoAllocCount);
        kfree(buf_to_free->nativeFileSystem);
        kzfree(buf_to_free->password);
-       kfree(buf_to_free->prfid);
+       kfree(buf_to_free->crfid.fid);
        kfree(buf_to_free);
 }
 
index aff8ce8ba34d55485d1d15aa8b7ea498cf6726f3..646dcd149de1e368baebac10a940a70a095ef479 100644 (file)
@@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
                if (compare_mid(mid->mid, buf) &&
                    mid->mid_state == MID_REQUEST_SUBMITTED &&
                    le16_to_cpu(mid->command) == buf->Command) {
+                       kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
index 788412675723e85589f78cc6056f2d67edbd5ff1..4ed10dd086e6f31f2816462c8a082ec8939175ae 100644 (file)
@@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        int rc;
        __le16 *smb2_path;
        struct smb2_file_all_info *smb2_data = NULL;
-       __u8 smb2_oplock[17];
+       __u8 smb2_oplock;
        struct cifs_fid *fid = oparms->fid;
        struct network_resiliency_req nr_ioctl_req;
 
@@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
        }
 
        oparms->desired_access |= FILE_READ_ATTRIBUTES;
-       *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+       smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
 
-       if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
-               memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE);
-
-       rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL,
+       rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL,
                       NULL);
        if (rc)
                goto out;
@@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
                move_smb2_info_to_cifs(buf, smb2_data);
        }
 
-       *oplock = *smb2_oplock;
+       *oplock = smb2_oplock;
 out:
        kfree(smb2_data);
        kfree(smb2_path);
index e2bec47c684580089a70e7914ec71d2f523da3e3..3ff7cec2da81141f67482c57ab03de52aed855ba 100644 (file)
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
 #ifdef CONFIG_CIFS_SMB311
        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
-                cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+                cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+                (from[0] == '/')) {
                start_of_path = from + 1;
        }
 #endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
 {
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
-       int rc;
+       int rc = 0;
 
        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);
+
        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 
                open->oplock = lease_state;
        }
+
        return found;
 }
 
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
                                        return true;
                                }
                                spin_unlock(&tcon->open_file_lock);
+
+                               if (tcon->crfid.is_valid &&
+                                   !memcmp(rsp->LeaseKey,
+                                           tcon->crfid.fid->lease_key,
+                                           SMB2_LEASE_KEY_SIZE)) {
+                                       INIT_WORK(&tcon->crfid.lease_break,
+                                                 smb2_cached_lease_break);
+                                       queue_work(cifsiod_wq,
+                                                  &tcon->crfid.lease_break);
+                                       spin_unlock(&cifs_tcp_ses_lock);
+                                       return true;
+                               }
                        }
                }
        }
index b15f5957d64591f0af611670088dd4dd8439fb43..ea92a38b2f08c34f2afd942d5fa933098f04cc07 100644 (file)
@@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
                if ((mid->mid == wire_mid) &&
                    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                    (mid->command == shdr->Command)) {
+                       kref_get(&mid->refcount);
                        spin_unlock(&GlobalMid_Lock);
                        return mid;
                }
@@ -294,34 +295,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
        return rsize;
 }
 
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+                       size_t buf_len,
+                       struct cifs_server_iface **iface_list,
+                       size_t *iface_count)
+{
+       struct network_interface_info_ioctl_rsp *p;
+       struct sockaddr_in *addr4;
+       struct sockaddr_in6 *addr6;
+       struct iface_info_ipv4 *p4;
+       struct iface_info_ipv6 *p6;
+       struct cifs_server_iface *info;
+       ssize_t bytes_left;
+       size_t next = 0;
+       int nb_iface = 0;
+       int rc = 0;
+
+       *iface_list = NULL;
+       *iface_count = 0;
+
+       /*
+        * First pass: count and sanity check
+        */
+
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               nb_iface++;
+               next = le32_to_cpu(p->Next);
+               if (!next) {
+                       bytes_left -= sizeof(*p);
+                       break;
+               }
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!nb_iface) {
+               cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if (bytes_left || p->Next)
+               cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+
+       /*
+        * Second pass: extract info to internal structure
+        */
+
+       *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+       if (!*iface_list) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       info = *iface_list;
+       bytes_left = buf_len;
+       p = buf;
+       while (bytes_left >= sizeof(*p)) {
+               info->speed = le64_to_cpu(p->LinkSpeed);
+               info->rdma_capable =
+                       le32_to_cpu(p->Capability) & RDMA_CAPABLE ? 1 : 0;
+               info->rss_capable =
+                       le32_to_cpu(p->Capability) & RSS_CAPABLE ? 1 : 0;
+
+               cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+               cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+               cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+                        le32_to_cpu(p->Capability));
+
+               switch (p->Family) {
+               /*
+                * The kernel and wire socket structures have the same
+                * layout and use network byte order but make the
+                * conversion explicit in case either one changes.
+                */
+               case INTERNETWORK:
+                       addr4 = (struct sockaddr_in *)&info->sockaddr;
+                       p4 = (struct iface_info_ipv4 *)p->Buffer;
+                       addr4->sin_family = AF_INET;
+                       memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+                       /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+                       addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+                                &addr4->sin_addr);
+                       break;
+               case INTERNETWORKV6:
+                       addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+                       p6 = (struct iface_info_ipv6 *)p->Buffer;
+                       addr6->sin6_family = AF_INET6;
+                       memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+                       /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+                       addr6->sin6_flowinfo = 0;
+                       addr6->sin6_scope_id = 0;
+                       addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+                       cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+                                &addr6->sin6_addr);
+                       break;
+               default:
+                       cifs_dbg(VFS,
+                                "%s: skipping unsupported socket family\n",
+                                __func__);
+                       goto next_iface;
+               }
+
+               (*iface_count)++;
+               info++;
+next_iface:
+               next = le32_to_cpu(p->Next);
+               if (!next)
+                       break;
+               p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+               bytes_left -= next;
+       }
+
+       if (!*iface_count) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+out:
+       if (rc) {
+               kfree(*iface_list);
+               *iface_count = 0;
+               *iface_list = NULL;
+       }
+       return rc;
+}
+
 static int
 SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
 {
        int rc;
        unsigned int ret_data_len = 0;
-       struct network_interface_info_ioctl_rsp *out_buf;
+       struct network_interface_info_ioctl_rsp *out_buf = NULL;
+       struct cifs_server_iface *iface_list;
+       size_t iface_count;
+       struct cifs_ses *ses = tcon->ses;
 
        rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
                        FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
                        NULL /* no data input */, 0 /* no data input */,
                        (char **)&out_buf, &ret_data_len);
-       if (rc != 0)
+       if (rc != 0) {
                cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
-       else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
-               cifs_dbg(VFS, "server returned bad net interface info buf\n");
-               rc = -EINVAL;
-       } else {
-               /* Dump info on first interface */
-               cifs_dbg(FYI, "Adapter Capability 0x%x\t",
-                       le32_to_cpu(out_buf->Capability));
-               cifs_dbg(FYI, "Link Speed %lld\n",
-                       le64_to_cpu(out_buf->LinkSpeed));
+               goto out;
        }
+
+       rc = parse_server_interfaces(out_buf, ret_data_len,
+                                    &iface_list, &iface_count);
+       if (rc)
+               goto out;
+
+       spin_lock(&ses->iface_lock);
+       kfree(ses->iface_list);
+       ses->iface_list = iface_list;
+       ses->iface_count = iface_count;
+       ses->iface_last_update = jiffies;
+       spin_unlock(&ses->iface_lock);
+
+out:
        kfree(out_buf);
        return rc;
 }
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+       struct cached_fid *cfid = container_of(work,
+                               struct cached_fid, lease_break);
+       mutex_lock(&cfid->fid_mutex);
+       if (cfid->is_valid) {
+               cifs_dbg(FYI, "clear cached root file handle\n");
+               SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+                          cfid->fid->volatile_fid);
+               cfid->is_valid = false;
+       }
+       mutex_unlock(&cfid->fid_mutex);
+}
 
 /*
  * Open the directory at the root of a share
@@ -331,13 +489,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
        struct cifs_open_parms oparams;
        int rc;
        __le16 srch_path = 0; /* Null - since an open of top of share */
-       u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+       u8 oplock = SMB2_OPLOCK_LEVEL_II;
 
-       mutex_lock(&tcon->prfid_mutex);
-       if (tcon->valid_root_fid) {
+       mutex_lock(&tcon->crfid.fid_mutex);
+       if (tcon->crfid.is_valid) {
                cifs_dbg(FYI, "found a cached root file handle\n");
-               memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
-               mutex_unlock(&tcon->prfid_mutex);
+               memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+               mutex_unlock(&tcon->crfid.fid_mutex);
                return 0;
        }
 
@@ -350,10 +508,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
 
        rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
        if (rc == 0) {
-               memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
-               tcon->valid_root_fid = true;
+               memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+               tcon->crfid.tcon = tcon;
+               tcon->crfid.is_valid = true;
        }
-       mutex_unlock(&tcon->prfid_mutex);
+       mutex_unlock(&tcon->crfid.fid_mutex);
        return rc;
 }
 
@@ -383,9 +542,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
        if (rc)
                return;
 
-#ifdef CONFIG_CIFS_STATS2
        SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
 
        SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
                        FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +593,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct cifs_fid fid;
 
-       if ((*full_path == 0) && tcon->valid_root_fid)
+       if ((*full_path == 0) && tcon->crfid.is_valid)
                return 0;
 
        utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -699,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 
        rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea,
                         len);
+       kfree(ea);
+
        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
 
        return rc;
@@ -2063,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock)
        if (!buf)
                return NULL;
 
-       buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
-       buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+       memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
        buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
 
        buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2090,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
        if (!buf)
                return NULL;
 
-       buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
-       buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+       memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
        buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
 
        buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2128,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
        if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
                return SMB2_OPLOCK_LEVEL_NOCHANGE;
        if (lease_key)
-               memcpy(lease_key, &lc->lcontext.LeaseKeyLow,
-                      SMB2_LEASE_KEY_SIZE);
+               memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
        return le32_to_cpu(lc->lcontext.LeaseState);
 }
 
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
                   struct smb_rqst *old_rq)
 {
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
 
        memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
        tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
 }
 
 /* Assumes:
- * rqst->rq_iov[0]  is rfc1002 length
- * rqst->rq_iov[1]  is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0]  is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
  */
 static struct scatterlist *
 init_sg(struct smb_rqst *rqst, u8 *sign)
 {
-       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+       unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        struct scatterlist *sg;
        unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
                return NULL;
 
        sg_init_table(sg, sg_len);
-       smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
-       for (i = 1; i < rqst->rq_nvec - 1; i++)
-               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
-                                               rqst->rq_iov[i+1].iov_len);
+       smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+       for (i = 1; i < rqst->rq_nvec; i++)
+               smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+                                               rqst->rq_iov[i].iov_len);
        for (j = 0; i < sg_len - 1; i++, j++) {
                unsigned int len, offset;
 
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
        return 1;
 }
 /*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+ * iov[0]   - transform header (associate data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
  * untouched.
  */
 static int
 crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
 {
        struct smb2_transform_hdr *tr_hdr =
-                       (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
        unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
        int rc = 0;
        struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ free_req:
        return rc;
 }
 
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
 static int
 smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                       struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        struct page **pages;
        struct smb2_transform_hdr *tr_hdr;
        unsigned int npages = old_rq->rq_npages;
-       unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+       unsigned int orig_len;
        int i;
        int rc = -ENOMEM;
 
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
                        goto err_free_pages;
        }
 
-       /* Make space for one extra iov to hold the transform header */
        iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
                            GFP_KERNEL);
        if (!iov)
                goto err_free_pages;
 
-       /* copy all iovs from the old except the 1st one (rfc1002 length) */
-       memcpy(&iov[2], &old_rq->rq_iov[1],
-                               sizeof(struct kvec) * (old_rq->rq_nvec - 1));
-       /* copy the rfc1002 iov */
-       iov[0].iov_base = old_rq->rq_iov[0].iov_base;
-       iov[0].iov_len  = old_rq->rq_iov[0].iov_len;
+       /* copy all iovs from the old */
+       memcpy(&iov[1], &old_rq->rq_iov[0],
+                               sizeof(struct kvec) * old_rq->rq_nvec);
 
        new_rq->rq_iov = iov;
        new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
        if (!tr_hdr)
                goto err_free_iov;
 
+       orig_len = smb_rqst_len(server, old_rq);
+
        /* fill the 2nd iov with a transform header */
        fill_transform_hdr(tr_hdr, orig_len, old_rq);
-       new_rq->rq_iov[1].iov_base = tr_hdr;
-       new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
-       /* Update rfc1002 header */
-       inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
-                       sizeof(struct smb2_transform_hdr));
+       new_rq->rq_iov[0].iov_base = tr_hdr;
+       new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
 
        /* copy pages form the old */
        for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
                put_page(rqst->rq_pages[i]);
        kfree(rqst->rq_pages);
        /* free transform header */
-       kfree(rqst->rq_iov[1].iov_base);
+       kfree(rqst->rq_iov[0].iov_base);
        kfree(rqst->rq_iov);
 }
 
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
                 unsigned int buf_data_size, struct page **pages,
                 unsigned int npages, unsigned int page_data_size)
 {
-       struct kvec iov[3];
+       struct kvec iov[2];
        struct smb_rqst rqst = {NULL};
        int rc;
 
-       iov[0].iov_base = NULL;
-       iov[0].iov_len = 0;
-       iov[1].iov_base = buf;
-       iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-       iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
-       iov[2].iov_len = buf_data_size;
+       iov[0].iov_base = buf;
+       iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+       iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+       iov[1].iov_len = buf_data_size;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 3;
+       rqst.rq_nvec = 2;
        rqst.rq_pages = pages;
        rqst.rq_npages = npages;
        rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
        if (rc)
                return rc;
 
-       memmove(buf, iov[2].iov_base, buf_data_size);
+       memmove(buf, iov[1].iov_base, buf_data_size);
 
        server->total_read = buf_data_size + page_data_size;
 
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
        .set_compression = smb2_set_compression,
        .mkdir = smb2_mkdir,
        .mkdir_setinfo = smb2_mkdir_setinfo,
+       .posix_mkdir = smb311_posix_mkdir,
        .rmdir = smb2_rmdir,
        .unlink = smb2_unlink,
        .rename = smb2_rename_path,
index af032e1a3eac7adaf0570f5923e0ba6164e8ed6b..3c92678cb45bc8fab4ce27cfcbadaef43586a3e9 100644 (file)
@@ -155,7 +155,7 @@ out:
 static int
 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
 {
-       int rc = 0;
+       int rc;
        struct nls_table *nls_codepage;
        struct cifs_ses *ses;
        struct TCP_Server_Info *server;
@@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
         * for those three - in the calling routine.
         */
        if (tcon == NULL)
-               return rc;
+               return 0;
 
        if (smb2_command == SMB2_TREE_CONNECT)
-               return rc;
+               return 0;
 
        if (tcon->tidStatus == CifsExiting) {
                /*
@@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
                        return -EAGAIN;
                }
 
-               wait_event_interruptible_timeout(server->response_q,
-                       (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+               rc = wait_event_interruptible_timeout(server->response_q,
+                                                     (server->tcpStatus != CifsNeedReconnect),
+                                                     10 * HZ);
+               if (rc < 0) {
+                       cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+                                " received by the process\n", __func__);
+                       return -ERESTARTSYS;
+               }
 
                /* are we still trying to reconnect? */
                if (server->tcpStatus != CifsNeedReconnect)
@@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
        }
 
        if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
-               return rc;
+               return 0;
 
        nls_codepage = load_nls_default();
 
@@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
                return rc;
 
        /* BB eventually switch this to SMB2 specific small buf size */
-       *request_buf = cifs_small_buf_get();
+       if (smb2_command == SMB2_SET_INFO)
+               *request_buf = cifs_buf_get();
+       else
+               *request_buf = cifs_small_buf_get();
        if (*request_buf == NULL) {
                /* BB should we add a retry in here if not a writepage? */
                return -ENOMEM;
@@ -602,6 +611,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
 int
 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_negotiate_req *req;
        struct smb2_negotiate_rsp *rsp;
        struct kvec iov[1];
@@ -673,7 +683,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
        /*
@@ -990,8 +1004,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
        req->PreviousSessionId = sess_data->previous_session;
 
        req->Flags = 0; /* MBZ */
-       /* to enable echos and oplocks */
-       req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+       /* enough to enable echos and oplocks and one max size write */
+       req->sync_hdr.CreditRequest = cpu_to_le16(130);
 
        /* only one of SMB2 signing flags may be set in SMB2 request */
        if (server->sign)
@@ -1027,6 +1042,7 @@ static int
 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
 {
        int rc;
+       struct smb_rqst rqst;
        struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
        struct kvec rsp_iov = { NULL, 0 };
 
@@ -1035,10 +1051,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
                cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
        req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
 
-       /* BB add code to build os and lm fields */
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = sess_data->iov;
+       rqst.rq_nvec = 2;
 
-       rc = smb2_send_recv(sess_data->xid, sess_data->ses,
-                           sess_data->iov, 2,
+       /* BB add code to build os and lm fields */
+       rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+                           &rqst,
                            &sess_data->buf0_type,
                            CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
        cifs_small_buf_release(sess_data->iov[0].iov_base);
@@ -1376,6 +1395,7 @@ out:
 int
 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
 {
+       struct smb_rqst rqst;
        struct smb2_logoff_req *req; /* response is also trivial struct */
        int rc = 0;
        struct TCP_Server_Info *server;
@@ -1413,7 +1433,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        /*
         * No tcon so can't do
@@ -1443,6 +1467,7 @@ int
 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
          struct cifs_tcon *tcon, const struct nls_table *cp)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_connect_req *req;
        struct smb2_tree_connect_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -1499,7 +1524,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
            !smb3_encryption_required(tcon))
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
 
@@ -1563,6 +1592,7 @@ tcon_error_exit:
 int
 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
 {
+       struct smb_rqst rqst;
        struct smb2_tree_disconnect_req *req; /* response is trivial */
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1623,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
        if (rc)
                cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1682,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
 
 static int
 add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
-                 unsigned int *num_iovec, __u8 *oplock)
+                 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
 {
        struct smb2_create_req *req = iov[0].iov_base;
        unsigned int num = *num_iovec;
 
-       iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
+       iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = server->vals->create_lease_size;
@@ -1886,11 +1920,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
        return 0;
 }
 
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb)
+{
+       struct smb_rqst rqst;
+       struct smb2_create_req *req;
+       struct smb2_create_rsp *rsp;
+       struct TCP_Server_Info *server;
+       struct cifs_ses *ses = tcon->ses;
+       struct kvec iov[3]; /* request, path and at least one open context */
+       struct kvec rsp_iov = {NULL, 0};
+       int resp_buftype;
+       int uni_path_len;
+       __le16 *copy_path = NULL;
+       int copy_size;
+       int rc = 0;
+       unsigned int n_iov = 2;
+       __u32 file_attributes = 0;
+       char *pc_buf = NULL;
+       int flags = 0;
+       unsigned int total_len;
+       __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+       if (!path)
+               return -ENOMEM;
+
+       cifs_dbg(FYI, "mkdir\n");
+
+       if (ses && (ses->server))
+               server = ses->server;
+       else {
+               kfree(path);
+               return -EIO;
+       }
+
+       rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+       if (rc) {
+               kfree(path);
+               return rc;
+       }
+
+       if (smb3_encryption_required(tcon))
+               flags |= CIFS_TRANSFORM_REQ;
+
+       req->ImpersonationLevel = IL_IMPERSONATION;
+       req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+       /* File attributes ignored on open (used in create though) */
+       req->FileAttributes = cpu_to_le32(file_attributes);
+       req->ShareAccess = FILE_SHARE_ALL_LE;
+       req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+       req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+       iov[0].iov_base = (char *)req;
+       /* -1 since last byte is buf[0] which is sent below (path) */
+       iov[0].iov_len = total_len - 1;
+
+       req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+       /* [MS-SMB2] 2.2.13 NameOffset:
+        * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+        * the SMB2 header, the file name includes a prefix that will
+        * be processed during DFS name normalization as specified in
+        * section 3.3.5.9. Otherwise, the file name is relative to
+        * the share that is identified by the TreeId in the SMB2
+        * header.
+        */
+       if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+               int name_len;
+
+               req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+               rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+                                                &name_len,
+                                                tcon->treeName, path);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       return rc;
+               }
+               req->NameLength = cpu_to_le16(name_len * 2);
+               uni_path_len = copy_size;
+               path = copy_path;
+       } else {
+               uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+               /* MUST set path len (NameLength) to 0 opening root of share */
+               /* MUST set NameLength to 0 when opening root of share */
+               if (uni_path_len % 8 != 0) {
+                       copy_size = roundup(uni_path_len, 8);
+                       copy_path = kzalloc(copy_size, GFP_KERNEL);
+                       if (!copy_path) {
+                               cifs_small_buf_release(req);
+                               return -ENOMEM;
+                       }
+                       memcpy((char *)copy_path, (const char *)path,
+                              uni_path_len);
+                       uni_path_len = copy_size;
+                       path = copy_path;
+               }
+       }
+
+       iov[1].iov_len = uni_path_len;
+       iov[1].iov_base = path;
+       req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+       if (tcon->posix_extensions) {
+               if (n_iov > 2) {
+                       struct create_context *ccontext =
+                           (struct create_context *)iov[n_iov-1].iov_base;
+                       ccontext->Next =
+                               cpu_to_le32(iov[n_iov-1].iov_len);
+               }
+
+               rc = add_posix_context(iov, &n_iov, mode);
+               if (rc) {
+                       cifs_small_buf_release(req);
+                       kfree(copy_path);
+                       return rc;
+               }
+               pc_buf = iov[n_iov-1].iov_base;
+       }
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+                           &rsp_iov);
+
+       cifs_small_buf_release(req);
+       rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+       if (rc != 0) {
+               cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+               trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+                                   CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+               goto smb311_mkdir_exit;
+       }
+
+       trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+                                   ses->Suid, CREATE_NOT_FILE,
+                                   FILE_WRITE_ATTRIBUTES);
+
+       SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+       /* Eventually save off posix-specific response info and timestamps */
+
+smb311_mkdir_exit:
+       kfree(copy_path);
+       kfree(pc_buf);
+       free_rsp_buf(resp_buftype, rsp);
+       return rc;
+}
+#endif /* SMB311 */
+
 int
 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
          __u8 *oplock, struct smb2_file_all_info *buf,
          struct kvec *err_iov, int *buftype)
 {
+       struct smb_rqst rqst;
        struct smb2_create_req *req;
        struct smb2_create_rsp *rsp;
        struct TCP_Server_Info *server;
@@ -1993,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
        else {
-               rc = add_lease_context(server, iov, &n_iov, oplock);
+               rc = add_lease_context(server, iov, &n_iov,
+                                      oparms->fid->lease_key, oplock);
                if (rc) {
                        cifs_small_buf_release(req);
                        kfree(copy_path);
@@ -2043,7 +2232,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        }
 #endif /* SMB311 */
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2292,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           char *in_data, u32 indatalen,
           char **out_data, u32 *plen /* returned data len */)
 {
+       struct smb_rqst rqst;
        struct smb2_ioctl_req *req;
        struct smb2_ioctl_rsp *rsp;
        struct cifs_ses *ses;
@@ -2189,7 +2383,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
                req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
 
-       rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_iov;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2472,7 @@ int
 SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
                 u64 persistent_fid, u64 volatile_fid, int flags)
 {
+       struct smb_rqst rqst;
        struct smb2_close_req *req;
        struct smb2_close_rsp *rsp;
        struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2500,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
 
@@ -2387,6 +2590,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
           u32 additional_info, size_t output_len, size_t min_len, void **data,
                u32 *dlen)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_req *req;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -2427,7 +2631,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
 
@@ -2594,11 +2802,10 @@ SMB2_echo(struct TCP_Server_Info *server)
 {
        struct smb2_echo_req *req;
        int rc = 0;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { .rq_iov = iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        cifs_dbg(FYI, "In echo request\n");
 
@@ -2614,11 +2821,8 @@ SMB2_echo(struct TCP_Server_Info *server)
 
        req->sync_hdr.CreditRequest = cpu_to_le16(1);
 
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len;
+       iov[0].iov_base = (char *)req;
 
        rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
                             server, CIFS_ECHO_OP);
@@ -2633,6 +2837,7 @@ int
 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
           u64 volatile_fid)
 {
+       struct smb_rqst rqst;
        struct smb2_flush_req *req;
        struct cifs_ses *ses = tcon->ses;
        struct kvec iov[1];
@@ -2660,7 +2865,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc != 0) {
@@ -2848,10 +3057,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
        struct smb2_sync_hdr *shdr;
        struct cifs_io_parms io_parms;
        struct smb_rqst rqst = { .rq_iov = rdata->iov,
-                                .rq_nvec = 2 };
+                                .rq_nvec = 1 };
        struct TCP_Server_Info *server;
        unsigned int total_len;
-       __be32 req_len;
 
        cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
                 __func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3090,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
        if (smb3_encryption_required(io_parms.tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       req_len = cpu_to_be32(total_len);
-
-       rdata->iov[0].iov_base = &req_len;
-       rdata->iov[0].iov_len = sizeof(__be32);
-       rdata->iov[1].iov_base = buf;
-       rdata->iov[1].iov_len = total_len;
+       rdata->iov[0].iov_base = buf;
+       rdata->iov[0].iov_len = total_len;
 
        shdr = (struct smb2_sync_hdr *)buf;
 
@@ -2926,6 +3130,7 @@ int
 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
          unsigned int *nbytes, char **buf, int *buf_type)
 {
+       struct smb_rqst rqst;
        int resp_buftype, rc = -EACCES;
        struct smb2_read_plain_req *req = NULL;
        struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3151,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3271,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct smb2_sync_hdr *shdr;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
-       struct kvec iov[2];
+       struct kvec iov[1];
        struct smb_rqst rqst = { };
        unsigned int total_len;
-       __be32 rfc1002_marker;
 
        rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
        if (rc) {
@@ -3137,15 +3345,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
                v1->length = cpu_to_le32(wdata->mr->mr->length);
        }
 #endif
-       /* 4 for rfc1002 length field and 1 for Buffer */
-       iov[0].iov_len = 4;
-       rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
-       iov[0].iov_base = &rfc1002_marker;
-       iov[1].iov_len = total_len - 1;
-       iov[1].iov_base = (char *)req;
+       iov[0].iov_len = total_len - 1;
+       iov[0].iov_base = (char *)req;
 
        rqst.rq_iov = iov;
-       rqst.rq_nvec = 2;
+       rqst.rq_nvec = 1;
        rqst.rq_pages = wdata->pages;
        rqst.rq_offset = wdata->page_offset;
        rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3357,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_tailsz = wdata->tailsz;
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr) {
-               iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+               iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
                rqst.rq_npages = 0;
        }
 #endif
@@ -3210,6 +3414,7 @@ int
 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
           unsigned int *nbytes, struct kvec *iov, int n_vec)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_write_req *req = NULL;
        struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3456,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        /* 1 for Buffer */
        iov[0].iov_len = total_len - 1;
 
-       rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = n_vec + 1;
+
+       rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
                            &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3532,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
                     u64 persistent_fid, u64 volatile_fid, int index,
                     struct cifs_search_info *srch_inf)
 {
+       struct smb_rqst rqst;
        struct smb2_query_directory_req *req;
        struct smb2_query_directory_rsp *rsp = NULL;
        struct kvec iov[2];
@@ -3395,7 +3605,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_base = (char *)(req->Buffer);
        iov[1].iov_len = len;
 
-       rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
 
@@ -3454,6 +3668,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
               u8 info_type, u32 additional_info, unsigned int num,
                void **data, unsigned int *size)
 {
+       struct smb_rqst rqst;
        struct smb2_set_info_req *req;
        struct smb2_set_info_rsp *rsp = NULL;
        struct kvec *iov;
@@ -3509,9 +3724,13 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                iov[i].iov_len = size[i];
        }
 
-       rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = num;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
                            &rsp_iov);
-       cifs_small_buf_release(req);
+       cifs_buf_release(req);
        rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
 
        if (rc != 0) {
@@ -3664,6 +3883,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
                  const u64 persistent_fid, const u64 volatile_fid,
                  __u8 oplock_level)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3912,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
@@ -3755,6 +3979,7 @@ int
 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3773,7 +3998,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4027,7 @@ int
 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
              u64 persistent_fid, u64 volatile_fid, int level)
 {
+       struct smb_rqst rqst;
        struct smb2_query_info_rsp *rsp = NULL;
        struct kvec iov;
        struct kvec rsp_iov;
@@ -3829,7 +4059,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
-       rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = &iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(iov.iov_base);
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4102,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
           const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
           const __u32 num_lock, struct smb2_lock_element *buf)
 {
+       struct smb_rqst rqst;
        int rc = 0;
        struct smb2_lock_req *req = NULL;
        struct kvec iov[2];
@@ -3900,7 +4135,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        iov[1].iov_len = count;
 
        cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
-       rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 2;
+
+       rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
                            &rsp_iov);
        cifs_small_buf_release(req);
        if (rc) {
@@ -3934,6 +4174,7 @@ int
 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
                 __u8 *lease_key, const __le32 lease_state)
 {
+       struct smb_rqst rqst;
        int rc;
        struct smb2_lease_ack *req = NULL;
        struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4205,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
        iov[0].iov_base = (char *)req;
        iov[0].iov_len = total_len;
 
-       rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+       memset(&rqst, 0, sizeof(struct smb_rqst));
+       rqst.rq_iov = iov;
+       rqst.rq_nvec = 1;
+
+       rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
 
        if (rc) {
index a345560001ced354c550d6ab2f507a18d72ff9d2..a671adcc44a6c8c6d460585c9b2c8d6b546fc015 100644 (file)
@@ -678,16 +678,14 @@ struct create_context {
 #define SMB2_LEASE_KEY_SIZE 16
 
 struct lease_context {
-       __le64 LeaseKeyLow;
-       __le64 LeaseKeyHigh;
+       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
        __le32 LeaseState;
        __le32 LeaseFlags;
        __le64 LeaseDuration;
 } __packed;
 
 struct lease_context_v2 {
-       __le64 LeaseKeyLow;
-       __le64 LeaseKeyHigh;
+       u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
        __le32 LeaseState;
        __le32 LeaseFlags;
        __le64 LeaseDuration;
@@ -851,8 +849,11 @@ struct validate_negotiate_info_rsp {
        __le16 Dialect; /* Dialect in use for the connection */
 } __packed;
 
-#define RSS_CAPABLE    0x00000001
-#define RDMA_CAPABLE   0x00000002
+#define RSS_CAPABLE    cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE   cpu_to_le32(0x00000002)
+
+#define INTERNETWORK   cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
 
 struct network_interface_info_ioctl_rsp {
        __le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +861,21 @@ struct network_interface_info_ioctl_rsp {
        __le32 Capability; /* RSS or RDMA Capable */
        __le32 Reserved;
        __le64 LinkSpeed;
-       char    SockAddr_Storage[128];
+       __le16 Family;
+       __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+       __be16 Port;
+       __be32 IPv4Address;
+       __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+       __be16 Port;
+       __be32 FlowInfo;
+       __u8   IPv6Address[16];
+       __be32 ScopeId;
 } __packed;
 
 #define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
index c84020057bd816c31a69fd173746374c5c224c8a..6e6a4f2ec890dc0f0ae02b53c9326ae379b02bf7 100644 (file)
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
                              struct cifs_sb_info *cifs_sb, bool set_alloc);
 extern int smb2_set_file_info(struct inode *inode, const char *full_path,
                              FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+                              umode_t mode, struct cifs_tcon *tcon,
+                              const char *full_path,
+                              struct cifs_sb_info *cifs_sb);
 extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *name, struct cifs_sb_info *cifs_sb);
 extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
 extern void smb2_reconnect_server(struct work_struct *work);
 extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
+                                 struct smb_rqst *rqst);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
index 349d5ccf854c26999ed8554f6d19cf64a89a33a3..719d55e63d88fe9efc307d16813ffe3f7b9d6762 100644 (file)
@@ -171,10 +171,10 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
        unsigned char *sigptr = smb2_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
+       struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
+       struct smb_rqst drqst;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
        if (!ses) {
@@ -192,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        }
 
        rc = crypto_shash_setkey(server->secmech.hmacsha256,
-               ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
+                                ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
                return rc;
        }
 
-       rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash);
+       rc = crypto_shash_init(shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init sha256", __func__);
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index,  server, sigptr,
-               &server->secmech.sdeschmacsha256->shash);
+       /*
+        * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+        * data, that is, iov[0] should not contain an rfc1002 length.
+        *
+        * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+        * __cifs_calc_signature().
+        */
+       drqst = *rqst;
+       if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+               rc = crypto_shash_update(shash, iov[0].iov_base,
+                                        iov[0].iov_len);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: Could not update with payload\n",
+                                __func__);
+                       return rc;
+               }
+               drqst.rq_iov++;
+               drqst.rq_nvec--;
+       }
 
+       rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
        if (!rc)
                memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
@@ -410,14 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses)
 int
 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
-       int rc = 0;
+       int rc;
        unsigned char smb3_signature[SMB2_CMACAES_SIZE];
        unsigned char *sigptr = smb3_signature;
        struct kvec *iov = rqst->rq_iov;
-       int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
-       struct smb2_sync_hdr *shdr =
-               (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+       struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        struct cifs_ses *ses;
+       struct shash_desc *shash = &server->secmech.sdesccmacaes->shash;
+       struct smb_rqst drqst;
 
        ses = smb2_find_smb_ses(server, shdr->SessionId);
        if (!ses) {
@@ -429,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
 
        rc = crypto_shash_setkey(server->secmech.cmacaes,
-               ses->smb3signingkey, SMB2_CMACAES_SIZE);
-
+                                ses->smb3signingkey, SMB2_CMACAES_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
                return rc;
@@ -441,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
         * so unlike smb2 case we do not have to check here if secmech are
         * initialized
         */
-       rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash);
+       rc = crypto_shash_init(shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
                return rc;
        }
 
-       rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
-                                  &server->secmech.sdesccmacaes->shash);
+       /*
+        * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+        * data, that is, iov[0] should not contain an rfc1002 length.
+        *
+        * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+        * __cifs_calc_signature().
+        */
+       drqst = *rqst;
+       if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+               rc = crypto_shash_update(shash, iov[0].iov_base,
+                                        iov[0].iov_len);
+               if (rc) {
+                       cifs_dbg(VFS, "%s: Could not update with payload\n",
+                                __func__);
+                       return rc;
+               }
+               drqst.rq_iov++;
+               drqst.rq_nvec--;
+       }
 
+       rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
        if (!rc)
                memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
 
@@ -462,7 +497,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
        int rc = 0;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
 
        if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
            server->tcpStatus == CifsNeedNegotiate)
@@ -552,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
 
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
+       kref_init(&temp->refcount);
        temp->mid = le64_to_cpu(shdr->MessageId);
        temp->pid = current->pid;
        temp->command = shdr->Command; /* Always LE */
@@ -635,7 +671,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +692,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        int rc;
        struct smb2_sync_hdr *shdr =
-                       (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+                       (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;
 
        smb2_seq_num_into_buf(server, shdr);
index e459c97151b34e684dc3f3cbbc36772fee5aaee5..c55ea4e6201bbf08041968e483ae26d0183a5f3a 100644 (file)
@@ -18,6 +18,7 @@
 #include "smbdirect.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
+#include "smb2proto.h"
 
 static struct smbd_response *get_empty_queue_buffer(
                struct smbd_connection *info);
@@ -2082,12 +2083,13 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
  * rqst: the data to write
  * return value: 0 if successfully write, otherwise error code
  */
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
+       struct smbd_connection *info = server->smbd_conn;
        struct kvec vec;
        int nvecs;
        int size;
-       unsigned int buflen = 0, remaining_data_length;
+       unsigned int buflen, remaining_data_length;
        int start, i, j;
        int max_iov_size =
                info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2113,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
                return -EINVAL;
        }
-       iov = &rqst->rq_iov[1];
-
-       /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec-1; i++) {
-               buflen += iov[i].iov_len;
-       }
 
        /*
         * Add in the page array if there is one. The caller needs to set
         * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
         * ends at page boundary
         */
-       if (rqst->rq_npages) {
-               if (rqst->rq_npages == 1)
-                       buflen += rqst->rq_tailsz;
-               else
-                       buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
-                                       rqst->rq_offset + rqst->rq_tailsz;
-       }
+       buflen = smb_rqst_len(server, rqst);
 
        if (buflen + sizeof(struct smbd_data_transfer) >
                info->max_fragmented_send_size) {
@@ -2139,6 +2129,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
                goto done;
        }
 
+       iov = &rqst->rq_iov[1];
+
        cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
        for (i = 0; i < rqst->rq_nvec-1; i++)
                dump_smb(iov[i].iov_base, iov[i].iov_len);
index 1e419c21dc60527c753747bee44625cafdc7ca3d..a11096254f2965d02478132af55e9ccf6613c578 100644
@@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
 
 enum mr_state {
        MR_READY,
@@ -332,7 +332,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct smbd_connection *info) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
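
Taking a TCP_Server_Info here (and keeping the CONFIG_CIFS_SMB_DIRECT=n stub signature in lockstep, so callers compile either way) lets the RDMA path share smb_rqst_len() with the socket path, since that helper consults server->vals->header_preamble_size to decide whether iov[0] is a 4-byte RFC1002 length to skip. Inside smbd_send() the connection is then simply derived, as the hunk above shows:

        struct smbd_connection *info = server->smbd_conn;
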
index 61e74d455d90625339591a3b47560f5bdb50c343..67e413f6ee4d8fd1dbd1eede0a7b0a9e6442a9e3 100644
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name,    \
        TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
 
 DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
 
 DECLARE_EVENT_CLASS(smb3_open_done_class,
        TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name,  \
        TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
 
 DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
 
 #endif /* _CIFS_TRACE_H */
 
index 1f1a68f8911001bae86976171e44a09402982d92..a341ec839c83de8ba9b9a10bb31f3b7ce8d45e8f 100644
@@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 
        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
+       kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
@@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
        return temp;
 }
 
+static void _cifs_mid_q_entry_release(struct kref *refcount)
+{
+       struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
+                                              refcount);
+
+       mempool_free(mid, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+       spin_lock(&GlobalMid_Lock);
+       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+       spin_unlock(&GlobalMid_Lock);
+}
+
 void
 DeleteMidQEntry(struct mid_q_entry *midEntry)
 {
@@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                }
        }
 #endif
-       mempool_free(midEntry, cifs_mid_poolp);
+       cifs_mid_q_entry_release(midEntry);
 }
 
 void
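
The kref_init() additions (in both smb2_mid_entry_alloc() and AllocMidQEntry()) together with the new cifs_mid_q_entry_release() convert mid_q_entry to reference counting, so the demultiplex thread and the issuing thread can each hold the entry and the last put frees it; the real code additionally serializes the put under GlobalMid_Lock and returns the entry to cifs_mid_poolp. A minimal sketch of the kref pattern, with hypothetical names:

        struct my_entry {
                struct kref refcount;
                /* ... payload ... */
        };

        static void my_entry_free(struct kref *ref)
        {
                struct my_entry *e = container_of(ref, struct my_entry,
                                                  refcount);
                kfree(e);       /* the cifs code uses a mempool instead */
        }

        struct my_entry *e = kzalloc(sizeof(*e), GFP_NOFS);
        kref_init(&e->refcount);                /* count starts at 1   */
        /* ... hand out references ... */
        kref_put(&e->refcount, my_entry_free);  /* last holder frees   */
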
@@ -201,15 +217,25 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
        return 0;
 }
 
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
        unsigned int i;
-       struct kvec *iov = rqst->rq_iov;
+       struct kvec *iov;
+       int nvec;
        unsigned long buflen = 0;
 
+       if (server->vals->header_preamble_size == 0 &&
+           rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
+               iov = &rqst->rq_iov[1];
+               nvec = rqst->rq_nvec - 1;
+       } else {
+               iov = rqst->rq_iov;
+               nvec = rqst->rq_nvec;
+       }
+
        /* total up iov array first */
-       for (i = 0; i < rqst->rq_nvec; i++)
+       for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;
 
        /*
@@ -236,70 +262,88 @@ rqst_len(struct smb_rqst *rqst)
 }
 
 static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+               struct smb_rqst *rqst)
 {
-       int rc;
-       struct kvec *iov = rqst->rq_iov;
-       int n_vec = rqst->rq_nvec;
-       unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
-       unsigned long send_length;
-       unsigned int i;
+       int rc = 0;
+       struct kvec *iov;
+       int n_vec;
+       unsigned int send_length = 0;
+       unsigned int i, j;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
+       __be32 rfc1002_marker;
+
        if (cifs_rdma_enabled(server) && server->smbd_conn) {
-               rc = smbd_send(server->smbd_conn, rqst);
+               rc = smbd_send(server, rqst);
                goto smbd_done;
        }
        if (ssocket == NULL)
                return -ENOTSOCK;
 
-       /* sanity check send length */
-       send_length = rqst_len(rqst);
-       if (send_length != smb_buf_length + 4) {
-               WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
-                       send_length, smb_buf_length);
-               return -EIO;
-       }
-
-       if (n_vec < 2)
-               return -EIO;
-
-       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
-       dump_smb(iov[0].iov_base, iov[0].iov_len);
-       dump_smb(iov[1].iov_base, iov[1].iov_len);
-
        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       size = 0;
-       for (i = 0; i < n_vec; i++)
-               size += iov[i].iov_len;
+       for (j = 0; j < num_rqst; j++)
+               send_length += smb_rqst_len(server, &rqst[j]);
+       rfc1002_marker = cpu_to_be32(send_length);
 
-       iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+       /* Generate an RFC1002 marker for SMB2+ */
+       if (server->vals->header_preamble_size == 0) {
+               struct kvec hiov = {
+                       .iov_base = &rfc1002_marker,
+                       .iov_len  = 4
+               };
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+                             1, 4);
+               rc = smb_send_kvec(server, &smb_msg, &sent);
+               if (rc < 0)
+                       goto uncork;
 
-       rc = smb_send_kvec(server, &smb_msg, &sent);
-       if (rc < 0)
-               goto uncork;
+               total_len += sent;
+               send_length += 4;
+       }
 
-       total_len += sent;
+       cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
 
-       /* now walk the page array and send each page in it */
-       for (i = 0; i < rqst->rq_npages; i++) {
-               struct bio_vec bvec;
+       for (j = 0; j < num_rqst; j++) {
+               iov = rqst[j].rq_iov;
+               n_vec = rqst[j].rq_nvec;
+
+               size = 0;
+               for (i = 0; i < n_vec; i++) {
+                       dump_smb(iov[i].iov_base, iov[i].iov_len);
+                       size += iov[i].iov_len;
+               }
 
-               bvec.bv_page = rqst->rq_pages[i];
-               rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+                             iov, n_vec, size);
 
-               iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
-                             &bvec, 1, bvec.bv_len);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
-                       break;
+                       goto uncork;
 
                total_len += sent;
+
+               /* now walk the page array and send each page in it */
+               for (i = 0; i < rqst[j].rq_npages; i++) {
+                       struct bio_vec bvec;
+
+                       bvec.bv_page = rqst[j].rq_pages[i];
+                       rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+                                            &bvec.bv_offset);
+
+                       iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+                                     &bvec, 1, bvec.bv_len);
+                       rc = smb_send_kvec(server, &smb_msg, &sent);
+                       if (rc < 0)
+                               break;
+
+                       total_len += sent;
+               }
        }
 
 uncork:
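
With no RFC1002 preamble carried in the SMB2+ iovs (header_preamble_size == 0), __smb_send_rqst() now materializes the 4-byte big-endian length itself and sends it ahead of the compound: a 72-byte total payload, for example, puts 00 00 00 48 on the wire first. A condensed sketch of the framing, using the names from the hunk above:

        __be32 marker;
        unsigned int len = 0;
        int j;

        for (j = 0; j < num_rqst; j++)
                len += smb_rqst_len(server, &rqst[j]);  /* payload only */
        marker = cpu_to_be32(len);      /* 00 00 00 48 when len == 72   */
        /* a kvec { &marker, 4 } goes out first, then each rqst's iovs
         * and pages, all under TCP_CORK so it leaves as one stream */
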
@@ -308,9 +352,9 @@ uncork:
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                                (char *)&val, sizeof(val));
 
-       if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+       if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
-                        smb_buf_length + 4, total_len);
+                        send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
@@ -335,7 +379,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        int rc;
 
        if (!(flags & CIFS_TRANSFORM_REQ))
-               return __smb_send_rqst(server, rqst);
+               return __smb_send_rqst(server, 1, rqst);
 
        if (!server->ops->init_transform_rq ||
            !server->ops->free_transform_rq) {
@@ -347,7 +391,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
        if (rc)
                return rc;
 
-       rc = __smb_send_rqst(server, &cur_rqst);
+       rc = __smb_send_rqst(server, 1, &cur_rqst);
        server->ops->free_transform_rq(&cur_rqst);
        return rc;
 }
@@ -365,7 +409,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;
 
-       return __smb_send_rqst(server, &rqst);
+       return __smb_send_rqst(server, 1, &rqst);
 }
 
 static int
@@ -730,7 +774,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
-
        rc = wait_for_free_request(ses->server, timeout, optype);
        if (rc)
                return rc;
@@ -766,8 +809,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
-               smb311_update_preauth_hash(ses, rqst->rq_iov+1,
-                                          rqst->rq_nvec-1);
+               smb311_update_preauth_hash(ses, rqst->rq_iov,
+                                          rqst->rq_nvec);
 #endif
 
        if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +855,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
 #ifdef CONFIG_CIFS_SMB311
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
-                       .iov_base = buf,
-                       .iov_len = midQ->resp_buf_size
+                       .iov_base = resp_iov->iov_base,
+                       .iov_len = resp_iov->iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }
@@ -872,49 +915,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
-              struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
-              const int flags, struct kvec *resp_iov)
-{
-       struct smb_rqst rqst;
-       struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
-       int rc;
-       int i;
-       __u32 count;
-       __be32 rfc1002_marker;
-
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
-               new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
-                                       GFP_KERNEL);
-               if (!new_iov)
-                       return -ENOMEM;
-       } else
-               new_iov = s_iov;
-
-       /* 1st iov is an RFC1002 Session Message length */
-       memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
-       count = 0;
-       for (i = 1; i < n_vec + 1; i++)
-               count += new_iov[i].iov_len;
-
-       rfc1002_marker = cpu_to_be32(count);
-
-       new_iov[0].iov_base = &rfc1002_marker;
-       new_iov[0].iov_len = 4;
-
-       memset(&rqst, 0, sizeof(struct smb_rqst));
-       rqst.rq_iov = new_iov;
-       rqst.rq_nvec = n_vec + 1;
-
-       rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
-       if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
-               kfree(new_iov);
-       return rc;
-}
-
 int
 SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
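
With smb2_send_recv() removed, SMB2 callers hand cifs_send_recv() a request whose iov[0] begins directly at the SMB2 sync header; the transport both generates the RFC1002 marker and, for SMB3.1.1, feeds the unadorned iovs straight into the preauth hash (hence the rq_iov+1/rq_nvec-1 adjustments dropped above). A hedged sketch of a caller, where req/payload and their lengths are hypothetical:

        struct kvec iov[2];
        struct smb_rqst rqst = {};
        struct kvec rsp_iov;
        int resp_buftype;

        iov[0].iov_base = req;          /* SMB2 header + fixed body      */
        iov[0].iov_len  = req_len;
        iov[1].iov_base = payload;      /* optional variable-length part */
        iov[1].iov_len  = payload_len;
        rqst.rq_iov  = iov;
        rqst.rq_nvec = 2;

        rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
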
index ceb1031f1cac948e74a970f02058cfeb52d7a351..08d3bd602f73d8f219ee1f259c0cbaa839245c56 100644
@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static struct wait_queue_head *
-eventfd_get_poll_head(struct file *file, __poll_t events)
-{
-       struct eventfd_ctx *ctx = file->private_data;
-
-       return &ctx->wqh;
-}
-
-static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
 {
        struct eventfd_ctx *ctx = file->private_data;
        __poll_t events = 0;
        u64 count;
 
+       poll_wait(file, &ctx->wqh, wait);
+
        /*
         * All writes to ctx->count occur within ctx->wqh.lock.  This read
         * can be done outside ctx->wqh.lock because we know that poll_wait
@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
        count = READ_ONCE(ctx->count);
 
        if (count > 0)
-               events |= (EPOLLIN & eventmask);
+               events |= EPOLLIN;
        if (count == ULLONG_MAX)
                events |= EPOLLERR;
        if (ULLONG_MAX - 1 > count)
-               events |= (EPOLLOUT & eventmask);
+               events |= EPOLLOUT;
 
        return events;
 }
@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
        .show_fdinfo    = eventfd_show_fdinfo,
 #endif
        .release        = eventfd_release,
-       .get_poll_head  = eventfd_get_poll_head,
-       .poll_mask      = eventfd_poll_mask,
+       .poll           = eventfd_poll,
        .read           = eventfd_read,
        .write          = eventfd_write,
        .llseek         = noop_llseek,
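
This hunk (and the matching eventpoll and pipe hunks below) reverts the short-lived ->get_poll_head/->poll_mask split back to a classic ->poll method. The contract of ->poll is that it must register the file's wait queue with the poller via poll_wait() before sampling state, as in this generic sketch (data_ready()/space_free() are hypothetical helpers):

        static __poll_t my_poll(struct file *file, poll_table *wait)
        {
                struct my_ctx *ctx = file->private_data;
                __poll_t events = 0;

                poll_wait(file, &ctx->wqh, wait);  /* register first */

                if (data_ready(ctx))    /* then report current readiness */
                        events |= EPOLLIN;
                if (space_free(ctx))
                        events |= EPOLLOUT;
                return events;
        }
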
index ea4436f409fb005a16edeca3f49f29f955db0171..67db22fe99c5ce8bf0ba606c0a45f221cbf69b38 100644
@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
        return 0;
 }
 
-static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
-               __poll_t eventmask)
-{
-       struct eventpoll *ep = file->private_data;
-       return &ep->poll_wait;
-}
-
-static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
 {
        struct eventpoll *ep = file->private_data;
        int depth = 0;
 
+       /* Insert inside our poll wait queue */
+       poll_wait(file, &ep->poll_wait, wait);
+
        /*
         * Proceed to find out if wanted events are really available inside
         * the ready list.
@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
        .show_fdinfo    = ep_show_fdinfo,
 #endif
        .release        = ep_eventpoll_release,
-       .get_poll_head  = ep_eventpoll_get_poll_head,
-       .poll_mask      = ep_eventpoll_poll_mask,
+       .poll           = ep_eventpoll_poll,
        .llseek         = noop_llseek,
 };
 
index cc40802ddfa856d14aefc8ef75ec9e61b89864b0..00e759f051619cfd37a58108265bc9f798554a21 100644
@@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long,
                              unsigned long);
 extern unsigned long ext2_count_free_blocks (struct super_block *);
 extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
 extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
                                                    unsigned int block_group,
                                                    struct buffer_head ** bh);
@@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page
 extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
 extern void ext2_free_inode (struct inode *);
 extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
 extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
 
 /* inode.c */
index 25ab1274090f8532254e783def084bccd24a21c4..8ff53f8da3bcc414fdad44ac3bb76a88258e4d51 100644
@@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb,
                        set_opt (opts->s_mount_opt, NO_UID32);
                        break;
                case Opt_nocheck:
+                       ext2_msg(sb, KERN_WARNING,
+                               "Option nocheck/check=none is deprecated and"
+                               " will be removed in June 2020.");
                        clear_opt (opts->s_mount_opt, CHECK);
                        break;
                case Opt_debug:
@@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
        new_opts.s_resgid = sbi->s_resgid;
        spin_unlock(&sbi->s_lock);
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, &new_opts))
                return -EINVAL;
 
index b00481c475cb1ea63195ef970bde30af613364c6..e68cefe082612c84b39c612ef3ff32fca8ac3d25 100644
@@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
        unsigned int bit, bit_max;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t start, tmp;
-       int flex_bg = 0;
 
        J_ASSERT_BH(bh, buffer_locked(bh));
 
@@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
 
        start = ext4_group_first_block_no(sb, block_group);
 
-       if (ext4_has_feature_flex_bg(sb))
-               flex_bg = 1;
-
        /* Set bits for block and inode bitmaps, and inode table */
        tmp = ext4_block_bitmap(sb, gdp);
-       if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+       if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 
        tmp = ext4_inode_bitmap(sb, gdp);
-       if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+       if (ext4_block_in_group(sb, tmp, block_group))
                ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
 
        tmp = ext4_inode_table(sb, gdp);
        for (; tmp < ext4_inode_table(sb, gdp) +
                     sbi->s_itb_per_group; tmp++) {
-               if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+               if (ext4_block_in_group(sb, tmp, block_group))
                        ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
        }
 
@@ -442,7 +438,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
                goto verify;
        }
        ext4_lock_group(sb, block_group);
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+               if (block_group == 0) {
+                       ext4_unlock_group(sb, block_group);
+                       unlock_buffer(bh);
+                       ext4_error(sb, "Block bitmap for bg 0 marked "
+                                  "uninitialized");
+                       err = -EFSCORRUPTED;
+                       goto out;
+               }
                err = ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
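
The same two-part guard recurs in the ialloc and mballoc hunks further down: the *_UNINIT flags are only trustworthy when group-descriptor checksums are in use, and block group 0 can never legitimately be uninitialized since it carries the superblock and descriptors, so seeing it flagged means the image is corrupted. In sketch form:

        if (ext4_has_group_desc_csum(sb) &&
            (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                if (block_group == 0)
                        return -EFSCORRUPTED;   /* bg 0 is never lazy-init */
                /* otherwise safe to initialize the bitmap lazily */
        }
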
index 0b127853c5845aef5bcfeaa9ec2485f47d7939fb..7c7123f265c25ae9a586877dc30d7b80ede5b62c 100644
@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_DIOREAD_NOLOCK      0x400000 /* Enable support for dio read nolocking */
 #define EXT4_MOUNT_JOURNAL_CHECKSUM    0x800000 /* Journal checksums */
 #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT        0x1000000 /* Journal Async Commit */
+#define EXT4_MOUNT_WARN_ON_ERROR       0x2000000 /* Trigger WARN_ON on error */
 #define EXT4_MOUNT_DELALLOC            0x8000000 /* Delalloc support */
 #define EXT4_MOUNT_DATA_ERR_ABORT      0x10000000 /* Abort on file data write */
 #define EXT4_MOUNT_BLOCK_VALIDITY      0x20000000 /* Block validity checking */
@@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
 static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 {
        return ino == EXT4_ROOT_INO ||
-               ino == EXT4_USR_QUOTA_INO ||
-               ino == EXT4_GRP_QUOTA_INO ||
-               ino == EXT4_BOOT_LOADER_INO ||
-               ino == EXT4_JOURNAL_INO ||
-               ino == EXT4_RESIZE_INO ||
                (ino >= EXT4_FIRST_INO(sb) &&
                 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
@@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode,
 struct iomap;
 extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
 
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
-                                        struct inode *inode,
-                                        int needed);
 extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 
 extern int ext4_convert_inline_data(struct inode *inode);
index 98fb0c119c6827dd50b86ac3521f216bba41170c..adf6668b596f9e20aab3878b791009bfa5f051a0 100644
@@ -91,6 +91,7 @@ struct ext4_extent_header {
 };
 
 #define EXT4_EXT_MAGIC         cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
 
 #define EXT4_EXTENT_TAIL_OFFSET(hdr) \
        (sizeof(struct ext4_extent_header) + \
index 0057fe3f248d195736ee58ec40131dadd98d59bb..8ce6fd5b10dd331a9cd86fb41e15ba84095c75e7 100644
@@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
 
        eh = ext_inode_hdr(inode);
        depth = ext_depth(inode);
+       if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+               EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+                                depth);
+               ret = -EFSCORRUPTED;
+               goto err;
+       }
 
        if (path) {
                ext4_ext_drop_refs(path);
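
The new EXT4_MAX_EXTENT_DEPTH bound is generous: with 4 KiB blocks, each index block holds roughly (4096 - 12) / 12 = 340 twelve-byte entries and the in-inode root holds 4, so depth 5 can address on the order of 4 * 340^5, about 1.8e13 extents, far beyond the 2^32 logical blocks a file can span. Any on-disk depth outside 0..5 can therefore only come from corruption, which is exactly what the check reports with -EFSCORRUPTED.
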
index f525f909b559c8c12e361f0750b56717a972ffb1..fb83750c1a14662a2e6337133c2ddb8460956aaf 100644
@@ -150,7 +150,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        }
 
        ext4_lock_group(sb, block_group);
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+               if (block_group == 0) {
+                       ext4_unlock_group(sb, block_group);
+                       unlock_buffer(bh);
+                       ext4_error(sb, "Inode bitmap for bg 0 marked "
+                                  "uninitialized");
+                       err = -EFSCORRUPTED;
+                       goto out;
+               }
                memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
                ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
                                     sb->s_blocksize * 8, bh->b_data);
@@ -994,7 +1003,8 @@ got:
 
                /* recheck and clear flag under lock if we still need to */
                ext4_lock_group(sb, group);
-               if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               if (ext4_has_group_desc_csum(sb) &&
+                   (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                        gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                        ext4_free_group_clusters_set(sb, gdp,
                                ext4_free_clusters_after_init(sb, group, gdp));
index 285ed1588730c34c892566c0639b048e9c9a017e..e55a8bc870bd1ef0743cf479e0f1aa565eb364c6 100644
@@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
 
        memset((void *)ext4_raw_inode(&is.iloc)->i_block,
                0, EXT4_MIN_INLINE_DATA_SIZE);
+       memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
 
        if (ext4_has_feature_extents(inode->i_sb)) {
                if (S_ISDIR(inode->i_mode) ||
@@ -886,11 +887,11 @@ retry_journal:
        flags |= AOP_FLAG_NOFS;
 
        if (ret == -ENOSPC) {
+               ext4_journal_stop(handle);
                ret = ext4_da_convert_inline_data_to_extent(mapping,
                                                            inode,
                                                            flags,
                                                            fsdata);
-               ext4_journal_stop(handle);
                if (ret == -ENOSPC &&
                    ext4_should_retry_alloc(inode->i_sb, &retries))
                        goto retry_journal;
@@ -1890,42 +1891,6 @@ out:
        return (error < 0 ? error : 0);
 }
 
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
-                                 struct inode *inode,
-                                 int needed)
-{
-       int error;
-       struct ext4_xattr_entry *entry;
-       struct ext4_inode *raw_inode;
-       struct ext4_iloc iloc;
-
-       error = ext4_get_inode_loc(inode, &iloc);
-       if (error)
-               return error;
-
-       raw_inode = ext4_raw_inode(&iloc);
-       entry = (struct ext4_xattr_entry *)((void *)raw_inode +
-                                           EXT4_I(inode)->i_inline_off);
-       if (EXT4_XATTR_LEN(entry->e_name_len) +
-           EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
-               error = -ENOSPC;
-               goto out;
-       }
-
-       error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
-       brelse(iloc.bh);
-       return error;
-}
-
 int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
 {
        handle_t *handle;
index 2ea07efbe0165d0d5bbff1cd4a570cb8bc337ae6..7d6c10017bdf5910a655d9f005a65c5b190f727a 100644
@@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                                   map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
-                                "lblock %lu mapped to illegal pblock "
+                                "lblock %lu mapped to illegal pblock %llu "
                                 "(length %d)", (unsigned long) map->m_lblk,
-                                map->m_len);
+                                map->m_pblk, map->m_len);
                return -EFSCORRUPTED;
        }
        return 0;
@@ -4506,7 +4506,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
        int                     inodes_per_block, inode_offset;
 
        iloc->bh = NULL;
-       if (!ext4_valid_inum(sb, inode->i_ino))
+       if (inode->i_ino < EXT4_ROOT_INO ||
+           inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
                return -EFSCORRUPTED;
 
        iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
index 6eae2b91aafa20b21fd19c61bb15fa7add625935..f7ab340881626be5f28407334c0f25f18717eb75 100644
@@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
         * initialize bb_free to be able to skip
         * empty groups without initialization
         */
-       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                meta_group_info[i]->bb_free =
                        ext4_free_clusters_after_init(sb, group, desc);
        } else {
@@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 #endif
        ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
                      ac->ac_b_ex.fe_len);
-       if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+       if (ext4_has_group_desc_csum(sb) &&
+           (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
                gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                ext4_free_group_clusters_set(sb, gdp,
                                             ext4_free_clusters_after_init(sb,
index 0c4c2201b3aa2ee9680478f8fd11685e66634f50..ba2396a7bd04b099ed1726a8100d6983954efd36 100644
@@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 
 static void ext4_handle_error(struct super_block *sb)
 {
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
        if (sb_rdonly(sb))
                return;
 
@@ -740,6 +743,9 @@ __acquires(bitlock)
                va_end(args);
        }
 
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
        if (test_opt(sb, ERRORS_CONT)) {
                ext4_commit_super(sb, 0);
                return;
@@ -1371,7 +1377,8 @@ enum {
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
        Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
        Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
-       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
+       Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+       Opt_nowarn_on_error, Opt_mblk_io_submit,
        Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1438,6 +1445,8 @@ static const match_table_t tokens = {
        {Opt_dax, "dax"},
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
+       {Opt_warn_on_error, "warn_on_error"},
+       {Opt_nowarn_on_error, "nowarn_on_error"},
        {Opt_lazytime, "lazytime"},
        {Opt_nolazytime, "nolazytime"},
        {Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
@@ -1602,6 +1611,8 @@ static const struct mount_opts {
         MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
        {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
+       {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
+       {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
        {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
         MOPT_EXT4_ONLY | MOPT_CLEAR},
        {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
@@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        ext4_fsblk_t last_block;
+       ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1;
        ext4_fsblk_t block_bitmap;
        ext4_fsblk_t inode_bitmap;
        ext4_fsblk_t inode_table;
@@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (block_bitmap >= sb_block + 1 &&
+                   block_bitmap <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Block bitmap for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (block_bitmap < first_block || block_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Block bitmap for group %u not in group "
@@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (inode_bitmap >= sb_block + 1 &&
+                   inode_bitmap <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode bitmap for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (inode_bitmap < first_block || inode_bitmap > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
                               "Inode bitmap for group %u not in group "
@@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb,
                        if (!sb_rdonly(sb))
                                return 0;
                }
+               if (inode_table >= sb_block + 1 &&
+                   inode_table <= last_bg_block) {
+                       ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+                                "Inode table for group %u overlaps "
+                                "block group descriptors", i);
+                       if (!sb_rdonly(sb))
+                               return 0;
+               }
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block) {
                        ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3097,13 +3133,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
        ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
        struct ext4_group_desc *gdp = NULL;
 
+       if (!ext4_has_group_desc_csum(sb))
+               return ngroups;
+
        for (group = 0; group < ngroups; group++) {
                gdp = ext4_get_group_desc(sb, group, NULL);
                if (!gdp)
                        continue;
 
-               if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
+               if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
+                       continue;
+               if (group != 0)
                        break;
+               ext4_error(sb, "Inode table for bg 0 marked as "
+                          "needing zeroing");
+               if (sb_rdonly(sb))
+                       return ngroups;
        }
 
        return group;
@@ -3742,6 +3787,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         le32_to_cpu(es->s_log_block_size));
                goto failed_mount;
        }
+       if (le32_to_cpu(es->s_log_cluster_size) >
+           (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+               ext4_msg(sb, KERN_ERR,
+                        "Invalid log cluster size: %u",
+                        le32_to_cpu(es->s_log_cluster_size));
+               goto failed_mount;
+       }
 
        if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
                ext4_msg(sb, KERN_ERR,
@@ -3806,6 +3858,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        } else {
                sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
                sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+               if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+                       ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+                                sbi->s_first_ino);
+                       goto failed_mount;
+               }
                if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
                    (!is_power_of_2(sbi->s_inode_size)) ||
                    (sbi->s_inode_size > blocksize)) {
@@ -3882,13 +3939,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                 "block size (%d)", clustersize, blocksize);
                        goto failed_mount;
                }
-               if (le32_to_cpu(es->s_log_cluster_size) >
-                   (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
-                       ext4_msg(sb, KERN_ERR,
-                                "Invalid log cluster size: %u",
-                                le32_to_cpu(es->s_log_cluster_size));
-                       goto failed_mount;
-               }
                sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
                        le32_to_cpu(es->s_log_block_size);
                sbi->s_clusters_per_group =
@@ -3909,10 +3959,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                }
        } else {
                if (clustersize != blocksize) {
-                       ext4_warning(sb, "fragment/cluster size (%d) != "
-                                    "block size (%d)", clustersize,
-                                    blocksize);
-                       clustersize = blocksize;
+                       ext4_msg(sb, KERN_ERR,
+                                "fragment/cluster size (%d) != "
+                                "block size (%d)", clustersize, blocksize);
+                       goto failed_mount;
                }
                if (sbi->s_blocks_per_group > blocksize * 8) {
                        ext4_msg(sb, KERN_ERR,
@@ -3966,6 +4016,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                         ext4_blocks_count(es));
                goto failed_mount;
        }
+       if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
+           (sbi->s_cluster_ratio == 1)) {
+               ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
+                        "block is 0 with a 1k block and cluster size");
+               goto failed_mount;
+       }
+
        blocks_count = (ext4_blocks_count(es) -
                        le32_to_cpu(es->s_first_data_block) +
                        EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -4001,6 +4058,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ret = -ENOMEM;
                goto failed_mount;
        }
+       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+               ret = -EINVAL;
+               goto failed_mount;
+       }
 
        bgl_lock_init(sbi->s_blockgroup_lock);
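
The inodes-count check above enforces a cross-field invariant of the superblock: s_inodes_count must equal s_groups_count * s_inodes_per_group; with, say, 1024 groups at 8192 inodes each it must read exactly 8388608, and anything else means later inode-table indexing would run off the end. As a sketch of the invariant:

        u64 expected = (u64)sbi->s_groups_count * sbi->s_inodes_per_group;
        if (expected != le32_to_cpu(es->s_inodes_count))
                return -EINVAL;         /* superblock self-inconsistent */
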
 
@@ -4736,6 +4801,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 
        if (!sbh || block_device_ejected(sb))
                return error;
+
+       /*
+        * The superblock bh should be mapped, but it might not be if the
+        * device was hot-removed. Not much we can do but fail the I/O.
+        */
+       if (!buffer_mapped(sbh))
+               return error;
+
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
index fc4ced59c565b7b8ad2d36af9b8e1894c7fd3029..723df14f408408607c123dbbb7b7f7fe1fe9b396 100644
@@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
 {
        int error = -EFSCORRUPTED;
 
-       if (buffer_verified(bh))
-               return 0;
-
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1))
                goto errout;
+       if (buffer_verified(bh))
+               return 0;
+
        error = -EFSBADCRC;
        if (!ext4_xattr_block_csum_verify(inode, bh))
                goto errout;
@@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
                                handle_t *handle, struct inode *inode,
                                bool is_block)
 {
-       struct ext4_xattr_entry *last;
+       struct ext4_xattr_entry *last, *next;
        struct ext4_xattr_entry *here = s->here;
        size_t min_offs = s->end - s->base, name_len = strlen(i->name);
        int in_inode = i->in_inode;
@@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
 
        /* Compute min_offs and last. */
        last = s->first;
-       for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+       for (; !IS_LAST_ENTRY(last); last = next) {
+               next = EXT4_XATTR_NEXT(last);
+               if ((void *)next >= s->end) {
+                       EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+                       ret = -EFSCORRUPTED;
+                       goto out;
+               }
                if (!last->e_value_inum && last->e_value_size) {
                        size_t offs = le16_to_cpu(last->e_value_offs);
                        if (offs < min_offs)
@@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
        if (EXT4_I(inode)->i_extra_isize == 0)
                return -ENOSPC;
        error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
-       if (error) {
-               if (error == -ENOSPC &&
-                   ext4_has_inline_data(inode)) {
-                       error = ext4_try_to_evict_inline_data(handle, inode,
-                                       EXT4_XATTR_LEN(strlen(i->name) +
-                                       EXT4_XATTR_SIZE(i->value_len)));
-                       if (error)
-                               return error;
-                       error = ext4_xattr_ibody_find(inode, i, is);
-                       if (error)
-                               return error;
-                       error = ext4_xattr_set_entry(i, s, handle, inode,
-                                                    false /* is_block */);
-               }
-               if (error)
-                       return error;
-       }
+       if (error)
+               return error;
        header = IHDR(inode, ext4_raw_inode(&is->iloc));
        if (!IS_LAST_ENTRY(s->first)) {
                header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
                last = IFIRST(header);
                /* Find the entry best suited to be pushed into EA block */
                for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+                       /* never move system.data out of the inode */
+                       if ((last->e_name_len == 4) &&
+                           (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+                           !memcmp(last->e_name, "data", 4))
+                               continue;
                        total_size = EXT4_XATTR_LEN(last->e_name_len);
                        if (!last->e_value_inum)
                                total_size += EXT4_XATTR_SIZE(
index 2c300e98179607ea0062a2c1dbcee17e9bc926c4..8c86c809ca17b30e003913e169626aa42df2e908 100644
@@ -1999,8 +1999,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
+
+               /* Directories are special, and always inherit S_ISGID */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
+               else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+                        !in_group_p(inode->i_gid) &&
+                        !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+                       mode &= ~S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
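
This inode_init_owner() change closes the unprivileged setgid-inheritance hole: a non-directory created in a setgid directory keeps the directory's group, but it may only keep the setgid bit itself (when group-exec is also requested) if the creator is a member of that group or holds CAP_FSETID over the directory; otherwise a user could mint setgid executables for groups they do not belong to. Condensed:

        if (S_ISDIR(mode))
                mode |= S_ISGID;        /* directories always inherit */
        else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
                 !in_group_p(inode->i_gid) &&
                 !capable_wrt_inode_uidgid(dir, CAP_FSETID))
                mode &= ~S_ISGID;       /* strip the exploitable bit */
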
index 51dd68e67b0f3abfcd115196724079e226467d09..c0b66a7a795b1cd22de3061930e454ff24394925 100644
@@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                if (jh->b_transaction == transaction &&
                    jh->b_jlist != BJ_Metadata) {
                        jbd_lock_bh_state(bh);
+                       if (jh->b_transaction == transaction &&
+                           jh->b_jlist != BJ_Metadata)
+                               pr_err("JBD2: assertion failure: h_type=%u "
+                                      "h_line_no=%u block_no=%llu jlist=%u\n",
+                                      handle->h_type, handle->h_line_no,
+                                      (unsigned long long) bh->b_blocknr,
+                                      jh->b_jlist);
                        J_ASSERT_JH(jh, jh->b_transaction != transaction ||
                                        jh->b_jlist == BJ_Metadata);
                        jbd_unlock_bh_state(bh);
@@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
                 * of the transaction. This needs to be done
                 * once a transaction -bzzz
                 */
-               jh->b_modified = 1;
                if (handle->h_buffer_credits <= 0) {
                        ret = -ENOSPC;
                        goto out_unlock_bh;
                }
+               jh->b_modified = 1;
                handle->h_buffer_credits--;
        }
 
index c60f3d32ee911192c0cd8dae3b7cb11c0f416411..a6797986b625a34d19e097050c58f582c177c30c 100644
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
        if (size > PSIZE) {
                /*
                 * To keep the rest of the code simple.  Allocate a
-                * contiguous buffer to work with
+                * contiguous buffer to work with. Make the buffer large
+                * enough to make use of the whole extent.
                 */
-               ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+               ea_buf->max_size = (size + sb->s_blocksize - 1) &
+                   ~(sb->s_blocksize - 1);
+
+               ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
                if (ea_buf->xattr == NULL)
                        return -ENOMEM;
 
                ea_buf->flag = EA_MALLOC;
-               ea_buf->max_size = (size + sb->s_blocksize - 1) &
-                   ~(sb->s_blocksize - 1);
 
                if (ea_size == 0)
                        return 0;
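
The fix is to allocate max_size up front rather than the raw size, since later code fills the buffer a whole block at a time. The rounding is the usual power-of-two round-up; with a 4096-byte block size, for example, size = 5000 becomes 8192:

        /* round up to a multiple of the (power-of-two) block size */
        max_size = (size + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
        /* e.g. (5000 + 4095) & ~4095 == 8192 */
        xattr = kmalloc(max_size, GFP_KERNEL);  /* was kmalloc(size, ...) */
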
index bbd0465535ebd9e433a812d60ab345161ef736b3..f033f3a69a3bcf7259192a9e062d7af295f90639 100644
@@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
-               if (res != ERR_PTR(-ENOENT))
+               if (res != ERR_PTR(-ENOENT)) {
+                       rcu_read_unlock();
                        return res;
+               }
        }
        rcu_read_unlock();
        return ERR_PTR(-ENOENT);
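
The bug here was a read-side imbalance: returning from inside list_for_each_entry_rcu() without dropping rcu_read_lock() leaves the task in an RCU critical section indefinitely. Every exit path has to unlock, per this generic sketch (try_lookup() is a hypothetical helper):

        rcu_read_lock();
        list_for_each_entry_rcu(pos, head, link) {
                res = try_lookup(pos);
                if (!IS_ERR(res)) {
                        rcu_read_unlock();      /* unlock on early return */
                        return res;
                }
        }
        rcu_read_unlock();
        return ERR_PTR(-ENOENT);
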
index d4a07acad5989e1374f879f2cc46c284f9aa8c4f..8f003792ccde1c24c3bcd444a609b888a629340f 100644
@@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
                                        &hdr->pgio_mirror_idx))
                        goto out_eagain;
-               ff_layout_read_record_layoutstats_done(task, hdr);
-               pnfs_read_resend_pnfs(hdr);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_read(hdr);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                goto out_eagain;
@@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               pnfs_read_resend_pnfs(hdr);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_read(hdr);
        pnfs_generic_rw_release(data);
 }
 
@@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
                                           hdr->ds_clp, hdr->lseg,
                                           hdr->pgio_mirror_idx);
 
+       clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+       clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
        switch (err) {
        case -NFS4ERR_RESET_TO_PNFS:
-               ff_layout_reset_write(hdr, true);
+               set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
        case -NFS4ERR_RESET_TO_MDS:
-               ff_layout_reset_write(hdr, false);
+               set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
                return task->tk_status;
        case -EAGAIN:
                return -EAGAIN;
@@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data)
        struct nfs_pgio_header *hdr = data;
 
        ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+       if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+               ff_layout_reset_write(hdr, true);
+       else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+               ff_layout_reset_write(hdr, false);
        pnfs_generic_rw_release(data);
 }
 
index ed45090e4df6471902f5968b908429fe28976280..6dd146885da99304c8183f5fae21741f4aa3625f 100644
@@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
        struct nfs4_closedata *calldata = data;
        struct nfs4_state *state = calldata->state;
        struct inode *inode = calldata->inode;
+       struct pnfs_layout_hdr *lo;
        bool is_rdonly, is_wronly, is_rdwr;
        int call_close = 0;
 
@@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
                goto out_wait;
        }
 
+       lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               calldata->arg.lr_args = NULL;
+               calldata->res.lr_res = NULL;
+       }
+
        if (calldata->arg.fmode == 0)
                task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
 
@@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata)
 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
 {
        struct nfs4_delegreturndata *d_data;
+       struct pnfs_layout_hdr *lo;
 
        d_data = (struct nfs4_delegreturndata *)data;
 
        if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
                return;
 
+       lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+       if (lo && !pnfs_layout_is_valid(lo)) {
+               d_data->args.lr_args = NULL;
+               d_data->res.lr_res = NULL;
+       }
+
        nfs4_setup_sequence(d_data->res.server->nfs_client,
                        &d_data->args.seq_args,
                        &d_data->res.seq_res,
@@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 
        dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
 
+       nfs4_sequence_free_slot(&lgp->res.seq_res);
+
        switch (nfs4err) {
        case 0:
                goto out;
@@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
                goto out;
        }
 
-       nfs4_sequence_free_slot(&lgp->res.seq_res);
        err = nfs4_handle_exception(server, nfs4err, exception);
        if (!status) {
                if (exception->retry)
@@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        if (IS_ERR(task))
                return ERR_CAST(task);
        status = rpc_wait_for_completion_task(task);
-       if (status == 0) {
+       if (status != 0)
+               goto out;
+
+       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+       if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
                status = nfs4_layoutget_handle_exception(task, lgp, &exception);
                *timeout = exception.timeout;
-       }
-
+       } else
+               lseg = pnfs_layout_process(lgp);
+out:
        trace_nfs4_layoutget(lgp->args.ctx,
                        &lgp->args.range,
                        &lgp->res.range,
                        &lgp->res.stateid,
                        status);
 
-       /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
-       if (status == 0 && lgp->res.layoutp->len)
-               lseg = pnfs_layout_process(lgp);
        rpc_put_task(task);
        dprintk("<-- %s status=%d\n", __func__, status);
        if (status)
@@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
                        &lrp->args.seq_args,
                        &lrp->res.seq_res,
                        task);
+       if (!pnfs_layout_is_valid(lrp->args.layout))
+               rpc_exit(task, 0);
 }
 
 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
index a8f5e6b167491e3746a921f1f611fbb06b7d5f45..3fe81424337d07b5b19ab77d08825fb27bf523b0 100644
@@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
 {
 }
 
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+       return false;
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 #if IS_ENABLED(CONFIG_NFS_V4_2)
index bb0840e234f3bc176d2af120d6ed94ee3720aad0..39d6f431da83f4227fbfbee1b6931230aa82e95c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        }
 }
 
-static struct wait_queue_head *
-pipe_get_poll_head(struct file *filp, __poll_t events)
-{
-       struct pipe_inode_info *pipe = filp->private_data;
-
-       return &pipe->wait;
-}
-
 /* No kernel lock held - fine */
-static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
+static __poll_t
+pipe_poll(struct file *filp, poll_table *wait)
 {
+       __poll_t mask;
        struct pipe_inode_info *pipe = filp->private_data;
-       int nrbufs = pipe->nrbufs;
-       __poll_t mask = 0;
+       int nrbufs;
+
+       poll_wait(filp, &pipe->wait, wait);
 
        /* Reading only -- no need for acquiring the semaphore.  */
+       nrbufs = pipe->nrbufs;
+       mask = 0;
        if (filp->f_mode & FMODE_READ) {
                mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
                if (!pipe->writers && filp->f_version != pipe->w_counter)
@@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = {
        .llseek         = no_llseek,
        .read_iter      = pipe_read,
        .write_iter     = pipe_write,
-       .get_poll_head  = pipe_get_poll_head,
-       .poll_mask      = pipe_poll_mask,
+       .poll           = pipe_poll,
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
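
The hunk above restores the classic ->poll contract: register on the wait queue with poll_wait(), then compute and return the ready mask in one pass. A minimal sketch of that pattern for a hypothetical driver (mydev, its wq, and its flags are illustrative, not from this patch):

        static __poll_t mydev_poll(struct file *filp, poll_table *wait)
        {
                struct mydev *dev = filp->private_data;
                __poll_t mask = 0;

                /* Queue this waiter; poll_wait() itself never blocks. */
                poll_wait(filp, &dev->wq, wait);

                if (dev->data_ready)
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (dev->space_free)
                        mask |= EPOLLOUT | EPOLLWRNORM;
                return mask;
        }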
index b6572944efc340d89f136c5a9c17ac409c8bef00..aaffc0c302162db0fc9d682c071469f55326dc1d 100644 (file)
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        if (env_start != arg_end || env_start >= env_end)
                env_start = env_end = arg_end;
 
+       /* .. and limit it to a maximum of one page of slop */
+       if (env_end >= arg_end + PAGE_SIZE)
+               env_end = arg_end + PAGE_SIZE - 1;
+
        /* We're not going to care if "*ppos" has high bits set */
        pos = arg_start + *ppos;
 
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
        while (count) {
                int got;
                size_t size = min_t(size_t, PAGE_SIZE, count);
+               long offset;
 
-               got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
-               if (got <= 0)
+               /*
+                * Are we already starting past the official end?
+                * We always include the last byte that is *supposed*
+                * to be NUL
+                */
+               offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+               got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+               if (got <= offset)
                        break;
+               got -= offset;
 
                /* Don't walk past a NUL character once you hit arg_end */
                if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
                                n = arg_end - pos - 1;
 
                        /* Cut off at first NUL after 'n' */
-                       got = n + strnlen(page+n, got-n);
-                       if (!got)
+                       got = n + strnlen(page+n, offset+got-n);
+                       if (got < offset)
                                break;
+                       got -= offset;
+
+                       /* Include the NUL if it existed */
+                       if (got < size)
+                               got++;
                }
 
-               got -= copy_to_user(buf, page, got);
+               got -= copy_to_user(buf, page+offset, got);
                if (unlikely(!got)) {
                        if (!len)
                                len = -EFAULT;
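
To make the offset arithmetic above concrete: when pos == arg_end, offset becomes pos - arg_end + 1 = 1, so the read is shifted back to start at arg_end - 1, the byte that is supposed to hold the terminating NUL. If access_remote_vm() cannot return more than those offset bytes, nothing past the official end was actually readable and the loop stops; otherwise the offset bytes are dropped again before copying to userspace.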
index 6ac1c92997ea2a20c3af8959c6920218f16a846d..bb1c1625b158d03f5c8685f55e370267e1cc76fb 100644 (file)
@@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file)
        return seq_open(file, de->seq_ops);
 }
 
+static int proc_seq_release(struct inode *inode, struct file *file)
+{
+       struct proc_dir_entry *de = PDE(inode);
+
+       if (de->state_size)
+               return seq_release_private(inode, file);
+       return seq_release(inode, file);
+}
+
 static const struct file_operations proc_seq_fops = {
        .open           = proc_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = seq_release,
+       .release        = proc_seq_release,
 };
 
 struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
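
For context, proc_create_seq_private() is the variant that hands each open() a state_size-byte private allocation via seq_open_private(); the fix above makes the release path free it symmetrically. A hedged usage sketch (my_seq_ops and struct my_state are illustrative):

        struct my_state {
                int cursor;             /* per-open scratch state */
        };

        /* state_size != 0 selects the seq_release_private() path above */
        proc_create_seq_private("my_entry", 0444, NULL, &my_seq_ops,
                                sizeof(struct my_state), NULL);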
index e9679016271fba923290c24e13f5368f5f0e0199..dfd73a4616ce565bfccb996a50f5eb549fe41fe8 100644 (file)
@@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
                SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
                SEQ_PUT_DEC(" kB\nSwapPss:        ",
                                                mss->swap_pss >> PSS_SHIFT);
-               SEQ_PUT_DEC(" kB\nLocked:         ", mss->pss >> PSS_SHIFT);
+               SEQ_PUT_DEC(" kB\nLocked:         ",
+                                               mss->pss_locked >> PSS_SHIFT);
                seq_puts(m, " kB\n");
        }
        if (!rollup_mode) {
index d88231e3b2be3ec1bc1f85c3c2fd92973e312c15..fc20e06c56ba55bf229db78cb5b5077c21935931 100644 (file)
@@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync);
 static unsigned long
 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-       struct list_head *head;
        struct dquot *dquot;
        unsigned long freed = 0;
 
        spin_lock(&dq_list_lock);
-       head = free_dquots.prev;
-       while (head != &free_dquots && sc->nr_to_scan) {
-               dquot = list_entry(head, struct dquot, dq_free);
+       while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+               dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
                remove_dquot_hash(dquot);
                remove_free_dquot(dquot);
                remove_inuse(dquot);
                do_destroy_dquot(dquot);
                sc->nr_to_scan--;
                freed++;
-               head = free_dquots.prev;
        }
        spin_unlock(&dq_list_lock);
        return freed;
index 7e288d97adcbb7504f2c3c2953ca24debd770b01..9fed1c05f1f4df6f750c599da1670abf4b066445 100644 (file)
@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
 }
 
 /* %k */
-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
+static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
 {
        if (key)
-               sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
-                       le32_to_cpu(key->k_objectid), le_offset(key),
-                       le_type(key));
+               return scnprintf(buf, size, "[%d %d %s %s]",
+                                le32_to_cpu(key->k_dir_id),
+                                le32_to_cpu(key->k_objectid), le_offset(key),
+                                le_type(key));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
 /* %K */
-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
+static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
 {
        if (key)
-               sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
-                       key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
-                       cpu_type(key));
+               return scnprintf(buf, size, "[%d %d %s %s]",
+                                key->on_disk_key.k_dir_id,
+                                key->on_disk_key.k_objectid,
+                                reiserfs_cpu_offset(key), cpu_type(key));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
+static int scnprintf_de_head(char *buf, size_t size,
+                            struct reiserfs_de_head *deh)
 {
        if (deh)
-               sprintf(buf,
-                       "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
-                       deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
-                       deh_location(deh), deh_state(deh));
+               return scnprintf(buf, size,
+                                "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+                                deh_offset(deh), deh_dir_id(deh),
+                                deh_objectid(deh), deh_location(deh),
+                                deh_state(deh));
        else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 
 }
 
-static void sprintf_item_head(char *buf, struct item_head *ih)
+static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
 {
        if (ih) {
-               strcpy(buf,
-                      (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
-               sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
-               sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
-                       "free_space(entry_count) %d",
-                       ih_item_len(ih), ih_location(ih), ih_free_space(ih));
+               char *p = buf;
+               char * const end = buf + size;
+
+               p += scnprintf(p, end - p, "%s",
+                              (ih_version(ih) == KEY_FORMAT_3_6) ?
+                              "*3.6* " : "*3.5*");
+
+               p += scnprintf_le_key(p, end - p, &ih->ih_key);
+
+               p += scnprintf(p, end - p,
+                              ", item_len %d, item_location %d, free_space(entry_count) %d",
+                              ih_item_len(ih), ih_location(ih),
+                              ih_free_space(ih));
+               return p - buf;
        } else
-               sprintf(buf, "[NULL]");
+               return scnprintf(buf, size, "[NULL]");
 }
 
-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
+static int scnprintf_direntry(char *buf, size_t size,
+                             struct reiserfs_dir_entry *de)
 {
        char name[20];
 
        memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
        name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
-       sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+       return scnprintf(buf, size, "\"%s\"==>[%d %d]",
+                        name, de->de_dir_id, de->de_objectid);
 }
 
-static void sprintf_block_head(char *buf, struct buffer_head *bh)
+static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
 {
-       sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
-               B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+       return scnprintf(buf, size,
+                        "level=%d, nr_items=%d, free_space=%d rdkey ",
+                        B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
 }
 
-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
+static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
 {
-       sprintf(buf,
-               "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
-               bh->b_bdev, bh->b_size,
-               (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
-               bh->b_state, bh->b_page,
-               buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
-               buffer_dirty(bh) ? "DIRTY" : "CLEAN",
-               buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+       return scnprintf(buf, size,
+                        "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+                        bh->b_bdev, bh->b_size,
+                        (unsigned long long)bh->b_blocknr,
+                        atomic_read(&(bh->b_count)),
+                        bh->b_state, bh->b_page,
+                        buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+                        buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+                        buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
 }
 
-static void sprintf_disk_child(char *buf, struct disk_child *dc)
+static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
 {
-       sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
-               dc_size(dc));
+       return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
+                        dc_block_number(dc), dc_size(dc));
 }
 
 static char *is_there_reiserfs_struct(char *fmt, int *what)
@@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
        char *fmt1 = fmt_buf;
        char *k;
        char *p = error_buf;
+       char * const end = &error_buf[sizeof(error_buf)];
        int what;
 
        spin_lock(&error_lock);
 
-       strcpy(fmt1, fmt);
+       if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
+               strscpy(error_buf, "format string too long", end - error_buf);
+               goto out_unlock;
+       }
 
        while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
                *k = 0;
 
-               p += vsprintf(p, fmt1, args);
+               p += vscnprintf(p, end - p, fmt1, args);
 
                switch (what) {
                case 'k':
-                       sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
+                       p += scnprintf_le_key(p, end - p,
+                                             va_arg(args, struct reiserfs_key *));
                        break;
                case 'K':
-                       sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
+                       p += scnprintf_cpu_key(p, end - p,
+                                              va_arg(args, struct cpu_key *));
                        break;
                case 'h':
-                       sprintf_item_head(p, va_arg(args, struct item_head *));
+                       p += scnprintf_item_head(p, end - p,
+                                                va_arg(args, struct item_head *));
                        break;
                case 't':
-                       sprintf_direntry(p,
-                                        va_arg(args,
-                                               struct reiserfs_dir_entry *));
+                       p += scnprintf_direntry(p, end - p,
+                                               va_arg(args, struct reiserfs_dir_entry *));
                        break;
                case 'y':
-                       sprintf_disk_child(p,
-                                          va_arg(args, struct disk_child *));
+                       p += scnprintf_disk_child(p, end - p,
+                                                 va_arg(args, struct disk_child *));
                        break;
                case 'z':
-                       sprintf_block_head(p,
-                                          va_arg(args, struct buffer_head *));
+                       p += scnprintf_block_head(p, end - p,
+                                                 va_arg(args, struct buffer_head *));
                        break;
                case 'b':
-                       sprintf_buffer_head(p,
-                                           va_arg(args, struct buffer_head *));
+                       p += scnprintf_buffer_head(p, end - p,
+                                                  va_arg(args, struct buffer_head *));
                        break;
                case 'a':
-                       sprintf_de_head(p,
-                                       va_arg(args,
-                                              struct reiserfs_de_head *));
+                       p += scnprintf_de_head(p, end - p,
+                                              va_arg(args, struct reiserfs_de_head *));
                        break;
                }
 
-               p += strlen(p);
                fmt1 = k + 2;
        }
-       vsprintf(p, fmt1, args);
+       p += vscnprintf(p, end - p, fmt1, args);
+out_unlock:
        spin_unlock(&error_lock);
 
 }
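
The whole conversion leans on the scnprintf() contract: it returns the number of characters actually stored (excluding the NUL) and never writes past the given size, so the cursor arithmetic above cannot run off the end of error_buf. The bounded-append idiom in isolation, as a sketch:

        static int fill(char *buf, size_t size)
        {
                char *p = buf;
                char * const end = buf + size;

                /* Each call advances p; once the buffer is full, end - p
                 * shrinks to 1 and scnprintf() returns 0 rather than
                 * overflowing, unlike sprintf(). */
                p += scnprintf(p, end - p, "level=%d", 3);
                p += scnprintf(p, end - p, ", items=%d", 42);
                return p - buf;
        }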
index 317891ff8165ba19b775fcfaa8f6deccb58ba18f..4a6b6e4b21cb91aecdf40492c4763f09bf4ccc3f 100644 (file)
 
 #include <linux/uaccess.h>
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
-{
-       if (file->f_op->poll) {
-               return file->f_op->poll(file, pt);
-       } else if (file_has_poll_mask(file)) {
-               unsigned int events = poll_requested_events(pt);
-               struct wait_queue_head *head;
-
-               if (pt && pt->_qproc) {
-                       head = file->f_op->get_poll_head(file, events);
-                       if (!head)
-                               return DEFAULT_POLLMASK;
-                       if (IS_ERR(head))
-                               return EPOLLERR;
-                       pt->_qproc(file, head, pt);
-               }
-
-               return file->f_op->poll_mask(file, events);
-       } else {
-               return DEFAULT_POLLMASK;
-       }
-}
-EXPORT_SYMBOL_GPL(vfs_poll);
 
 /*
  * Estimate expected accuracy in ns from a timeval.
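
With ->get_poll_head/->poll_mask reverted, vfs_poll() no longer needs an out-of-line definition; in this series it survives as a short inline in include/linux/poll.h, roughly (a sketch of the replacement, not part of this hunk):

        static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
        {
                if (unlikely(!file->f_op->poll))
                        return DEFAULT_POLLMASK;
                return file->f_op->poll(file, pt);
        }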
index d84a2bee4f82b2f8470b7f2fbd42b2f33beb2bce..cdad49da3ff710e6fd2cc1adf4bf4877623af670 100644 (file)
@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
        kfree_rcu(ctx, rcu);
        return 0;
 }
-       
-static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
-               __poll_t eventmask)
+
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
 {
        struct timerfd_ctx *ctx = file->private_data;
+       __poll_t events = 0;
+       unsigned long flags;
 
-       return &ctx->wqh;
-}
+       poll_wait(file, &ctx->wqh, wait);
 
-static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
-{
-       struct timerfd_ctx *ctx = file->private_data;
+       spin_lock_irqsave(&ctx->wqh.lock, flags);
+       if (ctx->ticks)
+               events |= EPOLLIN;
+       spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
-       return ctx->ticks ? EPOLLIN : 0;
+       return events;
 }
 
 static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
 
 static const struct file_operations timerfd_fops = {
        .release        = timerfd_release,
-       .get_poll_head  = timerfd_get_poll_head,
-       .poll_mask      = timerfd_poll_mask,
+       .poll           = timerfd_poll,
        .read           = timerfd_read,
        .llseek         = noop_llseek,
        .show_fdinfo    = timerfd_show,
index 1b961b1d9699461cdf0771a90b4771078f6c95fc..fcda0fc97b90a14fd53aafbeb15885d85716e3a1 100644 (file)
@@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
                        udf_write_aext(table, &epos, &eloc,
                                        (etype << 30) | elen, 1);
                } else
-                       udf_delete_aext(table, epos, eloc,
-                                       (etype << 30) | elen);
+                       udf_delete_aext(table, epos);
        } else {
                alloc_count = 0;
        }
@@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
        if (goal_elen)
                udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
        else
-               udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+               udf_delete_aext(table, goal_epos);
        brelse(goal_epos.bh);
 
        udf_add_free_space(sb, partition, -1);
index 0a98a2369738fc2cff925c80066b92a58b299066..d9523013096f978c9d4a3ca1d8fdd23b55eeb275 100644 (file)
@@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               fibh->ebh->b_data,
                               sizeof(struct fileIdentDesc) + fibh->soffset);
 
-                       fi_len = (sizeof(struct fileIdentDesc) +
-                                 cfi->lengthFileIdent +
-                                 le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+                       fi_len = udf_dir_entry_len(cfi);
                        *nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
                        fibh->eoffset = fibh->soffset + fi_len;
                } else {
@@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
                               sizeof(struct fileIdentDesc));
                }
        }
+       /* Got last entry outside of dir size - fs is corrupted! */
+       if (*nf_pos > dir->i_size)
+               return NULL;
        return fi;
 }
 
index 7f39d17352c9697863f02140f7cf7ec1120a2215..9915a58fbabd7ff0194709ec883c1bd7003d72c7 100644 (file)
@@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
 
        if (startnum > endnum) {
                for (i = 0; i < (startnum - endnum); i++)
-                       udf_delete_aext(inode, *epos, laarr[i].extLocation,
-                                       laarr[i].extLength);
+                       udf_delete_aext(inode, *epos);
        } else if (startnum < endnum) {
                for (i = 0; i < (endnum - startnum); i++) {
                        udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
        return (nelen >> 30);
 }
 
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
-                      struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
 {
        struct extent_position oepos;
        int adsize;
        int8_t etype;
        struct allocExtDesc *aed;
        struct udf_inode_info *iinfo;
+       struct kernel_lb_addr eloc;
+       uint32_t elen;
 
        if (epos.bh) {
                get_bh(epos.bh);
index c586026508db82d0a27a1df1b964bcbf3fcec45c..06f37ddd2997f4894859722fb7f801994e91239d 100644 (file)
@@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
        loff_t f_pos;
        loff_t size = udf_ext0_offset(dir) + dir->i_size;
        int nfidlen;
-       uint8_t lfi;
-       uint16_t liu;
        udf_pblk_t block;
        struct kernel_lb_addr eloc;
        uint32_t elen = 0;
@@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                namelen = 0;
        }
 
-       nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+       nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
 
        f_pos = udf_ext0_offset(dir);
 
@@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                        goto out_err;
                }
 
-               liu = le16_to_cpu(cfi->lengthOfImpUse);
-               lfi = cfi->lengthFileIdent;
-
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (((sizeof(struct fileIdentDesc) +
-                                       liu + lfi + 3) & ~3) == nfidlen) {
+                       if (udf_dir_entry_len(cfi) == nfidlen) {
                                cfi->descTag.tagSerialNum = cpu_to_le16(1);
                                cfi->fileVersionNum = cpu_to_le16(1);
                                cfi->fileCharacteristics = 0;
@@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        if (dir_fi) {
                dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
-               udf_update_tag((char *)dir_fi,
-                               (sizeof(struct fileIdentDesc) +
-                               le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+               udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
                if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
                        mark_inode_dirty(old_inode);
                else
index bae311b59400459338d2c60f9e962429066e1483..84c47dde4d268a12e8f1aeaf2c6e6dda91db4972 100644 (file)
@@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
                        struct fileIdentDesc *, struct udf_fileident_bh *,
                        uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+       return ALIGN(sizeof(struct fileIdentDesc) +
+               le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+               UDF_NAME_PAD);
+}
 
 /* file.c */
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
@@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
                        struct kernel_lb_addr *, uint32_t, int);
 extern void udf_write_aext(struct inode *, struct extent_position *,
                           struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
-                             struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
 extern int8_t udf_next_aext(struct inode *, struct extent_position *,
                            struct kernel_lb_addr *, uint32_t *, int);
 extern int8_t udf_current_aext(struct inode *, struct extent_position *,
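
udf_dir_entry_len() replaces several open-coded (x + 3) & ~3 computations with ALIGN(x, UDF_NAME_PAD). For power-of-two alignments the kernel's ALIGN() reduces to (x + a - 1) & ~(a - 1), which with UDF_NAME_PAD == 4 is exactly the old expression. A quick userspace check of the equivalence:

        #include <assert.h>

        #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

        int main(void)
        {
                for (unsigned int len = 0; len < 64; len++)
                        assert(ALIGN(len, 4u) == ((len + 3u) & ~3u));
                return 0;
        }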
index 123bf7d516fc1f475cb89edb8aade4c2ad556f51..594d192b23317d7e69d068b2f124ca6f77de3e07 100644 (file)
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                         unsigned long reason)
 {
        struct mm_struct *mm = ctx->mm;
-       pte_t *pte;
+       pte_t *ptep, pte;
        bool ret = true;
 
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-       if (!pte)
+       ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
+
+       if (!ptep)
                goto out;
 
        ret = false;
+       pte = huge_ptep_get(ptep);
 
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
-       if (huge_pte_none(*pte))
+       if (huge_pte_none(pte))
                ret = true;
-       if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+       if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                ret = true;
 out:
        return ret;
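
The point of the change above is that the walk is lockless: dereferencing *pte twice could observe two different entries, one per test. Taking a single snapshot with huge_ptep_get() and testing only the snapshot keeps the two checks coherent. The same pattern in its generic form (a sketch, not hugetlb code):

        /* Lockless reader: snapshot once, then run every check on the copy. */
        pte_t entry = READ_ONCE(*ptep);

        if (pte_none(entry))
                return true;
        if (!pte_write(entry) && (reason & VM_UFFD_WP))
                return true;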
index 84db76e0e3e3c58ae7d25b38a46a1ce2b4d5cae4..fecd187fcf2c3cd69bd79954ccc272737cb2ce2b 100644 (file)
@@ -157,6 +157,7 @@ __xfs_ag_resv_free(
        error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
        resv->ar_reserved = 0;
        resv->ar_asked = 0;
+       resv->ar_orig_reserved = 0;
 
        if (error)
                trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
@@ -189,13 +190,34 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
-       xfs_extlen_t                    reserved;
+       xfs_extlen_t                    hidden_space;
 
        if (used > ask)
                ask = used;
-       reserved = ask - used;
 
-       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       switch (type) {
+       case XFS_AG_RESV_RMAPBT:
+               /*
+                * Space taken by the rmapbt is not subtracted from fdblocks
+                * because the rmapbt lives in the free space.  Here we must
+                * subtract the entire reservation from fdblocks so that we
+                * always have blocks available for rmapbt expansion.
+                */
+               hidden_space = ask;
+               break;
+       case XFS_AG_RESV_METADATA:
+               /*
+                * Space taken by all other metadata btrees is accounted
+                * on-disk as used space.  We therefore only hide the space
+                * that is reserved but not used by the trees.
+                */
+               hidden_space = ask - used;
+               break;
+       default:
+               ASSERT(0);
+               return -EINVAL;
+       }
+       error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
        if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
@@ -216,7 +238,8 @@ __xfs_ag_resv_init(
 
        resv = xfs_perag_resv(pag, type);
        resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+       resv->ar_orig_reserved = hidden_space;
+       resv->ar_reserved = ask - used;
 
        trace_xfs_ag_resv_init(pag, type, ask);
        return 0;
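
To put numbers on the two branches: with ask = 100 blocks and used = 40, an XFS_AG_RESV_METADATA reservation hides ask - used = 60 blocks from fdblocks (the 40 in use are already accounted on disk), while an XFS_AG_RESV_RMAPBT reservation hides the full 100 (rmapbt blocks live in the free space and are never subtracted elsewhere). In both cases ar_reserved, the amount the tree may still consume, is ask - used = 60; only ar_orig_reserved differs.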
index 01628f0c9a0c227543087c70bd7391ad3f0eee2c..7205268b30bc54b488bf513b1a2b6bb737769d64 100644 (file)
@@ -5780,6 +5780,32 @@ del_cursor:
        return error;
 }
 
+/* Make sure we won't be right-shifting an extent past the maximum bound. */
+int
+xfs_bmap_can_insert_extents(
+       struct xfs_inode        *ip,
+       xfs_fileoff_t           off,
+       xfs_fileoff_t           shift)
+{
+       struct xfs_bmbt_irec    got;
+       int                     is_empty;
+       int                     error = 0;
+
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+               return -EIO;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
+       if (!error && !is_empty && got.br_startoff >= off &&
+           ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
+               error = -EINVAL;
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+       return error;
+}
+
 int
 xfs_bmap_insert_extents(
        struct xfs_trans        *tp,
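
The guard works because file offsets are stored in a 54-bit on-disk field (BMBT_STARTOFF_BITLEN; see the BMBT_STARTOFF_MASK definition below). For example, with got.br_startoff = 2^54 - 8 and shift = 16, (startoff + shift) & BMBT_STARTOFF_MASK wraps to 8, which is smaller than the original startoff, so the check returns -EINVAL rather than letting the rightmost extent silently wrap around.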
index 99dddbd0fcc6c606e59544d69a0435b0cc205c5f..9b49ddf99c4115479fe8271cc5b492a2d86b2b70 100644 (file)
@@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fsblock_t *firstblock,
                struct xfs_defer_ops *dfops);
+int    xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
+               xfs_fileoff_t shift);
 int    xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
                xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
                bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
index 1c5a8aaf2bfcea6b51b76e7aa7dff4b55b4e4145..059bc44c27e83edf3cb1fe2c494490e65f93c5d8 100644 (file)
@@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt {
                XFS_DFORK_DSIZE(dip, mp) : \
                XFS_DFORK_ASIZE(dip, mp))
 
+#define XFS_DFORK_MAXEXT(dip, mp, w) \
+       (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec))
+
 /*
  * Return pointers to the data or attribute forks.
  */
@@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block {
 #define BMBT_STARTBLOCK_BITLEN 52
 #define BMBT_BLOCKCOUNT_BITLEN 21
 
+#define BMBT_STARTOFF_MASK     ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+
 typedef struct xfs_bmbt_rec {
        __be64                  l0, l1;
 } xfs_bmbt_rec_t;
index d38d724534c48e2a4644be06acbf6d64da9a65b2..33dc34655ac3ddb32a5a5fa4711048285c8a0343 100644 (file)
@@ -374,6 +374,47 @@ xfs_log_dinode_to_disk(
        }
 }
 
+static xfs_failaddr_t
+xfs_dinode_verify_fork(
+       struct xfs_dinode       *dip,
+       struct xfs_mount        *mp,
+       int                     whichfork)
+{
+       uint32_t                di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+
+       switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+       case XFS_DINODE_FMT_LOCAL:
+               /*
+                * no local regular files yet
+                */
+               if (whichfork == XFS_DATA_FORK) {
+                       if (S_ISREG(be16_to_cpu(dip->di_mode)))
+                               return __this_address;
+                       if (be64_to_cpu(dip->di_size) >
+                                       XFS_DFORK_SIZE(dip, mp, whichfork))
+                               return __this_address;
+               }
+               if (di_nextents)
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_EXTENTS:
+               if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
+                       return __this_address;
+               break;
+       case XFS_DINODE_FMT_BTREE:
+               if (whichfork == XFS_ATTR_FORK) {
+                       if (di_nextents > MAXAEXTNUM)
+                               return __this_address;
+               } else if (di_nextents > MAXEXTNUM) {
+                       return __this_address;
+               }
+               break;
+       default:
+               return __this_address;
+       }
+       return NULL;
+}
+
 xfs_failaddr_t
 xfs_dinode_verify(
        struct xfs_mount        *mp,
@@ -441,24 +482,9 @@ xfs_dinode_verify(
        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
-               switch (dip->di_format) {
-               case XFS_DINODE_FMT_LOCAL:
-                       /*
-                        * no local regular files yet
-                        */
-                       if (S_ISREG(mode))
-                               return __this_address;
-                       if (di_size > XFS_DFORK_DSIZE(dip, mp))
-                               return __this_address;
-                       if (dip->di_nextents)
-                               return __this_address;
-                       /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
+               if (fa)
+                       return fa;
                break;
        case 0:
                /* Uninitialized inode ok. */
@@ -468,17 +494,9 @@ xfs_dinode_verify(
        }
 
        if (XFS_DFORK_Q(dip)) {
-               switch (dip->di_aformat) {
-               case XFS_DINODE_FMT_LOCAL:
-                       if (dip->di_anextents)
-                               return __this_address;
-               /* fall through */
-               case XFS_DINODE_FMT_EXTENTS:
-               case XFS_DINODE_FMT_BTREE:
-                       break;
-               default:
-                       return __this_address;
-               }
+               fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
+               if (fa)
+                       return fa;
        } else {
                /*
                 * If there is no fork offset, this may be a freshly-made inode
index 65fc4ed2e9a1050b76b1cd85d874294e52a8afd9..b228c821bae6802c0aa8ab9b79069d703245bbe2 100644 (file)
@@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range(
        if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
            low_rec->ar_startext == high_rec->ar_startext)
                return 0;
-       if (high_rec->ar_startext >= mp->m_sb.sb_rextents)
-               high_rec->ar_startext = mp->m_sb.sb_rextents - 1;
+       if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+               high_rec->ar_startext = mp->m_sb.sb_rextents;
 
        /* Iterate the bitmap, looking for discrepancies. */
        rtstart = low_rec->ar_startext;
index c35009a8669953dfee4013615ca62b47237b4d77..83b1e8c6c18f939e8afcabdb4eb37fd33e459da8 100644 (file)
@@ -685,12 +685,10 @@ out_unlock_iolock:
 }
 
 /*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode.  This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
  */
 int
 xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
 {
-       xfs_fileoff_t           remaining = length;
+       struct xfs_ifork        *ifp = &ip->i_df;
+       xfs_fileoff_t           end_fsb = start_fsb + length;
+       struct xfs_bmbt_irec    got, del;
+       struct xfs_iext_cursor  icur;
        int                     error = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       do {
-               int             done;
-               xfs_bmbt_irec_t imap;
-               int             nimaps = 1;
-               xfs_fsblock_t   firstblock;
-               struct xfs_defer_ops dfops;
+       if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+               error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+               if (error)
+                       return error;
+       }
 
-               /*
-                * Map the range first and check that it is a delalloc extent
-                * before trying to unmap the range. Otherwise we will be
-                * trying to remove a real extent (which requires a
-                * transaction) or a hole, which is probably a bad idea...
-                */
-               error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
-                                      XFS_BMAPI_ENTIRE);
+       if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+               return 0;
 
-               if (error) {
-                       /* something screwed, just bail */
-                       if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
-                               xfs_alert(ip->i_mount,
-                       "Failed delalloc mapping lookup ino %lld fsb %lld.",
-                                               ip->i_ino, start_fsb);
-                       }
-                       break;
-               }
-               if (!nimaps) {
-                       /* nothing there */
-                       goto next_block;
-               }
-               if (imap.br_startblock != DELAYSTARTBLOCK) {
-                       /* been converted, ignore */
-                       goto next_block;
-               }
-               WARN_ON(imap.br_blockcount == 0);
+       while (got.br_startoff + got.br_blockcount > start_fsb) {
+               del = got;
+               xfs_trim_extent(&del, start_fsb, length);
 
                /*
-                * Note: while we initialise the firstblock/dfops pair, they
-                * should never be used because blocks should never be
-                * allocated or freed for a delalloc extent and hence we need
-                * don't cancel or finish them after the xfs_bunmapi() call.
+                * A delete can push the cursor forward. Step back to the
+                * previous extent on non-delalloc extents or extents outside
+                * the target range.
                 */
-               xfs_defer_init(&dfops, &firstblock);
-               error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
-                                       &dfops, &done);
-               if (error)
-                       break;
+               if (!del.br_blockcount ||
+                   !isnullstartblock(del.br_startblock)) {
+                       if (!xfs_iext_prev_extent(ifp, &icur, &got))
+                               break;
+                       continue;
+               }
 
-               ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
-               start_fsb++;
-               remaining--;
-       } while(remaining > 0);
+               error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+                                                 &got, &del);
+               if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+                       break;
+       }
 
        return error;
 }
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
-       return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+       if (error)
+               return error;
+
+       /*
+        * If we zeroed right up to EOF and EOF straddles a page boundary we
+        * must make sure that the post-EOF area is also zeroed because the
+        * page could be mmap'd and iomap_zero_range doesn't do that for us.
+        * Writeback of the eof page will do this, albeit clumsily.
+        */
+       if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+               error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+                               (offset + len) & ~PAGE_MASK, LLONG_MAX);
+       }
+
+       return error;
 }
 
 /*
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
 
        trace_xfs_insert_file_space(ip);
 
+       error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+       if (error)
+               return error;
+
        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;
index c34fa9c342f25fdbee7e39fead0078e30859bba3..c7157bc48bd192ea60650577232ea87e8bfbbf02 100644 (file)
@@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
        struct xfs_trans                *tp,
        struct xfs_getfsmap_info        *info)
 {
-       struct xfs_rtalloc_rec          alow;
-       struct xfs_rtalloc_rec          ahigh;
+       struct xfs_rtalloc_rec          alow = { 0 };
+       struct xfs_rtalloc_rec          ahigh = { 0 };
        int                             error;
 
        xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
index a7afcad6b71140aed25f02979946cb9795afa644..3f2bd6032cf86525d6d344d60be903d9c739267c 100644 (file)
@@ -387,7 +387,7 @@ xfs_reserve_blocks(
        do {
                free = percpu_counter_sum(&mp->m_fdblocks) -
                                                mp->m_alloc_set_aside;
-               if (!free)
+               if (free <= 0)
                        break;
 
                delta = request - mp->m_resblks;
index 7a96c4e0ab5c621f38d9e034622d26ebd8d95437..5df4de666cc118848c86ddc33420d4147031ce57 100644 (file)
@@ -3236,7 +3236,6 @@ xfs_iflush_cluster(
        struct xfs_inode        *cip;
        int                     nr_found;
        int                     clcount = 0;
-       int                     bufwasdelwri;
        int                     i;
 
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -3360,37 +3359,22 @@ cluster_corrupt_out:
         * inode buffer and shut down the filesystem.
         */
        rcu_read_unlock();
-       /*
-        * Clean up the buffer.  If it was delwri, just release it --
-        * brelse can handle it with no problems.  If not, shut down the
-        * filesystem before releasing the buffer.
-        */
-       bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
-       if (bufwasdelwri)
-               xfs_buf_relse(bp);
-
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
 
-       if (!bufwasdelwri) {
-               /*
-                * Just like incore_relse: if we have b_iodone functions,
-                * mark the buffer as an error and call them.  Otherwise
-                * mark it as stale and brelse.
-                */
-               if (bp->b_iodone) {
-                       bp->b_flags &= ~XBF_DONE;
-                       xfs_buf_stale(bp);
-                       xfs_buf_ioerror(bp, -EIO);
-                       xfs_buf_ioend(bp);
-               } else {
-                       xfs_buf_stale(bp);
-                       xfs_buf_relse(bp);
-               }
-       }
-
        /*
-        * Unlocks the flush lock
+        * We'll always have an inode attached to the buffer for completion
+        * processing by the time we are called from xfs_iflush(). Hence we
+        * always need to do IO completion processing to abort the inodes
+        * attached to the buffer.  Handle them just like the shutdown case in
+        * xfs_buf_submit().
         */
+       ASSERT(bp->b_iodone);
+       bp->b_flags &= ~XBF_DONE;
+       xfs_buf_stale(bp);
+       xfs_buf_ioerror(bp, -EIO);
+       xfs_buf_ioend(bp);
+
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(cip, false);
        kmem_free(cilist);
        xfs_perag_put(pag);
@@ -3486,12 +3470,17 @@ xfs_iflush(
                xfs_log_force(mp, 0);
 
        /*
-        * inode clustering:
-        * see if other inodes can be gathered into this write
+        * inode clustering: try to gather other inodes into this write
+        *
+        * Note: Any error during clustering will result in the filesystem
+        * being shut down and completion callbacks run on the cluster buffer.
+        * As we have already flushed and attached this inode to the buffer,
+        * it has already been aborted and released by xfs_iflush_cluster() and
+        * so we have no further error handling to do here.
         */
        error = xfs_iflush_cluster(ip, bp);
        if (error)
-               goto cluster_corrupt_out;
+               return error;
 
        *bpp = bp;
        return 0;
@@ -3500,12 +3489,8 @@ corrupt_out:
        if (bp)
                xfs_buf_relse(bp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-cluster_corrupt_out:
-       error = -EFSCORRUPTED;
 abort_out:
-       /*
-        * Unlocks the flush lock
-        */
+       /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(ip, false);
        return error;
 }
index 49f5492eed3bdb9d85c53843df03546c83f2c799..55876dd02f0c8c75fa5653eeab82881bd3741928 100644 (file)
@@ -963,12 +963,13 @@ xfs_ilock_for_iomap(
        unsigned                *lockmode)
 {
        unsigned                mode = XFS_ILOCK_SHARED;
+       bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
 
        /*
         * COW writes may allocate delalloc space or convert unwritten COW
         * extents, so we need to make sure to take the lock exclusively here.
         */
-       if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+       if (xfs_is_reflink_inode(ip) && is_write) {
                /*
                 * FIXME: It could still overwrite on unshared extents and not
                 * need allocation.
@@ -989,6 +990,7 @@ xfs_ilock_for_iomap(
                mode = XFS_ILOCK_EXCL;
        }
 
+relock:
        if (flags & IOMAP_NOWAIT) {
                if (!xfs_ilock_nowait(ip, mode))
                        return -EAGAIN;
@@ -996,6 +998,17 @@ xfs_ilock_for_iomap(
                xfs_ilock(ip, mode);
        }
 
+       /*
+        * The reflink iflag could have changed since the earlier unlocked
+        * check, so if we got ILOCK_SHARED for a write but we're now a
+        * reflink inode we have to switch to ILOCK_EXCL and relock.
+        */
+       if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+               xfs_iunlock(ip, mode);
+               mode = XFS_ILOCK_EXCL;
+               goto relock;
+       }
+
        *lockmode = mode;
        return 0;
 }
index e040af120b69b3a69b38517cde3092773b391260..524f543c5b820fe45de5866cd950509190a74612 100644 (file)
@@ -258,7 +258,12 @@ xfs_trans_alloc(
        if (!(flags & XFS_TRANS_NO_WRITECOUNT))
                sb_start_intwrite(mp->m_super);
 
-       WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+       /*
+        * Zero-reservation ("empty") transactions can't modify anything, so
+        * they're allowed to run while we're frozen.
+        */
+       WARN_ON(resp->tr_logres > 0 &&
+               mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
        atomic_inc(&mp->m_active_trans);
 
        tp = kmem_zone_zalloc(xfs_trans_zone,
index 40a916efd7c039d2132014fcaf5ec780e4a8248e..1194a4c78d557fb411e9672291f6bab6e3623e9d 100644 (file)
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
 {
        return;
 }
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                                                                int event_flag)
 {
        static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
                       "Consider compiling CPUfreq support into your kernel.\n");
                printout = 0;
        }
-       return 0;
 }
 static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
 {
index 0763f065b975a543fb0e887d4af8d63bf7354f05..d10f1e7d6ba8c37140ae9332b59399baae4ffdf1 100644 (file)
@@ -63,7 +63,7 @@ typedef struct qspinlock {
 /*
  * Initializer
  */
-#define        __ARCH_SPIN_LOCK_UNLOCKED       { .val = ATOMIC_INIT(0) }
+#define        __ARCH_SPIN_LOCK_UNLOCKED       { { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
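
The extra brace pair tracks the structure: qspinlock's val now sits inside an anonymous union, and some compilers (notably older GCCs) reject or warn about a designated initializer that skips the union's brace level. A reduced illustration of the shape (layout simplified, not the real qspinlock definition):

        struct qspinlock_like {
                union {
                        int val;
                        struct { char locked, pending; } parts;
                };
        };

        /* one brace pair for the struct, one for the anonymous union */
        #define QSL_UNLOCKED    { { .val = 0 } }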
index faddde44de8c902e6884e64eeb8b22bd0d11b75a..3063125197adabb38876a9dc001986062db24658 100644 (file)
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
  * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
  */
 
+#ifndef pte_free_tlb
 #define pte_free_tlb(tlb, ptep, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                __pte_free_tlb(tlb, ptep, address);             \
        } while (0)
+#endif
 
+#ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                __pmd_free_tlb(tlb, pmdp, address);             \
        } while (0)
+#endif
 
 #ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
 #define pud_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);    \
                __pud_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
+#endif
 
 #ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)                       \
        do {                                                    \
                __tlb_adjust_range(tlb, address, PAGE_SIZE);            \
                __p4d_free_tlb(tlb, pudp, address);             \
        } while (0)
 #endif
+#endif
 
 #define tlb_migrate_finish(mm) do {} while (0)
 
index cc414db9da0ad6758f696d0de2a251ce99d8d301..482461d8931d9186c4a11b7b2d9a24f981a595bc 100644 (file)
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
                        int offset, size_t size, int flags);
 void af_alg_free_resources(struct af_alg_async_req *areq);
 void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+                        poll_table *wait);
 struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
                                           unsigned int areqlen);
 int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
index 9564597cbfac59aa1837d4de80a2f1c18d64cd7f..0aa1d9c3e0b968479af93c764b438f9ae35bece3 100644 (file)
 #define IMX6UL_CLK_CSI_PODF            222
 #define IMX6UL_CLK_PLL3_120M           223
 #define IMX6UL_CLK_KPP                 224
-#define IMX6UL_CLK_CKO1_SEL            225
-#define IMX6UL_CLK_CKO1_PODF           226
-#define IMX6UL_CLK_CKO1                        227
-#define IMX6UL_CLK_CKO2_SEL            228
-#define IMX6UL_CLK_CKO2_PODF           229
-#define IMX6UL_CLK_CKO2                        230
-#define IMX6UL_CLK_CKO                 231
-
-/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED          232
-#define IMX6ULL_CLK_ESAI_PODF          233
-#define IMX6ULL_CLK_ESAI_EXTAL         234
-#define IMX6ULL_CLK_ESAI_MEM           235
-#define IMX6ULL_CLK_ESAI_IPG           236
-#define IMX6ULL_CLK_DCP_CLK            237
-#define IMX6ULL_CLK_EPDC_PRE_SEL       238
-#define IMX6ULL_CLK_EPDC_SEL           239
-#define IMX6ULL_CLK_EPDC_PODF          240
-#define IMX6ULL_CLK_EPDC_ACLK          241
-#define IMX6ULL_CLK_EPDC_PIX           242
-#define IMX6ULL_CLK_ESAI_SEL           243
+#define IMX6ULL_CLK_ESAI_PRED          225
+#define IMX6ULL_CLK_ESAI_PODF          226
+#define IMX6ULL_CLK_ESAI_EXTAL         227
+#define IMX6ULL_CLK_ESAI_MEM           228
+#define IMX6ULL_CLK_ESAI_IPG           229
+#define IMX6ULL_CLK_DCP_CLK            230
+#define IMX6ULL_CLK_EPDC_PRE_SEL       231
+#define IMX6ULL_CLK_EPDC_SEL           232
+#define IMX6ULL_CLK_EPDC_PODF          233
+#define IMX6ULL_CLK_EPDC_ACLK          234
+#define IMX6ULL_CLK_EPDC_PIX           235
+#define IMX6ULL_CLK_ESAI_SEL           236
+#define IMX6UL_CLK_CKO1_SEL            237
+#define IMX6UL_CLK_CKO1_PODF           238
+#define IMX6UL_CLK_CKO1                        239
+#define IMX6UL_CLK_CKO2_SEL            240
+#define IMX6UL_CLK_CKO2_PODF           241
+#define IMX6UL_CLK_CKO2                        242
+#define IMX6UL_CLK_CKO                 243
 #define IMX6UL_CLK_END                 244
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
index 4b35a66383f983f5594f3b71885145d6b1b101ef..e54f40974eb04ca516987ac3df89b0997b5ca0dd 100644 (file)
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
 int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name);
 
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+                               u32 level);
+
 int acpi_resources_are_enforced(void);
 
 #ifdef CONFIG_HIBERNATION
index 0c27515d2cf6db3683da2341a700283f82a99645..8124815eb1218b5653572fc4a04f5d4d734e3469 100644 (file)
@@ -214,6 +214,7 @@ struct atmphy_ops {
 struct atm_skb_data {
        struct atm_vcc  *vcc;           /* ATM VCC */
        unsigned long   atm_options;    /* ATM layer options */
+       unsigned int    acct_truesize;  /* truesize accounted to vcc */
 };
 
 #define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
 
 void atm_dev_release_vccs(struct atm_dev *dev);
 
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+       /*
+        * Because ATM skbs may not belong to a sock (and we don't
+        * necessarily want to), skb->truesize may be adjusted,
+        * escaping the hack in pskb_expand_head() which avoids
+        * doing so for some cases. So stash the value of truesize
+        * at the time we accounted it, and atm_pop_raw() can use
+        * that value later, in case it changes.
+        */
+       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+       ATM_SKB(skb)->acct_truesize = skb->truesize;
+       ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
 
 static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
 {
index 0bd432a4d7bd00ce376292720edd104d617c80c2..24251762c20c94edd238cfca1c1f55f0269d4e80 100644 (file)
@@ -22,7 +22,6 @@ struct dentry;
  */
 enum wb_state {
        WB_registered,          /* bdi_register() was done */
-       WB_shutting_down,       /* wb_shutdown() in progress */
        WB_writeback_running,   /* Writeback is in progress */
        WB_has_dirty_io,        /* Dirty inodes on ->b_{dirty|io|more_io} */
        WB_start_all,           /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
        struct rb_root cgwb_congested_tree; /* their congested states */
+       struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
 #else
        struct bdi_writeback_congested *wb_congested;
 #endif
index 9154570edf2963628f873d7404930450735ff41a..79226ca8f80f2db7f813cf63973c61288c1b78ab 100644 (file)
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
        if (!q->limits.chunk_sectors)
                return q->limits.max_sectors;
 
-       return q->limits.chunk_sectors -
-                       (offset & (q->limits.chunk_sectors - 1));
+       return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+                       (offset & (q->limits.chunk_sectors - 1))));
 }
 
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
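
Concretely: with chunk_sectors = 256 and max_sectors = 64, an offset of 200 leaves 256 - (200 & 255) = 56 sectors to the chunk boundary, and both old and new code return 56. At offset 0, though, the old expression returned 256, exceeding the queue's 64-sector cap; the added min() clamps that case to 64.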
index 975fb4cf1bb743ccff5fae92e82df582533c0ff2..d50c2f0a655ae3f95271d5f8de40f8eabc917c65 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BPF_CGROUP_H
 #define _BPF_CGROUP_H
 
+#include <linux/errno.h>
 #include <linux/jump_label.h>
 #include <uapi/linux/bpf.h>
 
@@ -188,12 +189,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                                                              \
        __ret;                                                                \
 })
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr);
 #else
 
+struct bpf_prog;
 struct cgroup_bpf {};
 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype,
+                                        struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                                       union bpf_attr __user *uattr)
+{
+       return -EINVAL;
+}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
index 995c3b1e59bfa82ef3ad0504b090ab28a898f016..8827e797ff97d0973ddf1d4217a885cee9bb63ee 100644 (file)
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+                                          struct sk_buff *skb,
+                                          struct bpf_prog *xdp_prog)
+{
+       return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -684,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog);
 #else
 static inline struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -702,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
 {
        return -EOPNOTSUPP;
 }
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                                     struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
 #endif
 
 #if defined(CONFIG_XDP_SOCKETS)
index 5f8a4283092d0a6960fd663a33832221a9615353..9d9ff755ec2972cf6e46d1905e1a5caae9dd5ae6 100644 (file)
@@ -5,11 +5,12 @@
 #include <uapi/linux/bpf.h>
 
 #ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int lirc_prog_detach(const union bpf_attr *attr);
 int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
 #else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+                                  struct bpf_prog *prog)
 {
        return -EINVAL;
 }
index b1a5562b3215b71302422b7a727bbb2cf499d8f3..c68acc47da57b6a7bef7b8ef84a9c897d4b83ce6 100644 (file)
@@ -72,6 +72,9 @@
  */
 #ifndef COMPAT_SYSCALL_DEFINEx
 #define COMPAT_SYSCALL_DEFINEx(x, name, ...)                                   \
+       __diag_push();                                                          \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                              \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));       \
        asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))        \
                __attribute__((alias(__stringify(__se_compat_sys##name))));     \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));  \
        asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__))   \
        {                                                                       \
-               return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+               __MAP(x,__SC_TEST,__VA_ARGS__);                                 \
+               return ret;                                                     \
        }                                                                       \
+       __diag_pop();                                                           \
        static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* COMPAT_SYSCALL_DEFINEx */
 
index f1a7492a5cc8cc59813734d1b258dbaf04bf76c8..573f5a7d42d4fc9d1cbeecd6deb019d8d6b4d983 100644 (file)
 #define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
 #endif
 
+/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline  __attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
 /*
  * Force always-inline if the user requests it so via the .config,
  * or if gcc is too old.
  * -Wunused-function.  This turns out to avoid the need for complex #ifdef
  * directives.  Suppress the warning in clang as well by using "unused"
  * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||               \
     !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline          __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__  __attribute__((always_inline,unused)) notrace
-#define __inline __inline      __attribute__((always_inline,unused)) notrace
+#define inline \
+       inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline          __attribute__((unused)) notrace
-#define __inline__ __inline__  __attribute__((unused)) notrace
-#define __inline __inline      __attribute__((unused)) notrace
+#define inline inline          __attribute__((unused)) notrace __gnu_inline
 #endif
 
+#define __inline__ inline
+#define __inline inline
 #define __always_inline        inline __attribute__((always_inline))
 #define  noinline      __attribute__((noinline))
 
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+       __diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore      ignored
+#define __diag_GCC_warn                warning
+#define __diag_GCC_error       error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s)         #s
+#define __diag_str(s)          __diag_str1(s)
+#define __diag(s)              _Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s)                __diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
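
Earlier in this file's diff, the kernel's inline macros gain __gnu_inline. A sketch of what gnu89 extern-inline semantics buy (hypothetical function, not from the patch):

/* In a header: with gnu_inline in effect, this emits no out-of-line
 * symbol, so identical definitions in many translation units no
 * longer collide at link time; callers simply get the inline body.
 */
extern inline int twice(int x)
{
        return 2 * x;
}
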
index 6b79a9bba9a7630eb0b3a8fe35251d41717a2da0..a8ba6b04152c13c9ca2960898cd6ea4e89d37957 100644 (file)
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
 # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 #endif
 
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push()  __diag(push)
+#define __diag_pop()   __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+       __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+       __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+       __diag_ ## compiler(version, error, option)
+
 #endif /* __LINUX_COMPILER_TYPES_H */
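
Together with the GCC-side definitions above, this gives a compiler-portable way to scope a diagnostic. A usage sketch matching the syscall-wrapper hunks elsewhere in this merge:

__diag_push();
__diag_ignore(GCC, 8, "-Wattribute-alias",
              "Type aliasing is used to sanitize syscall arguments");
/* ... declarations that alias across incompatible prototypes ... */
__diag_pop();

On compilers without __diag support the macros expand to nothing, so the annotation costs nothing elsewhere.
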
index 3855e3800f483e07cc4c16e68f6a1f2780de1b3e..deb0f663252fc55e39546c7d3107e96dfb3f03ae 100644 (file)
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
 
 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
                const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
                    pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size, pfn_t pfn);
index b67bf6ac907d8f324494efaf1d441b0ee7955a13..3c5a4cb3eb953174c688c4b965ba09d87925fdb3 100644 (file)
@@ -48,7 +48,7 @@
  *   CMA should not be used by the device drivers directly. It is
  *   only a helper framework for dma-mapping subsystem.
  *
- *   For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ *   For more information, see kernel-docs in kernel/dma/contiguous.c
  */
 
 #ifdef __KERNEL__
index 45fc0f5000d8899ead3592cbdaa813d726e2c2af..c73dd7396886751938a0e2e1355d2aa28797ad87 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -469,15 +470,16 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-       unsigned int pages;
-       u8 image[];
+       u32 pages;
+       /* Some arches need word alignment for their instructions */
+       u8 image[] __aligned(4);
 };
 
 struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
-                               locked:1,       /* Program image locked? */
+                               undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -671,50 +673,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
-       fp->locked = 1;
-       WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
-       if (fp->locked) {
-               WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-               /* In case set_memory_rw() fails, we want to be the first
-                * to crash here instead of some random place later on.
-                */
-               fp->locked = 0;
-       }
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
-       WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
+       fp->undo_set_mem = 1;
+       set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
+       if (fp->undo_set_mem)
+               set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
+       set_memory_rw((unsigned long)hdr, hdr->pages);
 }
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
 
 static inline struct bpf_binary_header *
 bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -786,6 +765,21 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+                                unsigned int pktlen)
+{
+       unsigned int len;
+
+       if (unlikely(!(fwd->flags & IFF_UP)))
+               return -ENETDOWN;
+
+       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+       if (pktlen > len)
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
@@ -961,6 +955,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
 }
 #endif /* CONFIG_BPF_JIT */
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
 #define BPF_ANC                BIT(15)
 
 static inline bool bpf_needs_clear_a(const struct sock_filter *first)
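
A sketch of the intended caller shape for xdp_ok_fwd_dev(), mirroring the generic-redirect path added alongside this hunk (variable names assumed):

struct net_device *fwd = dst->dev;      /* destination from the devmap entry */
int err;

err = xdp_ok_fwd_dev(fwd, skb->len);
if (unlikely(err))
        return err;     /* device down, or frame too large for its MTU */
skb->dev = fwd;
/* ... hand the skb to the forwarding device ... */
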
index 5c91108846db20894ab70dafe43b7922fe08fb1f..d78d146a98da95c9c76417f8196f24be515df612 100644 (file)
@@ -1720,8 +1720,6 @@ struct file_operations {
        int (*iterate) (struct file *, struct dir_context *);
        int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
-       struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
-       __poll_t (*poll_mask) (struct file *, __poll_t);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
index 3efa3b861d44cae46670532c9db208d8630099a9..941b11811f85915bd70a730bbc338288d995493b 100644 (file)
@@ -16,6 +16,7 @@
 #define __FSL_GUTS_H__
 
 #include <linux/types.h>
+#include <linux/io.h>
 
 /**
  * Global Utility Registers.
index 8154f4920fcb9de96a24ec7b85d9b92f56968122..ebb77674be90cfff4466667c7bb62c121db5a235 100644 (file)
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
  */
 int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
 
 extern void ftrace_stub(unsigned long a0, unsigned long a1,
                        struct ftrace_ops *op, struct pt_regs *regs);
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
 {
        return 0;
 }
-static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
 static inline void ftrace_free_init_mem(void) { }
 static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
index 41a3d5775394fed48e7b880317eaf6c1944c2817..773bcb1d4044ed2d83d4a1504f951951fa639d94 100644 (file)
@@ -511,6 +511,7 @@ struct hid_output_fifo {
 #define HID_STAT_ADDED         BIT(0)
 #define HID_STAT_PARSED                BIT(1)
 #define HID_STAT_DUP_DETECTED  BIT(2)
+#define HID_STAT_REPROBED      BIT(3)
 
 struct hid_input {
        struct list_head list;
@@ -579,7 +580,7 @@ struct hid_device {                                                 /* device report descriptor */
        bool battery_avoid_query;
 #endif
 
-       unsigned int status;                                            /* see STAT flags above */
+       unsigned long status;                                           /* see STAT flags above */
        unsigned claimed;                                               /* Claimed by hidinput, hiddev? */
        unsigned quirks;                                                /* Various quirks the device can pull on us */
        bool io_started;                                                /* If IO has started */
index 7843b98e1c6ea7802dcea3f8b5a944d2355398d5..c20c7e197d0731e58b0f68b87531299080e8421a 100644 (file)
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
 
 static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
 {
-       return -1;
+       return -EINVAL;
 }
 
 static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
                                   struct bridge_vlan_info *p_vinfo)
 {
-       return -1;
+       return -EINVAL;
 }
 #endif
 
index f8231854b5d60316310fc5d8e57eea8625fe3078..119f53941c124c22452bf615f9ccca5a9130bb87 100644 (file)
@@ -109,6 +109,8 @@ struct ip_mc_list {
 extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
 extern int igmp_rcv(struct sk_buff *);
 extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+                               unsigned int mode);
 extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
 extern void ip_mc_drop_socket(struct sock *sk);
 extern int ip_mc_source(int add, int omode, struct sock *sk,
index 767467d886de4d53f5f5b862614b3f1644a5ecfa..67c75372b6915289e6d0876ac21368c89eb3896a 100644 (file)
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
        char __user *user_buffer);
 size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
 int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
 int iio_dma_buffer_request_update(struct iio_buffer *buffer);
 
 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
index d7188de4db968c14c5db1a44fca6421aec22d041..3f4bf60b0bb55c4d9d2708593d2439aec269f9c9 100644 (file)
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
        return axis == ABS_MT_SLOT || input_is_mt_value(axis);
 }
 
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
                                unsigned int tool_type, bool active);
 
 void input_mt_report_finger_count(struct input_dev *dev, int count);
index 4bd2f34947f4a7647a485fe2e8092c1fd055f630..201de12a9957171003757967bb69161c3d060575 100644 (file)
@@ -503,6 +503,7 @@ struct irq_chip {
  * IRQCHIP_SKIP_SET_WAKE:      Skip chip.irq_set_wake(), for this irq chip
  * IRQCHIP_ONESHOT_SAFE:       One shot does not require mask/unmask
  * IRQCHIP_EOI_THREADED:       Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
  */
 enum {
        IRQCHIP_SET_TYPE_MASKED         = (1 <<  0),
index 25b33b66453773cb01509725fa68664c555ffd3f..dd1e40ddac7d8235e31aeb96fe460c77a70ac681 100644 (file)
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
        return desc->irq_common_data.handler_data;
 }
 
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
-       return desc->irq_common_data.msi_desc;
-}
-
 /*
  * Architectures call this to let the generic IRQ layer
  * handle an interrupt.
index d231232385349146faf64c42cd05a73bddd0fcca..941dc0a5a877998e46d11541bdb491655a5f34af 100644 (file)
@@ -666,7 +666,7 @@ do {                                                                        \
  * your code. (Extra memory is used for special buffers that are
  * allocated when trace_printk() is used.)
  *
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
  * argument, there's no need to scan the string for printf formats.
  * The trace_puts() will suffice. But how can we take advantage of
  * using trace_puts() when trace_printk() has only one argument?
index 2803264c512f8f6bf80dffc462c4a7ab079ce5f3..c1961761311dbfd5968d6ed64ea91ca3c7d25b0e 100644 (file)
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
 int kthread_park(struct task_struct *k);
 void kthread_unpark(struct task_struct *k);
 void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
 
 int kthreadd(void *unused);
 extern struct task_struct *kthreadd_task;
index 8b8946dd63b9d4df3d08c5051604fce0fc147be1..32f247cb5e9ea0c107970d31f135c903dfd04c55 100644 (file)
@@ -210,6 +210,7 @@ enum {
        ATA_FLAG_SLAVE_POSS     = (1 << 0), /* host supports slave dev */
                                            /* (doesn't imply presence) */
        ATA_FLAG_SATA           = (1 << 1),
+       ATA_FLAG_NO_LPM         = (1 << 2), /* host not happy with LPM */
        ATA_FLAG_NO_LOG_PAGE    = (1 << 5), /* do not issue log page read */
        ATA_FLAG_NO_ATAPI       = (1 << 6), /* No ATAPI support */
        ATA_FLAG_PIO_DMA        = (1 << 7), /* PIO cmds via DMA */
@@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag)
        return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
 }
 
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn)            \
+       for ((tag) = 0; (tag) < (max_tag) &&                    \
+            ({ qc = fn((ap), (tag)); 1; }); (tag)++)           \
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag)                                       \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag)                                   \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag)                     \
+       __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
 /*
  * device helpers
  */
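
A usage sketch for the new iterators (an illustrative error-handling-style loop; the real users are the libata EH and driver paths):

struct ata_queued_cmd *qc;
unsigned int tag;

ata_qc_for_each(ap, qc, tag) {
        /* ata_qc_from_tag() yields NULL for inactive tags */
        if (!qc || !(qc->flags & ATA_QCFLAG_FAILED))
                continue;
        /* ... handle the failed command ... */
}
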
index 4f5f8c21e2830bd3c7de20bd509b360f27e66816..1eb6f244588dae1efa08a2c9dcb8e46460592bcc 100644 (file)
@@ -27,6 +27,8 @@
  */
 #define MARVELL_PHY_ID_88E6390         0x01410f90
 
+#define MARVELL_PHY_FAMILY_ID(id)      ((id) >> 4)
+
 /* struct phy_device dev_flags definitions */
 #define MARVELL_PHY_M1145_FLAGS_RESISTANCE     0x00000001
 #define MARVELL_PHY_M1118_DNS323_LEDS          0x00000002
index 31ca3e28b0ebe98369a1582430230a2f68c6baae..a6ddefc60517899167b55b53b0007ba3e3b9ed80 100644 (file)
@@ -38,6 +38,7 @@ struct memory_block {
 
 int arch_get_memory_phys_device(unsigned long start_pfn);
 unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
 
 /* These states are exposed to userspace as text strings in sysfs */
 #define        MEM_ONLINE              (1<<0) /* exposed to userspace */
index d3c9db492b30065750726992ba1001c48153232b..fab5121ffb8f5de2b5f39b6a0a7e43cca4b047e0 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/mlx5/driver.h>
 
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
 enum {
        SRIOV_NONE,
        SRIOV_LEGACY,
index 27134c4fcb76eb5140ff4828066e73e11d671cd9..ac281f5ec9b8077ba859f33eaf61e3f03ecdeb3d 100644 (file)
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         vnic_env_queue_counters[0x1];
        u8         ets[0x1];
        u8         nic_flow_table[0x1];
-       u8         eswitch_flow_table[0x1];
+       u8         eswitch_manager[0x1];
        u8         device_memory[0x1];
        u8         mcam_reg[0x1];
        u8         pcam_reg[0x1];
index a0fbb9ffe3805276a16c485564de77047898a18e..3982c83fdcbfa335ef728cbcb10f073b3b1c7d70 100644 (file)
@@ -2132,7 +2132,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
                                        struct mminit_pfnnid_cache *state);
 #endif
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 void zero_resv_unavail(void);
 #else
 static inline void zero_resv_unavail(void) {}
index 2014bd19f28eff41ae37b80eba324644c537e291..96a71a648eed991530489ecea56b89d8755b395c 100644 (file)
@@ -501,6 +501,7 @@ enum dmi_field {
        DMI_PRODUCT_VERSION,
        DMI_PRODUCT_SERIAL,
        DMI_PRODUCT_UUID,
+       DMI_PRODUCT_SKU,
        DMI_PRODUCT_FAMILY,
        DMI_BOARD_VENDOR,
        DMI_BOARD_NAME,
index 08b6eb964dd6865af3e1a7079a54b1e99f77e077..6554d3ba4396b3df49acac934ad16eeb71a695f4 100644 (file)
@@ -147,7 +147,6 @@ struct proto_ops {
        int             (*getname)   (struct socket *sock,
                                      struct sockaddr *addr,
                                      int peer);
-       __poll_t        (*poll_mask) (struct socket *sock, __poll_t events);
        __poll_t        (*poll)      (struct file *file, struct socket *sock,
                                      struct poll_table_struct *wait);
        int             (*ioctl)     (struct socket *sock, unsigned int cmd,
index 3ec9850c7936f01c0f7564dbe519e95ce0849639..3d0cc0b5cec2d7514dbebf32effab9b1e6388c3c 100644 (file)
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       if (PTR_ERR(pp) != -EINPROGRESS) {
+               NAPI_GRO_CB(skb)->flush |= flush;
+               skb_gro_remcsum_cleanup(skb, grc);
+               skb->remcsum_offload = 0;
+       }
+}
 #else
 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 {
        NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, grc);
+       skb->remcsum_offload = 0;
+}
 #endif
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
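
A sketch of the intended call site, modeled on the tunnel ->gro_receive() tails that previously open-coded this (function skeleton assumed). The combined helper leaves the remcsum state untouched when the lower layer returns -EINPROGRESS and still owns the skb:

/* tail of a gro_receive handler using remote checksum offload */
skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
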
index 9dee3c23895d82fae05025961fe83d15b23d45b7..712eed156d0912f1aecc97de222597f1d7cc5dc9 100644 (file)
@@ -1438,6 +1438,8 @@ enum {
        NFS_IOHDR_EOF,
        NFS_IOHDR_REDO,
        NFS_IOHDR_STAT,
+       NFS_IOHDR_RESEND_PNFS,
+       NFS_IOHDR_RESEND_MDS,
 };
 
 struct nfs_io_completion;
index 9206a4fef9ac151905a825700c6ae7477d7cbd88..cb8d84090cfb7adb478d156727279aa48686d816 100644 (file)
@@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
 int of_genpd_parse_idle_states(struct device_node *dn,
                               struct genpd_power_state **states, int *n);
 unsigned int of_genpd_opp_to_performance_state(struct device *dev,
-                               struct device_node *opp_node);
+                               struct device_node *np);
 
 int genpd_dev_pm_attach(struct device *dev);
 struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
 
 static inline unsigned int
 of_genpd_opp_to_performance_state(struct device *dev,
-                                 struct device_node *opp_node)
+                                 struct device_node *np)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline int genpd_dev_pm_attach(struct device *dev)
index fdf86b4cbc71bacca2795107532fb75e3855c0c9..7e0fdcf905d2e77b355c94a7381446927452723c 100644 (file)
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
        pt->_key   = ~(__poll_t)0; /* all events enabled */
 }
 
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
 {
-       return file->f_op->get_poll_head && file->f_op->poll_mask;
+       return file->f_op->poll;
 }
 
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
 {
-       return file->f_op->poll || file_has_poll_mask(file);
+       if (unlikely(!file->f_op->poll))
+               return DEFAULT_POLLMASK;
+       return file->f_op->poll(file, pt);
 }
 
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
 struct poll_table_entry {
        struct file *filp;
        __poll_t key;
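
With ->get_poll_head/->poll_mask gone, readiness is queried through the single remaining entry point. A caller sketch (poll-table plumbing assumed):

__poll_t mask;

mask = vfs_poll(file, pt);              /* pt supplied by the caller's poll table */
if (mask & (EPOLLIN | EPOLLRDNORM))
        handle_readable(file);          /* hypothetical consumer */
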
index 4193c41e383a897273605aac39f331b46512691a..a685da2c4522b5583ec405d0c0ff49da6aa718c9 100644 (file)
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
 extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
 extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+                                                      spinlock_t *lock,
+                                                      unsigned long *flags);
 #endif /* _LINUX_REFCOUNT_H */
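
A minimal usage sketch for the new variant (object and field names hypothetical):

unsigned long flags;

if (refcount_dec_and_lock_irqsave(&obj->ref, &obj->lock, &flags)) {
        /* last reference dropped: lock held, IRQ state in flags */
        list_del(&obj->node);
        spin_unlock_irqrestore(&obj->lock, flags);
        kfree(obj);
}
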
index 64125443f8a638e787adfbaebab4755f5d63852a..5ef5c7c412a75b5f24e276563d278be8ced3b212 100644 (file)
@@ -354,6 +354,8 @@ struct rmi_driver_data {
        struct mutex irq_mutex;
        struct input_dev *input;
 
+       struct irq_domain *irqdomain;
+
        u8 pdt_props;
 
        u8 num_rx_electrodes;
index 51f52020ad5fdd44ab4fdfa6ad2e0063c4780947..093aa57120b0cf1f40c2a75f28612331c6e6f6e0 100644 (file)
@@ -9,9 +9,6 @@
 #include <asm/io.h>
 
 struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-       unsigned long   sg_magic;
-#endif
        unsigned long   page_link;
        unsigned int    offset;
        unsigned int    length;
@@ -64,7 +61,6 @@ struct sg_table {
  *
  */
 
-#define SG_MAGIC       0x87654321
 #define SG_CHAIN       0x01UL
 #define SG_END         0x02UL
 
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~SG_END;
 }
 
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
 static inline void sg_init_marker(struct scatterlist *sgl,
                                  unsigned int nents)
 {
-#ifdef CONFIG_DEBUG_SG
-       unsigned int i;
-
-       for (i = 0; i < nents; i++)
-               sgl[i].sg_magic = SG_MAGIC;
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 87bf02d93a279a9b98df452c7ad78a0b54adc1db..43731fe51c972ad6c3d6cb277ac940ec5a939023 100644 (file)
@@ -118,7 +118,7 @@ struct task_group;
  * the comment with set_special_state().
  */
 #define is_special_task_state(state)                           \
-       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+       ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
 
 #define __set_current_state(state_value)                       \
        do {                                                    \
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
                set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
 }
 
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
 
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
        if (current->rseq)
-               __rseq_handle_notify_resume(regs);
+               __rseq_handle_notify_resume(ksig, regs);
 }
 
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
        preempt_disable();
        __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
        preempt_enable();
-       rseq_handle_notify_resume(regs);
+       rseq_handle_notify_resume(ksig, regs);
 }
 
 /* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1833,7 @@ static inline void rseq_migrate(struct task_struct *t)
 
 /*
  * If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
  */
 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
 {
@@ -1847,7 +1847,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
                t->rseq_len = current->rseq_len;
                t->rseq_sig = current->rseq_sig;
                t->rseq_event_mask = current->rseq_event_mask;
-               rseq_preempt(t);
        }
 }
 
@@ -1864,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
 static inline void rseq_set_notify_resume(struct task_struct *t)
 {
 }
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+                                            struct pt_regs *regs)
 {
 }
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+                                      struct pt_regs *regs)
 {
 }
 static inline void rseq_preempt(struct task_struct *t)
index c8688595499421d9f051366d4a85e5553751768e..610a201126ee031166798baaf8ecae74fe478c4d 100644 (file)
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
  *     @hash: the packet hash
  *     @queue_mapping: Queue mapping for multiqueue devices
  *     @xmit_more: More SKBs are pending for this queue
+ *     @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
  *     @ndisc_nodetype: router type (from link layer)
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -735,7 +736,7 @@ struct sk_buff {
                                peeked:1,
                                head_frag:1,
                                xmit_more:1,
-                               __unused:1; /* one bit hole */
+                               pfmemalloc:1;
 
        /* fields enclosed in headers_start/headers_end are copied
         * using a single memcpy() in __copy_skb_header()
@@ -754,31 +755,30 @@ struct sk_buff {
 
        __u8                    __pkt_type_offset[0];
        __u8                    pkt_type:3;
-       __u8                    pfmemalloc:1;
        __u8                    ignore_df:1;
-
        __u8                    nf_trace:1;
        __u8                    ip_summed:2;
        __u8                    ooo_okay:1;
+
        __u8                    l4_hash:1;
        __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
-
        __u8                    no_fcs:1;
        /* Indicates the inner headers are valid in the skbuff. */
        __u8                    encapsulation:1;
        __u8                    encap_hdr_csum:1;
        __u8                    csum_valid:1;
+
        __u8                    csum_complete_sw:1;
        __u8                    csum_level:2;
        __u8                    csum_not_inet:1;
-
        __u8                    dst_pending_confirm:1;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        __u8                    ndisc_nodetype:2;
 #endif
        __u8                    ipvs_property:1;
+
        __u8                    inner_protocol_type:1;
        __u8                    remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
@@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
                                    int *peeked, int *off, int *err);
 struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
                                  int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          struct poll_table_struct *wait);
 int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
                           struct iov_iter *to, int size);
 static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
index 09fa2c6f0e68e69567b8b918cdda3f13f5ddaefa..3a1a1dbc6f49479f61f4c1a6588e6a672f0b0663 100644 (file)
@@ -155,8 +155,12 @@ struct kmem_cache {
 
 #ifdef CONFIG_SYSFS
 #define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
 void sysfs_slab_release(struct kmem_cache *);
 #else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
 static inline void sysfs_slab_release(struct kmem_cache *s)
 {
 }
index 1e8a46435838456ae57af232664ce9686a4ac5c0..fd57888d4942e10166440da41d449a52fc8e1730 100644 (file)
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
 #define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                       unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+               __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
 int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
                           size_t max_size, unsigned int cpu_mult,
                           gfp_t gfp);
index 73810808cdf266e5cdcfc1e0c6b3af126a0bf4b1..a368a68cb667f848c8a505aecb3b8b24df51b63a 100644 (file)
@@ -231,6 +231,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
  */
 #ifndef __SYSCALL_DEFINEx
 #define __SYSCALL_DEFINEx(x, name, ...)                                        \
+       __diag_push();                                                  \
+       __diag_ignore(GCC, 8, "-Wattribute-alias",                      \
+                     "Type aliasing is used to sanitize syscall arguments");\
        asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))       \
                __attribute__((alias(__stringify(__se_sys##name))));    \
        ALLOW_ERROR_INJECTION(sys##name, ERRNO);                        \
@@ -243,6 +246,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
                __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));       \
                return ret;                                             \
        }                                                               \
+       __diag_pop();                                                   \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
 #endif /* __SYSCALL_DEFINEx */
 
index 6c5f2074e14f36d1368e1723394d4da9ef0cf3ae..6f8b68cd460f8c2b0aff758848a5de5a3ad65d6c 100644 (file)
@@ -75,7 +75,7 @@ struct uio_device {
         struct fasync_struct    *async_queue;
         wait_queue_head_t       wait;
         struct uio_info         *info;
-       spinlock_t              info_lock;
+       struct mutex            info_lock;
         struct kobject          *map_dir;
         struct kobject          *portio_dir;
 };
index 53ce8176c31306deaf9c2be5743546abe4d27b53..ec9d6bc658559c55b64ac3c1d23b4e1166cc4b04 100644 (file)
@@ -271,7 +271,7 @@ int  bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                     int flags);
 int  bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                            size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
 int  bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int  bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
 int  bt_sock_wait_ready(struct sock *sk, unsigned long flags);
index 5cba71d2dc44b9ea2366725ff68c9f668f639345..71b9043aa0e7995c7e61f17b4493acd99410d1bc 100644 (file)
@@ -170,6 +170,7 @@ struct fib6_info {
                                        unused:3;
 
        struct fib6_nh                  fib6_nh;
+       struct rcu_head                 rcu;
 };
 
 struct rt6_info {
@@ -273,7 +274,7 @@ static inline void ip6_rt_put(struct rt6_info *rt)
 }
 
 struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
 
 static inline void fib6_info_hold(struct fib6_info *f6i)
 {
@@ -283,7 +284,7 @@ static inline void fib6_info_hold(struct fib6_info *f6i)
 static inline void fib6_info_release(struct fib6_info *f6i)
 {
        if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
-               fib6_info_destroy(f6i);
+               call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
 }
 
 enum fib6_walk_state {
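
The callback shape implied by this conversion (a sketch; the real body in net/ipv6/ip6_fib.c also releases nexthop and device references):

void fib6_info_destroy_rcu(struct rcu_head *head)
{
        struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);

        /* ... drop per-cpu routes and held references ... */
        kfree(f6i);
}
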
index 59656fc580df7e0301e0c9282af9358a255b863f..7b9c82de11cc9388b070992af610e5fd14b66333 100644 (file)
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
                (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
 }
 
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+       return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+              RTF_GATEWAY;
+}
+
 void ip6_route_input(struct sk_buff *skb);
 struct dst_entry *ip6_route_input_lookup(struct net *net,
                                         struct net_device *dev,
index 16475c269749a72f3c487e102e50cabff797317e..8f73be4945037c6d0997ec8ab7c3e9da3980a6e4 100644 (file)
@@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
 struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
                                          struct ipv6_txoptions *opt,
                                          int newtype,
-                                         struct ipv6_opt_hdr __user *newopt,
-                                         int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
-                       struct ipv6_txoptions *opt,
-                       int newtype,
-                       struct ipv6_opt_hdr *newopt,
-                       int newoptlen);
+                                         struct ipv6_opt_hdr *newopt);
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
                                          struct ipv6_txoptions *opt);
 
@@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
         * to minimize possibility that any useful information to an
         * attacker is leaked. Only lower 20 bits are relevant.
         */
-       rol32(hash, 16);
+       hash = rol32(hash, 16);
 
        flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
 
@@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void);
 
 int ipv6_sock_mc_join(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+                         const struct in6_addr *addr, unsigned int mode);
 int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
                      const struct in6_addr *addr);
 #endif /* _NET_IPV6_H */
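
The change above fixes a silent no-op: rol32() returns the rotated value rather than rotating in place, so the old statement discarded its result and the flow label was derived from an unrotated hash. Illustrative values (assumed):

/*
 * hash             = 0x00012345
 * rol32(hash, 16)  = 0x23450001   (previously computed, then discarded)
 * hash afterwards  = 0x23450001   (now assigned back before masking
 *                                  with IPV6_FLOWLABEL_MASK)
 */
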
index b0eaeb02d46d14ceb87f6e62d4765959c8383a66..f4c21b5a1242baac0415b3dde8fbc30524690ee7 100644 (file)
@@ -153,6 +153,8 @@ struct iucv_sock_list {
        atomic_t          autobind_name;
 };
 
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
index 47e35cce3b648d696b127ed7bd643036128795f6..a71264d75d7f98d28f92dfd861ffe6e0d39c0198 100644 (file)
@@ -128,6 +128,7 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
+       struct ctl_table_header *nf_frag_frags_hdr;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
index e0c0c2558ec48adfb27629c2180f9b04efb67bcf..a05134507e7bc806d9afd9ff7c86b95e5df084eb 100644 (file)
@@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
 extern struct static_key_false nft_counters_enabled;
 extern struct static_key_false nft_trace_enabled;
 
+extern struct nft_set_type nft_set_rhash_type;
+extern struct nft_set_type nft_set_hash_type;
+extern struct nft_set_type nft_set_hash_fast_type;
+extern struct nft_set_type nft_set_rbtree_type;
+extern struct nft_set_type nft_set_bitmap_type;
+
 #endif /* _NET_NF_TABLES_CORE_H */
index 9754a50ecde9c44162cc60e387d48cb034c6e6d4..4cc64c8446eb94f1c122cf15d4bf74c7e3f2275d 100644 (file)
@@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
  * belonging to established connections going through that one.
  */
 struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
                      const u8 protocol,
                      const __be32 saddr, const __be32 daddr,
                      const __be16 sport, const __be16 dport,
@@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                            struct sock *sk);
 
 struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
                      const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
                      const __be16 sport, const __be16 dport,
index c978a31b0f846210b4c2a369af960d5349b5395a..762ac9931b6251152b6ee0e5780df0f7b073f3e6 100644 (file)
@@ -109,7 +109,6 @@ struct netns_ipv6 {
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct netns_nf_frag {
-       struct netns_sysctl_ipv6 sysctl;
        struct netns_frags      frags;
 };
 #endif
index a3c1a2c47cd4bfd868004548cdf1ef7a361fa4c6..20b059574e600e64838b0bdecfaf6a76e6629d4a 100644 (file)
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 }
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+       return false;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
        return NULL;
index 30b3e2fe240a88e3396a8b3664fd879c93fd30bf..8c2caa370e0f683ea764bc0d72da6dfa93699673 100644 (file)
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 int sctp_inet_listen(struct socket *sock, int backlog);
 void sctp_write_space(struct sock *sk);
 void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+               poll_table *wait);
 void sctp_sock_rfree(struct sk_buff *skb);
 void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
index 9470fd7e4350ea9546b43a504ebe12f6362dda18..32d2454c04793021c0dc87bca7f1802b49c5249b 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/tc_act/tc_csum.h>
 
 struct tcf_csum_params {
-       int action;
        u32 update_flags;
        struct rcu_head rcu;
 };
index efef0b4b1b2bddc76095bcd4d02ebaaa3b2beb56..46b8c7f1c8d5273791df55eeb6345807d8812e96 100644 (file)
@@ -18,7 +18,6 @@
 struct tcf_tunnel_key_params {
        struct rcu_head         rcu;
        int                     tcft_action;
-       int                     action;
        struct metadata_dst     *tcft_enc_metadata;
 };
 
index 0448e7c5d2b4062f8ceecb5b38882385a1be7ead..3482d13d655b88910e3d41d30584d94bfea7a535 100644 (file)
@@ -388,7 +388,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+                     struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
 int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -827,6 +828,10 @@ struct tcp_skb_cb {
 
 #define TCP_SKB_CB(__skb)      ((struct tcp_skb_cb *)&((__skb)->cb[0]))
 
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+       TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
 
 #if IS_ENABLED(CONFIG_IPV6)
 /* This is the variant of inet6_iif() that must be used by TCP,
@@ -907,8 +912,6 @@ enum tcp_ca_event {
        CA_EVENT_LOSS,          /* loss timeout */
        CA_EVENT_ECN_NO_CE,     /* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE,     /* received CE marked IP packet */
-       CA_EVENT_DELAYED_ACK,   /* Delayed ack is sent */
-       CA_EVENT_NON_DELAYED_ACK,
 };
 
 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
index 7f84ea3e217cf5e3f78698ee63bc9dced179caed..70c273777fe9fe27b2ef1ba7c2c80970da8ea5c4 100644 (file)
@@ -109,7 +109,8 @@ struct tls_sw_context_rx {
 
        struct strparser strp;
        void (*saved_data_ready)(struct sock *sk);
-       __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+       unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+                               struct poll_table_struct *wait);
        struct sk_buff *recv_pkt;
        u8 control;
        bool decrypted;
@@ -224,7 +225,8 @@ void tls_sw_free_resources_tx(struct sock *sk);
 void tls_sw_free_resources_rx(struct sock *sk);
 int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                   int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait);
 ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
                           struct pipe_inode_info *pipe,
                           size_t len, unsigned int flags);
index b1ea8b0f5e6a8ce82602e593acd583170b4a6e73..81afdacd4fff04bd05335da85a7a06b1996282f8 100644 (file)
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
 int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 int __udp_disconnect(struct sock *sk, int flags);
 int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
 struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                       netdev_features_t features,
                                       bool is_ipv6);
index 9fe472f2ac950c8f3f042cca547cb8f7ce820a97..7161856bcf9c7f572943f6a78676df2aa458a5f7 100644 (file)
@@ -60,6 +60,10 @@ struct xdp_sock {
        bool zc;
        /* Protects multiple processes in the control path */
        struct mutex mutex;
+       /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+        * in the SKB destructor callback.
+        */
+       spinlock_t tx_completion_lock;
        u64 rx_dropped;
 };
 
index 4c6241bc203931dcc6b74de5be72349e741cb6be..6c003995347a3904cda6e57814c50bcf6c0733a7 100644 (file)
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
  *
  * Users can examine the cq structure to determine the actual CQ size.
  */
-struct ib_cq *ib_create_cq(struct ib_device *device,
-                          ib_comp_handler comp_handler,
-                          void (*event_handler)(struct ib_event *, void *),
-                          void *cq_context,
-                          const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+                            ib_comp_handler comp_handler,
+                            void (*event_handler)(struct ib_event *, void *),
+                            void *cq_context,
+                            const struct ib_cq_init_attr *cq_attr,
+                            const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+       __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
 
 /**
  * ib_resize_cq - Modifies the capacity of the CQ.
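
Call sites are unchanged; the macro transparently stamps each caller with its module name (sketch, arguments abbreviated):

/* Expands to __ib_create_cq(..., KBUILD_MODNAME), letting the core
 * attribute the CQ to the calling module, e.g. for resource tracking.
 */
cq = ib_create_cq(device, comp_handler, event_handler, ctx, &attr);
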
index d00221345c1988ff59de79f47401903d560c55e0..3c5038b587ba0b3ab6064d5dd8a90e2e8c6bfeae 100644 (file)
@@ -39,8 +39,10 @@ enum {
        IOCB_CMD_PWRITE = 1,
        IOCB_CMD_FSYNC = 2,
        IOCB_CMD_FDSYNC = 3,
-       /* 4 was the experimental IOCB_CMD_PREADX */
-       IOCB_CMD_POLL = 5,
+       /* These two are experimental.
+        * IOCB_CMD_PREADX = 4,
+        * IOCB_CMD_POLL = 5,
+        */
        IOCB_CMD_NOOP = 6,
        IOCB_CMD_PREADV = 7,
        IOCB_CMD_PWRITEV = 8,
index 59b19b6a40d73ea6575f8810a6f4345a931c5a01..b7db3261c62d124760e98d9c851c1b01e64bdb03 100644 (file)
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
 *             **struct sk_buff** for tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of the **BPF_FIB_LKUP_RET_** codes explaining why the
+ *             *     packet is not forwarded or needs assistance from the full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
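
Taken together, these bpf.h hunks define a three-way return convention for bpf_fib_lookup(): negative for invalid arguments, zero when the packet can be forwarded, and a positive BPF_FIB_LKUP_RET_* reason otherwise, with params->ifindex doubling as an output. A sketch of an XDP forwarder consuming it; header parsing is elided and the samples-style "bpf_helpers.h" boilerplate (SEC(), helper stubs) is assumed:

#include <linux/bpf.h>
#include <linux/socket.h>
#include "bpf_helpers.h"

SEC("xdp_fwd")
int xdp_fwd_prog(struct xdp_md *ctx)
{
	struct bpf_fib_lookup fib = {};
	int rc;

	/* A real program fills family, addresses and tot_len from the
	 * parsed L3 header; only the lookup device is shown here. */
	fib.family  = AF_INET;
	fib.ifindex = ctx->ingress_ifindex;

	rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
	if (rc < 0)
		return XDP_ABORTED;		/* invalid argument */
	if (rc == BPF_FIB_LKUP_RET_SUCCESS)
		/* rewrite MACs from fib.smac/fib.dmac, then: */
		return bpf_redirect(fib.ifindex, 0);
	if (rc == BPF_FIB_LKUP_RET_BLACKHOLE ||
	    rc == BPF_FIB_LKUP_RET_UNREACHABLE ||
	    rc == BPF_FIB_LKUP_RET_PROHIBIT)
		return XDP_DROP;
	return XDP_PASS;	/* NOT_FWDED, NO_NEIGH, FRAG_NEEDED, ... */
}

char _license[] SEC("license") = "GPL";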
index 4ca65b56084f94526435a58a8663d58054c924f4..7363f18e65a553e12f4d1cc13844dfbf2bbe6f17 100644 (file)
@@ -226,7 +226,7 @@ enum tunable_id {
        ETHTOOL_TX_COPYBREAK,
        ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
        /*
-        * Add your fresh new tubale attribute above and remember to update
+        * Add your fresh new tunable attribute above and remember to update
         * tunable_strings[] in net/core/ethtool.c
         */
        __ETHTOOL_TUNABLE_COUNT,
index 85a3fb65e40a6f3941337c7fad17e7fdff3b33d0..20d6cc91435df90f08741c478ab29ea85efa7167 100644 (file)
@@ -53,6 +53,9 @@ enum {
 /* These are client behavior specific flags. */
 #define NBD_CFLAG_DESTROY_ON_DISCONNECT        (1 << 0) /* delete the nbd device on
                                                    disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+                                               *  close by the last opener.
+                                               */
 
 /* userspace doesn't need the nbd_device structure */
 
index d620fa43756cab2685428861f31c27d9a59b2a39..9a402fdb60e97bc92591312ebc7071de54fa900c 100644 (file)
  * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <linux/types_32_64.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
 
 enum rseq_cpu_id_state {
        RSEQ_CPU_ID_UNINITIALIZED               = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
        __u32 version;
        /* enum rseq_cs_flags */
        __u32 flags;
-       LINUX_FIELD_u32_u64(start_ip);
+       __u64 start_ip;
        /* Offset from start_ip. */
-       LINUX_FIELD_u32_u64(post_commit_offset);
-       LINUX_FIELD_u32_u64(abort_ip);
+       __u64 post_commit_offset;
+       __u64 abort_ip;
 } __attribute__((aligned(4 * sizeof(__u64))));
 
 /*
@@ -67,28 +62,30 @@ struct rseq_cs {
 struct rseq {
        /*
         * Restartable sequences cpu_id_start field. Updated by the
-        * kernel, and read by user-space with single-copy atomicity
-        * semantics. Aligned on 32-bit. Always contains a value in the
-        * range of possible CPUs, although the value may not be the
-        * actual current CPU (e.g. if rseq is not initialized). This
-        * CPU number value should always be compared against the value
-        * of the cpu_id field before performing a rseq commit or
-        * returning a value read from a data structure indexed using
-        * the cpu_id_start value.
+        * kernel. Read by user-space with single-copy atomicity
+        * semantics. This field should only be read by the thread which
+        * registered this data structure. Aligned on 32-bit. Always
+        * contains a value in the range of possible CPUs, although the
+        * value may not be the actual current CPU (e.g. if rseq is not
+        * initialized). This CPU number value should always be compared
+        * against the value of the cpu_id field before performing a rseq
+        * commit or returning a value read from a data structure indexed
+        * using the cpu_id_start value.
         */
        __u32 cpu_id_start;
        /*
-        * Restartable sequences cpu_id field. Updated by the kernel,
-        * and read by user-space with single-copy atomicity semantics.
-        * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
-        * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
-        * former means "rseq uninitialized", and latter means "rseq
-        * initialization failed". This value is meant to be read within
-        * rseq critical sections and compared with the cpu_id_start
-        * value previously read, before performing the commit instruction,
-        * or read and compared with the cpu_id_start value before returning
-        * a value loaded from a data structure indexed using the
-        * cpu_id_start value.
+        * Restartable sequences cpu_id field. Updated by the kernel.
+        * Read by user-space with single-copy atomicity semantics. This
+        * field should only be read by the thread which registered this
+        * data structure. Aligned on 32-bit. Values
+        * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
+        * have a special semantic: the former means "rseq uninitialized",
+        * and latter means "rseq initialization failed". This value is
+        * and the latter means "rseq initialization failed". This value is
+        * with the cpu_id_start value previously read, before performing
+        * the commit instruction, or read and compared with the
+        * cpu_id_start value before returning a value loaded from a data
+        * structure indexed using the cpu_id_start value.
         */
        __u32 cpu_id;
        /*
@@ -105,27 +102,44 @@ struct rseq {
         * targeted by the rseq_cs. Also needs to be set to NULL by user-space
         * before reclaiming memory that contains the targeted struct rseq_cs.
         *
-        * Read and set by the kernel with single-copy atomicity semantics.
-        * Set by user-space with single-copy atomicity semantics. Aligned
-        * on 64-bit.
+        * Read and set by the kernel. Set by user-space with single-copy
+        * atomicity semantics. This field should only be updated by the
+        * thread which registered this data structure. Aligned on 64-bit.
         */
-       LINUX_FIELD_u32_u64(rseq_cs);
+       union {
+               __u64 ptr64;
+#ifdef __LP64__
+               __u64 ptr;
+#else
+               struct {
+#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
+                       __u32 padding;          /* Initialized to zero. */
+                       __u32 ptr32;
+#else /* LITTLE */
+                       __u32 ptr32;
+                       __u32 padding;          /* Initialized to zero. */
+#endif /* ENDIAN */
+               } ptr;
+#endif
+       } rseq_cs;
+
        /*
-        * - RSEQ_DISABLE flag:
+        * Restartable sequences flags field.
+        *
+        * This field should only be updated by the thread which
+        * registered this data structure. Read by the kernel.
+        * Mainly used for single-stepping through rseq critical sections
+        * with debuggers.
         *
-        * Fallback fast-track flag for single-stepping.
-        * Set by user-space if lack of progress is detected.
-        * Cleared by user-space after rseq finish.
-        * Read by the kernel.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on preemption for this thread.
+        *     Inhibit instruction sequence block restart on preemption
+        *     for this thread.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on signal delivery for this thread.
+        *     Inhibit instruction sequence block restart on signal
+        *     delivery for this thread.
         * - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
-        *     Inhibit instruction sequence block restart and event
-        *     counter increment on migration for this thread.
+        *     Inhibit instruction sequence block restart on migration for
+        *     this thread.
         */
        __u32 flags;
 } __attribute__((aligned(4 * sizeof(__u64))));
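
With the LINUX_FIELD_u32_u64() helpers gone, rseq_cs is an explicit union: 64-bit user-space stores through ptr (or ptr64), 32-bit stores ptr.ptr32 and zeroes the padding, and a single 64-bit store covers both at once. A hedged sketch of one way user-space might publish the descriptor pointer (librseq uses per-architecture stores; this shows only the shape):

#include <stdint.h>
#include <linux/rseq.h>

static void set_rseq_cs(volatile struct rseq *rs, struct rseq_cs *cs)
{
	/* Zero-extends the address; on 32-bit this also clears the
	 * padding word, as the ABI requires. */
	rs->rseq_cs.ptr64 = (uint64_t)(uintptr_t)cs;
}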
index 6e299349b15876d3302cc784576dd84cff6f1d66..b7b57967d90f09cd428d90e12b0035e3ecbcfc67 100644 (file)
@@ -44,6 +44,7 @@
 #define TCMU_MAILBOX_VERSION 2
 #define ALIGN_SIZE 64 /* Should be enough for most CPUs */
 #define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
 
 struct tcmu_mailbox {
        __u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
        __u16 cmd_id;
        __u8 kflags;
 #define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN   0x2
        __u8 uflags;
 
 } __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
                        __u8 scsi_status;
                        __u8 __pad1;
                        __u16 __pad2;
-                       __u32 __pad3;
+                       __u32 read_len;
                        char sense_buffer[TCMU_SENSE_BUFFERSIZE];
                } rsp;
        };
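
The new mailbox capability lets a userspace backstore report short reads: when the kernel advertises TCMU_MAILBOX_FLAG_CAP_READ_LEN, the handler sets TCMU_UFLAG_READ_LEN on completion and puts the byte count in the repurposed pad field. A hedged sketch of the completion side (complete_short_read is an illustrative helper, not part of the UAPI):

#include <stdint.h>
#include <linux/target_core_user.h>

static void complete_short_read(struct tcmu_mailbox *mb,
				struct tcmu_cmd_entry *ent,
				uint32_t bytes_read)
{
	if (mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN) {
		ent->hdr.uflags |= TCMU_UFLAG_READ_LEN;	/* read_len valid */
		ent->rsp.read_len = bytes_read;
	}
	ent->rsp.scsi_status = 0;	/* GOOD; sense handling elided */
}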
index 29eb659aa77a183e36082599866fb512908d1197..e3f6ed8a7064f9276ca2b57ed5ecff3364786e9d 100644 (file)
@@ -127,6 +127,10 @@ enum {
 
 #define TCP_CM_INQ             TCP_INQ
 
+#define TCP_REPAIR_ON          1
+#define TCP_REPAIR_OFF         0
+#define TCP_REPAIR_OFF_NO_WP   -1      /* Turn off without window probes */
+
 struct tcp_repair_opt {
        __u32   opt_code;
        __u32   opt_val;
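
The new constants give the TCP_REPAIR setsockopt values names, including the -1 mode that leaves repair without sending window probes. A minimal usage sketch (the socket must already be in repair mode and the caller needs CAP_NET_ADMIN):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

static int tcp_repair_off_quietly(int fd)
{
	int val = TCP_REPAIR_OFF_NO_WP;	/* off, no window probes */

	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val));
}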
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644 (file)
index 0a87ace..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_TYPES_32_64_H
-#define _UAPI_LINUX_TYPES_32_64_H
-
-/*
- * linux/types_32_64.h
- *
- * Integer type declaration for pointers across 32-bit and 64-bit systems.
- *
- * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <asm/byteorder.h>
-
-#ifdef __BYTE_ORDER
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-#  define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#else
-# ifdef __BIG_ENDIAN
-#  define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-#  define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#endif
-
-#ifdef __LP64__
-# define LINUX_FIELD_u32_u64(field)                    __u64 field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)    field = (intptr_t)v
-#else
-# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
-#  define LINUX_FIELD_u32_u64(field)   __u32 field ## _padding, field
-#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)   \
-       field ## _padding = 0, field = (intptr_t)v
-# else
-#  define LINUX_FIELD_u32_u64(field)   __u32 field, field ## _padding
-#  define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v)   \
-       field = (intptr_t)v, field ## _padding = 0
-# endif
-#endif
-
-#endif /* _UAPI_LINUX_TYPES_32_64_H */
index 9d4340c907d17d0c2ecacdd6762e36d7c9d6def5..1e1d9bd0bd3788711d8722e7ec9e1a15661d7c3b 100644 (file)
@@ -25,12 +25,16 @@ extern bool xen_pvh;
 #define xen_hvm_domain()       (xen_domain_type == XEN_HVM_DOMAIN)
 #define xen_pvh_domain()       (xen_pvh)
 
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
 #ifdef CONFIG_XEN_DOM0
 #include <xen/interface/xen.h>
 #include <asm/xen/hypervisor.h>
 
 #define xen_initial_domain()   (xen_domain() && \
-                                xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+                                (xen_start_flags & SIF_INITDOMAIN))
 #else  /* !CONFIG_XEN_DOM0 */
 #define xen_initial_domain()   (0)
 #endif /* CONFIG_XEN_DOM0 */
index 5a52f07259a2aab4ad5993801a6d15b5dfe5d4a3..041f3a022122d559b8588c8c24c8db37756464de 100644 (file)
@@ -1051,10 +1051,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION
        depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        depends on EXPERT
        help
-         Select this if the architecture wants to do dead code and
-         data elimination with the linker by compiling with
-         -ffunction-sections -fdata-sections, and linking with
-         --gc-sections.
+         Enable this if you want to do dead code and data elimination with
+         the linker by compiling with -ffunction-sections -fdata-sections,
+         and linking with --gc-sections.
 
          This can reduce on disk and in-memory size of the kernel
          code and static data, particularly for small configs and
@@ -1719,10 +1718,6 @@ source "arch/Kconfig"
 
 endmenu                # General setup
 
-config HAVE_GENERIC_DMA_COHERENT
-       bool
-       default n
-
 config RT_MUTEXES
        bool
 
index d2001624fe7a31b788508e5da97924173bf2e33e..04bc07c2b42a9dfef399caea56a12b072f0ad028 100644 (file)
@@ -41,6 +41,7 @@ obj-y += printk/
 obj-y += irq/
 obj-y += rcu/
 obj-y += livepatch/
+obj-y += dma/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
index 2d49d18b793abaf60379c4050e4148a77bae732f..e016ac3afa2422f4f4e1cdd0d8417b1e6a2f55ac 100644 (file)
@@ -991,16 +991,13 @@ static void btf_int_bits_seq_show(const struct btf *btf,
                                  void *data, u8 bits_offset,
                                  struct seq_file *m)
 {
+       u16 left_shift_bits, right_shift_bits;
        u32 int_data = btf_type_int(t);
        u16 nr_bits = BTF_INT_BITS(int_data);
        u16 total_bits_offset;
        u16 nr_copy_bytes;
        u16 nr_copy_bits;
-       u8 nr_upper_bits;
-       union {
-               u64 u64_num;
-               u8  u8_nums[8];
-       } print_num;
+       u64 print_num;
 
        total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
        data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
@@ -1008,21 +1005,20 @@ static void btf_int_bits_seq_show(const struct btf *btf,
        nr_copy_bits = nr_bits + bits_offset;
        nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
 
-       print_num.u64_num = 0;
-       memcpy(&print_num.u64_num, data, nr_copy_bytes);
+       print_num = 0;
+       memcpy(&print_num, data, nr_copy_bytes);
 
-       /* Ditch the higher order bits */
-       nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
-       if (nr_upper_bits) {
-               /* We need to mask out some bits of the upper byte. */
-               u8 mask = (1 << nr_upper_bits) - 1;
+#ifdef __BIG_ENDIAN_BITFIELD
+       left_shift_bits = bits_offset;
+#else
+       left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+#endif
+       right_shift_bits = BITS_PER_U64 - nr_bits;
 
-               print_num.u8_nums[nr_copy_bytes - 1] &= mask;
-       }
-
-       print_num.u64_num >>= bits_offset;
+       print_num <<= left_shift_bits;
+       print_num >>= right_shift_bits;
 
-       seq_printf(m, "0x%llx", print_num.u64_num);
+       seq_printf(m, "0x%llx", print_num);
 }
 
 static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
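
The rewrite replaces the byte-wise masking with two shifts on a u64, which both drops the surrounding bits and sets up the later big-endian variant. The little-endian arithmetic in isolation, as a user-space sketch (extract_bits_le is illustrative, not the kernel function):

#include <stdint.h>
#include <string.h>

static uint64_t extract_bits_le(const void *data, unsigned int bits_offset,
				unsigned int nr_bits)
{
	unsigned int nr_copy_bits = nr_bits + bits_offset;
	uint64_t v = 0;

	/* Copy just enough bytes to cover offset + field (<= 64 bits). */
	memcpy(&v, data, (nr_copy_bits + 7) / 8);
	v <<= 64 - nr_copy_bits;	/* shift field's top bit to bit 63 */
	v >>= 64 - nr_bits;		/* right-align exactly nr_bits */
	return v;
}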
index f7c00bd6f8e49ca9cc4e6ee323b01f718aebd9ec..3d83ee7df381b1def956b5e645376451d797440e 100644 (file)
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
        return ret;
 }
 
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+                               attr->attach_flags);
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       struct bpf_prog *prog;
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               prog = NULL;
+
+       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+       if (prog)
+               bpf_prog_put(prog);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->query.target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_query(cgrp, attr, uattr);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  * @sk: The socket sending or receiving traffic
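
These helpers keep the fd-to-cgroup resolution in one place for the attach, detach and query commands. The syscall-level contract they serve, sketched from user space (attaching a program fd to a cgroup fd; the attach type is only an example):

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int prog_attach_to_cgroup(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd = cgroup_fd;	/* resolved via cgroup_get_from_fd() */
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = BPF_CGROUP_INET_INGRESS;

	return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}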
index 9f1493705f4043066033dd44ec6deb95e7418287..1e5625d46414cc68efe372b2c6a8dab266a24dd6 100644 (file)
@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
        return prog_adj;
 }
 
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+       int i;
+
+       for (i = 0; i < fp->aux->func_cnt; i++)
+               bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+       bpf_prog_kallsyms_del_subprogs(fp);
+       bpf_prog_kallsyms_del(fp);
+}
+
 #ifdef CONFIG_BPF_JIT
 /* All BPF JIT sysctl knobs here. */
 int bpf_jit_enable   __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
@@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
        return 0;
 }
 
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+       fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
 /**
  *     bpf_prog_select_runtime - select exec runtime for BPF program
  *     @fp: bpf_prog populated with internal BPF program
@@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
  */
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
-       u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+       /* In the case of BPF-to-BPF calls, the verifier has already done
+        * all the prep work with regard to JITing, etc.
+        */
+       if (fp->bpf_func)
+               goto finalize;
 
-       fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
-       fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+       bpf_prog_select_func(fp);
 
        /* eBPF JITs can rewrite the program in case constant
         * blinding is active. However, in case of error during
@@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
                if (*err)
                        return fp;
        }
+
+finalize:
        bpf_prog_lock_ro(fp);
 
        /* The tail call compatibility check can only be done at
index a7cc7b3494a90f582886485668562ccfef5f5ffd..d361fc1e3bf35fd54d485e72c2e258171e3394d5 100644 (file)
@@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 {
        struct net_device *dev = dst->dev;
        struct xdp_frame *xdpf;
+       int err;
 
        if (!dev->netdev_ops->ndo_xdp_xmit)
                return -EOPNOTSUPP;
 
+       err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+       if (unlikely(err))
+               return err;
+
        xdpf = convert_to_xdp_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;
@@ -345,6 +350,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return bq_enqueue(dst, xdpf, dev_rx);
 }
 
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog)
+{
+       int err;
+
+       err = xdp_ok_fwd_dev(dst->dev, skb->len);
+       if (unlikely(err))
+               return err;
+       skb->dev = dst->dev;
+       generic_xdp_tx(skb, xdp_prog);
+
+       return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
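
The added xdp_ok_fwd_dev() guard rejects a redirect before the frame is queued when the target device cannot take it. Conceptually it is an "up and fits" check along these lines (a hedged sketch; the real helper lives in the core and also accounts for VLAN headroom):

#include <linux/netdevice.h>

static int fwd_ok(const struct net_device *fwd, unsigned int pktlen)
{
	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;		/* target is down */
	if (unlikely(pktlen > fwd->mtu + fwd->hard_header_len))
		return -EMSGSIZE;		/* frame would not fit */
	return 0;
}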
index 3ca2198a6d22d9ab67c61e964811046cf9c9e512..513d9dfcf4ee136dd5e6733789a996612272376d 100644 (file)
@@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                 * old element will be freed immediately.
                                 * Otherwise return an error
                                 */
-                               atomic_dec(&htab->count);
-                               return ERR_PTR(-E2BIG);
+                               l_new = ERR_PTR(-E2BIG);
+                               goto dec_count;
                        }
                l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
                                     htab->map.numa_node);
-               if (!l_new)
-                       return ERR_PTR(-ENOMEM);
+               if (!l_new) {
+                       l_new = ERR_PTR(-ENOMEM);
+                       goto dec_count;
+               }
        }
 
        memcpy(l_new->key, key, key_size);
@@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                                  GFP_ATOMIC | __GFP_NOWARN);
                        if (!pptr) {
                                kfree(l_new);
-                               return ERR_PTR(-ENOMEM);
+                               l_new = ERR_PTR(-ENOMEM);
+                               goto dec_count;
                        }
                }
 
@@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 
        l_new->hash = hash;
        return l_new;
+dec_count:
+       atomic_dec(&htab->count);
+       return l_new;
 }
 
 static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
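
The htab fix is a classic single-unwind-label refactor: once the element counter has been incremented, every failure path must decrement it, so all of them now funnel through one dec_count label instead of each remembering the atomic_dec(). The shape of the fix as a standalone sketch (user-space rendering; the kernel version returns ERR_PTR() codes):

#include <stdatomic.h>
#include <stdlib.h>

struct elem { int key; };

static struct elem *alloc_elem(atomic_long *count, long max)
{
	struct elem *e = NULL;

	if (atomic_fetch_add(count, 1) + 1 > max)
		goto dec_count;			/* over the map limit */
	e = malloc(sizeof(*e));
	if (!e)
		goto dec_count;			/* same unwind path */
	return e;
dec_count:
	atomic_fetch_sub(count, 1);		/* one place undoes the inc */
	return e;				/* NULL on both error paths */
}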
index 52a91d816c0eb9a1f9fe96fd77b3ffefd6145149..98fb7938beea9dd18a255ad77ebe797b01660dea 100644 (file)
@@ -72,6 +72,7 @@ struct bpf_htab {
        u32 n_buckets;
        u32 elem_size;
        struct bpf_sock_progs progs;
+       struct rcu_head rcu;
 };
 
 struct htab_elem {
@@ -89,8 +90,8 @@ enum smap_psock_state {
 struct smap_psock_map_entry {
        struct list_head list;
        struct sock **entry;
-       struct htab_elem *hash_link;
-       struct bpf_htab *htab;
+       struct htab_elem __rcu *hash_link;
+       struct bpf_htab __rcu *htab;
 };
 
 struct smap_psock {
@@ -120,6 +121,7 @@ struct smap_psock {
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;
+       spinlock_t maps_lock;
 
        /* Back reference used when sock callback trigger sockmap operations */
        struct sock *sock;
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 {
@@ -161,7 +164,42 @@ out:
        return !empty;
 }
 
-static struct proto tcp_bpf_proto;
+enum {
+       SOCKMAP_IPV4,
+       SOCKMAP_IPV6,
+       SOCKMAP_NUM_PROTS,
+};
+
+enum {
+       SOCKMAP_BASE,
+       SOCKMAP_TX,
+       SOCKMAP_NUM_CONFIGS,
+};
+
+static struct proto *saved_tcpv6_prot __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
+                        struct proto *base)
+{
+       prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].close                = bpf_tcp_close;
+       prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
+       prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
+
+       prot[SOCKMAP_TX]                        = prot[SOCKMAP_BASE];
+       prot[SOCKMAP_TX].sendmsg                = bpf_tcp_sendmsg;
+       prot[SOCKMAP_TX].sendpage               = bpf_tcp_sendpage;
+}
+
+static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
+{
+       int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
+       int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
+
+       sk->sk_prot = &bpf_tcp_prots[family][conf];
+}
+
 static int bpf_tcp_init(struct sock *sk)
 {
        struct smap_psock *psock;
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk)
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
-       if (psock->bpf_tx_msg) {
-               tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
-               tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
-               tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
-               tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+       /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
+       if (sk->sk_family == AF_INET6 &&
+           unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+               spin_lock_bh(&tcpv6_prot_lock);
+               if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                       build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
+                       smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+               }
+               spin_unlock_bh(&tcpv6_prot_lock);
        }
-
-       sk->sk_prot = &tcp_bpf_proto;
+       update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
 }
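
build_protos() plus saved_tcpv6_prot is a double-checked publish: the fast path is a lock-free acquire load, and only when the base proto pointer has changed does the slow path take the lock, re-check, rebuild and publish with a release store. The generic shape of that pattern, sketched with assumed names (rebuild_derived stands in for build_protos):

#include <linux/spinlock.h>

struct proto;					/* opaque in this sketch */
void rebuild_derived(struct proto *base);	/* assumed helper */

static struct proto *saved_base __read_mostly;
static DEFINE_SPINLOCK(rebuild_lock);

static void maybe_rebuild(struct proto *base)
{
	if (likely(base == smp_load_acquire(&saved_base)))
		return;				/* fast path, lock-free */

	spin_lock_bh(&rebuild_lock);
	if (base != saved_base) {		/* re-check under the lock */
		rebuild_derived(base);
		smp_store_release(&saved_base, base);
	}
	spin_unlock_bh(&rebuild_lock);
}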
@@ -219,24 +260,64 @@ out:
        rcu_read_unlock();
 }
 
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
+                                        u32 hash, void *key, u32 key_size)
+{
+       struct htab_elem *l;
+
+       hlist_for_each_entry_rcu(l, head, hash_node) {
+               if (l->hash == hash && !memcmp(&l->key, key, key_size))
+                       return l;
+       }
+
+       return NULL;
+}
+
+static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &__select_bucket(htab, hash)->head;
+}
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
        atomic_dec(&htab->count);
        kfree_rcu(l, rcu);
 }
 
+static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
+                                                 struct smap_psock *psock)
+{
+       struct smap_psock_map_entry *e;
+
+       spin_lock_bh(&psock->maps_lock);
+       e = list_first_entry_or_null(&psock->maps,
+                                    struct smap_psock_map_entry,
+                                    list);
+       if (e)
+               list_del(&e->list);
+       spin_unlock_bh(&psock->maps_lock);
+       return e;
+}
+
 static void bpf_tcp_close(struct sock *sk, long timeout)
 {
        void (*close_fun)(struct sock *sk, long timeout);
-       struct smap_psock_map_entry *e, *tmp;
+       struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;
        struct sock *osk;
 
+       lock_sock(sk);
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
+               release_sock(sk);
                return sk->sk_prot->close(sk, timeout);
        }
 
@@ -247,7 +328,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
         */
        close_fun = psock->save_close;
 
-       write_lock_bh(&sk->sk_callback_lock);
        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
@@ -260,21 +340,40 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(md);
        }
 
-       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+       e = psock_map_pop(sk, psock);
+       while (e) {
                if (e->entry) {
                        osk = cmpxchg(e->entry, sk, NULL);
                        if (osk == sk) {
-                               list_del(&e->list);
                                smap_release_sock(psock, sk);
                        }
                } else {
-                       hlist_del_rcu(&e->hash_link->hash_node);
-                       smap_release_sock(psock, e->hash_link->sk);
-                       free_htab_elem(e->htab, e->hash_link);
+                       struct htab_elem *link = rcu_dereference(e->hash_link);
+                       struct bpf_htab *htab = rcu_dereference(e->htab);
+                       struct hlist_head *head;
+                       struct htab_elem *l;
+                       struct bucket *b;
+
+                       b = __select_bucket(htab, link->hash);
+                       head = &b->head;
+                       raw_spin_lock_bh(&b->lock);
+                       l = lookup_elem_raw(head,
+                                           link->hash, link->key,
+                                           htab->map.key_size);
+                       /* If another thread deleted this object, skip deletion.
+                        * The refcnt on psock may or may not be zero.
+                        */
+                       if (l) {
+                               hlist_del_rcu(&link->hash_node);
+                               smap_release_sock(psock, link->sk);
+                               free_htab_elem(htab, link);
+                       }
+                       raw_spin_unlock_bh(&b->lock);
                }
+               e = psock_map_pop(sk, psock);
        }
-       write_unlock_bh(&sk->sk_callback_lock);
        rcu_read_unlock();
+       release_sock(sk);
        close_fun(sk, timeout);
 }
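
The close path no longer walks psock->maps in place; psock_map_pop() detaches one entry at a time under maps_lock, and the processing happens outside the lock, so list mutation can never race with the walk. The idiom in isolation (a hedged sketch with generic names):

#include <linux/list.h>
#include <linux/spinlock.h>

struct entry {
	struct list_head list;
};

static struct entry *pop_entry(spinlock_t *lock, struct list_head *head)
{
	struct entry *e;

	spin_lock_bh(lock);
	e = list_first_entry_or_null(head, struct entry, list);
	if (e)
		list_del(&e->list);	/* detach before dropping the lock */
	spin_unlock_bh(lock);
	return e;			/* process outside the lock; repeat */
}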
 
@@ -472,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        while (sg[i].length) {
                free += sg[i].length;
                sk_mem_uncharge(sk, sg[i].length);
-               put_page(sg_page(&sg[i]));
+               if (!md->skb)
+                       put_page(sg_page(&sg[i]));
                sg[i].length = 0;
                sg[i].page_link = 0;
                sg[i].offset = 0;
@@ -481,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
                if (i == MAX_SKB_FRAGS)
                        i = 0;
        }
+       if (md->skb)
+               consume_skb(md->skb);
 
        return free;
 }
@@ -1111,8 +1213,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
 
 static int bpf_tcp_ulp_register(void)
 {
-       tcp_bpf_proto = tcp_prot;
-       tcp_bpf_proto.close = bpf_tcp_close;
+       build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
        /* Once BPF TX ULP is registered it is never unregistered. It
         * will be in the ULP list for the lifetime of the system. Doing
         * duplicate registers is not a problem.
@@ -1135,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
         */
        TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
        skb->sk = psock->sock;
-       bpf_compute_data_pointers(skb);
+       bpf_compute_data_end_sk_skb(skb);
        preempt_disable();
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        preempt_enable();
@@ -1357,7 +1458,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
                tcp_cleanup_ulp(sock);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
                clear_bit(SMAP_TX_RUNNING, &psock->state);
                rcu_assign_sk_user_data(sock, NULL);
                call_rcu_sched(&psock->rcu, smap_destroy_psock);
@@ -1388,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
         * any socket yet.
         */
        skb->sk = psock->sock;
-       bpf_compute_data_pointers(skb);
+       bpf_compute_data_end_sk_skb(skb);
        rc = (*prog->bpf_func)(skb, prog->insnsi);
        skb->sk = NULL;
        rcu_read_unlock();
@@ -1508,6 +1611,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node)
        INIT_LIST_HEAD(&psock->maps);
        INIT_LIST_HEAD(&psock->ingress);
        refcount_set(&psock->refcnt, 1);
+       spin_lock_init(&psock->maps_lock);
 
        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
@@ -1564,18 +1668,32 @@ free_stab:
        return ERR_PTR(err);
 }
 
-static void smap_list_remove(struct smap_psock *psock,
-                            struct sock **entry,
-                            struct htab_elem *hash_link)
+static void smap_list_map_remove(struct smap_psock *psock,
+                                struct sock **entry)
 {
        struct smap_psock_map_entry *e, *tmp;
 
+       spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-               if (e->entry == entry || e->hash_link == hash_link) {
+               if (e->entry == entry)
+                       list_del(&e->list);
+       }
+       spin_unlock_bh(&psock->maps_lock);
+}
+
+static void smap_list_hash_remove(struct smap_psock *psock,
+                                 struct htab_elem *hash_link)
+{
+       struct smap_psock_map_entry *e, *tmp;
+
+       spin_lock_bh(&psock->maps_lock);
+       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+               struct htab_elem *c = rcu_dereference(e->hash_link);
+
+               if (c == hash_link)
                        list_del(&e->list);
-                       break;
-               }
        }
+       spin_unlock_bh(&psock->maps_lock);
 }
 
 static void sock_map_free(struct bpf_map *map)
@@ -1601,7 +1719,6 @@ static void sock_map_free(struct bpf_map *map)
                if (!sock)
                        continue;
 
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -1609,10 +1726,9 @@ static void sock_map_free(struct bpf_map *map)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, &stab->sock_map[i], NULL);
+                       smap_list_map_remove(psock, &stab->sock_map[i]);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
 
@@ -1661,17 +1777,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
        if (!sock)
                return -EINVAL;
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
        if (!psock)
                goto out;
 
        if (psock->bpf_parse)
                smap_stop_sock(psock, sock);
-       smap_list_remove(psock, &stab->sock_map[k], NULL);
+       smap_list_map_remove(psock, &stab->sock_map[k]);
        smap_release_sock(psock, sock);
 out:
-       write_unlock_bh(&sock->sk_callback_lock);
        return 0;
 }
 
@@ -1752,7 +1866,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                }
        }
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
 
        /* 2. Do not allow inheriting programs if psock exists and has
@@ -1789,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
                if (!e) {
                        err = -ENOMEM;
-                       goto out_progs;
+                       goto out_free;
                }
        }
 
@@ -1809,7 +1922,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                if (err)
                        goto out_free;
                smap_init_progs(psock, verdict, parse);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_start_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
        }
 
        /* 4. Place psock in sockmap for use and stop any programs on
@@ -1819,9 +1934,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         */
        if (map_link) {
                e->entry = map_link;
+               spin_lock_bh(&psock->maps_lock);
                list_add_tail(&e->list, &psock->maps);
+               spin_unlock_bh(&psock->maps_lock);
        }
-       write_unlock_bh(&sock->sk_callback_lock);
        return err;
 out_free:
        smap_release_sock(psock, sock);
@@ -1832,7 +1948,6 @@ out_progs:
        }
        if (tx_msg)
                bpf_prog_put(tx_msg);
-       write_unlock_bh(&sock->sk_callback_lock);
        kfree(e);
        return err;
 }
@@ -1869,10 +1984,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (osock) {
                struct smap_psock *opsock = smap_psock_sk(osock);
 
-               write_lock_bh(&osock->sk_callback_lock);
-               smap_list_remove(opsock, &stab->sock_map[i], NULL);
+               smap_list_map_remove(opsock, &stab->sock_map[i]);
                smap_release_sock(opsock, osock);
-               write_unlock_bh(&osock->sk_callback_lock);
        }
 out:
        return err;
@@ -1915,6 +2028,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
        return 0;
 }
 
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog)
+{
+       int ufd = attr->target_fd;
+       struct bpf_map *map;
+       struct fd f;
+       int err;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       err = sock_map_prog(map, prog, attr->attach_type);
+       fdput(f);
+       return err;
+}
+
 static void *sock_map_lookup(struct bpf_map *map, void *key)
 {
        return NULL;
@@ -1944,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EOPNOTSUPP;
        }
 
+       lock_sock(skops.sk);
+       preempt_disable();
+       rcu_read_lock();
        err = sock_map_ctx_update_elem(&skops, map, key, flags);
+       rcu_read_unlock();
+       preempt_enable();
+       release_sock(skops.sk);
        fput(socket->file);
        return err;
 }
@@ -2043,14 +2180,13 @@ free_htab:
        return ERR_PTR(err);
 }
 
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+static void __bpf_htab_free(struct rcu_head *rcu)
 {
-       return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
+       struct bpf_htab *htab;
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
-       return &__select_bucket(htab, hash)->head;
+       htab = container_of(rcu, struct bpf_htab, rcu);
+       bpf_map_area_free(htab->buckets);
+       kfree(htab);
 }
 
 static void sock_hash_free(struct bpf_map *map)
@@ -2069,16 +2205,18 @@ static void sock_hash_free(struct bpf_map *map)
         */
        rcu_read_lock();
        for (i = 0; i < htab->n_buckets; i++) {
-               struct hlist_head *head = select_bucket(htab, i);
+               struct bucket *b = __select_bucket(htab, i);
+               struct hlist_head *head;
                struct hlist_node *n;
                struct htab_elem *l;
 
+               raw_spin_lock_bh(&b->lock);
+               head = &b->head;
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        struct sock *sock = l->sk;
                        struct smap_psock *psock;
 
                        hlist_del_rcu(&l->hash_node);
-                       write_lock_bh(&sock->sk_callback_lock);
                        psock = smap_psock_sk(sock);
                        /* This check handles a racing sock event that can get
                         * the sk_callback_lock before this case but after xchg
@@ -2086,16 +2224,15 @@ static void sock_hash_free(struct bpf_map *map)
                         * (psock) to be null and queued for garbage collection.
                         */
                        if (likely(psock)) {
-                               smap_list_remove(psock, NULL, l);
+                               smap_list_hash_remove(psock, l);
                                smap_release_sock(psock, sock);
                        }
-                       write_unlock_bh(&sock->sk_callback_lock);
-                       kfree(l);
+                       free_htab_elem(htab, l);
                }
+               raw_spin_unlock_bh(&b->lock);
        }
        rcu_read_unlock();
-       bpf_map_area_free(htab->buckets);
-       kfree(htab);
+       call_rcu(&htab->rcu, __bpf_htab_free);
 }
 
 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
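
sock_hash_free() can now race with RCU readers such as the close path above, so the bucket array and the table itself are freed from an RCU callback rather than inline. The teardown idiom, sketched with hypothetical names (my_table is not the bpf_htab):

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_table {
	void *buckets;
	struct rcu_head rcu;
};

static void my_table_free_rcu(struct rcu_head *rcu)
{
	struct my_table *t = container_of(rcu, struct my_table, rcu);

	kvfree(t->buckets);
	kfree(t);
}

static void my_table_destroy(struct my_table *t)
{
	/* Entries were unlinked under the bucket locks; the final free
	 * waits out a grace period so readers never touch freed memory. */
	call_rcu(&t->rcu, my_table_free_rcu);
}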
@@ -2122,19 +2259,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        return l_new;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
-                                        u32 hash, void *key, u32 key_size)
-{
-       struct htab_elem *l;
-
-       hlist_for_each_entry_rcu(l, head, hash_node) {
-               if (l->hash == hash && !memcmp(&l->key, key, key_size))
-                       return l;
-       }
-
-       return NULL;
-}
-
 static inline u32 htab_map_hash(const void *key, u32 key_len)
 {
        return jhash(key, key_len, 0);
@@ -2230,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (err)
                goto err;
 
-       /* bpf_map_update_elem() can be called in_irq() */
+       /* psock is valid here because otherwise the *ctx_update_elem call
+        * above would have returned an error. It is safe to skip the error check.
+        */
+       psock = smap_psock_sk(sock);
        raw_spin_lock_bh(&b->lock);
        l_old = lookup_elem_raw(head, hash, key, key_size);
        if (l_old && map_flags == BPF_NOEXIST) {
@@ -2248,15 +2375,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                goto bucket_err;
        }
 
-       psock = smap_psock_sk(sock);
-       if (unlikely(!psock)) {
-               err = -EINVAL;
-               goto bucket_err;
-       }
-
-       e->hash_link = l_new;
-       e->htab = container_of(map, struct bpf_htab, map);
+       rcu_assign_pointer(e->hash_link, l_new);
+       rcu_assign_pointer(e->htab,
+                          container_of(map, struct bpf_htab, map));
+       spin_lock_bh(&psock->maps_lock);
        list_add_tail(&e->list, &psock->maps);
+       spin_unlock_bh(&psock->maps_lock);
 
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
@@ -2266,19 +2390,17 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                psock = smap_psock_sk(l_old->sk);
 
                hlist_del_rcu(&l_old->hash_node);
-               smap_list_remove(psock, NULL, l_old);
+               smap_list_hash_remove(psock, l_old);
                smap_release_sock(psock, l_old->sk);
                free_htab_elem(htab, l_old);
        }
        raw_spin_unlock_bh(&b->lock);
        return 0;
 bucket_err:
+       smap_release_sock(psock, sock);
        raw_spin_unlock_bh(&b->lock);
 err:
        kfree(e);
-       psock = smap_psock_sk(sock);
-       if (psock)
-               smap_release_sock(psock, sock);
        return err;
 }
 
@@ -2300,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       lock_sock(skops.sk);
+       preempt_disable();
+       rcu_read_lock();
        err = sock_hash_ctx_update_elem(&skops, map, key, flags);
+       rcu_read_unlock();
+       preempt_enable();
+       release_sock(skops.sk);
        fput(socket->file);
        return err;
 }
@@ -2326,7 +2454,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                struct smap_psock *psock;
 
                hlist_del_rcu(&l->hash_node);
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -2334,10 +2461,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, NULL, l);
+                       smap_list_hash_remove(psock, l);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
                free_htab_elem(htab, l);
                ret = 0;
        }
@@ -2359,10 +2485,8 @@ struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
        b = __select_bucket(htab, hash);
        head = &b->head;
 
-       raw_spin_lock_bh(&b->lock);
        l = lookup_elem_raw(head, hash, key, key_size);
        sk = l ? l->sk : NULL;
-       raw_spin_unlock_bh(&b->lock);
        return sk;
 }
 
@@ -2383,6 +2507,7 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_get_next_key = sock_hash_get_next_key,
        .map_update_elem = sock_hash_update_elem,
        .map_delete_elem = sock_hash_delete_elem,
+       .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
index 0fa20624707f23b15d3200c1302765a119ee9fc1..a31a1ba0f8eada88e03dc4a73ad9c0305cc70198 100644 (file)
@@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr)
        if (bpf_map_is_dev_bound(map)) {
                err = bpf_map_offload_update_elem(map, key, value, attr->flags);
                goto out;
-       } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+       } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
+                  map->map_type == BPF_MAP_TYPE_SOCKHASH ||
+                  map->map_type == BPF_MAP_TYPE_SOCKMAP) {
                err = map->ops->map_update_elem(map, key, value, attr->flags);
                goto out;
        }
@@ -1034,14 +1036,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
-               int i;
-
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-
-               for (i = 0; i < prog->aux->func_cnt; i++)
-                       bpf_prog_kallsyms_del(prog->aux->func[i]);
-               bpf_prog_kallsyms_del(prog);
+               bpf_prog_kallsyms_del_all(prog);
 
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
@@ -1358,9 +1355,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err < 0)
                goto free_used_maps;
 
-       /* eBPF program is ready to be JITed */
-       if (!prog->bpf_func)
-               prog = bpf_prog_select_runtime(prog, &err);
+       prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
                goto free_used_maps;
 
@@ -1384,6 +1379,7 @@ static int bpf_prog_load(union bpf_attr *attr)
        return err;
 
 free_used_maps:
+       bpf_prog_kallsyms_del_subprogs(prog);
        free_used_maps(prog->aux);
 free_prog:
        bpf_prog_uncharge_memlock(prog);
@@ -1489,8 +1485,6 @@ out_free_tp:
        return err;
 }
 
-#ifdef CONFIG_CGROUP_BPF
-
 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
                                             enum bpf_attach_type attach_type)
 {
@@ -1505,40 +1499,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr,
-                              int type, bool attach)
-{
-       struct bpf_prog *prog = NULL;
-       int ufd = attr->target_fd;
-       struct bpf_map *map;
-       struct fd f;
-       int err;
-
-       f = fdget(ufd);
-       map = __bpf_map_get(f);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-
-       if (attach) {
-               prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
-               if (IS_ERR(prog)) {
-                       fdput(f);
-                       return PTR_ERR(prog);
-               }
-       }
-
-       err = sock_map_prog(map, prog, attr->attach_type);
-       if (err) {
-               fdput(f);
-               if (prog)
-                       bpf_prog_put(prog);
-               return err;
-       }
-
-       fdput(f);
-       return 0;
-}
-
 #define BPF_F_ATTACH_MASK \
        (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
 
@@ -1546,7 +1506,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
-       struct cgroup *cgrp;
        int ret;
 
        if (!capable(CAP_NET_ADMIN))
@@ -1583,12 +1542,15 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
+               ptype = BPF_PROG_TYPE_SK_MSG;
+               break;
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
+               ptype = BPF_PROG_TYPE_SK_SKB;
+               break;
        case BPF_LIRC_MODE2:
-               return lirc_prog_attach(attr);
+               ptype = BPF_PROG_TYPE_LIRC_MODE2;
+               break;
        default:
                return -EINVAL;
        }
@@ -1602,18 +1564,20 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp)) {
-               bpf_prog_put(prog);
-               return PTR_ERR(cgrp);
+       switch (ptype) {
+       case BPF_PROG_TYPE_SK_SKB:
+       case BPF_PROG_TYPE_SK_MSG:
+               ret = sockmap_get_from_fd(attr, ptype, prog);
+               break;
+       case BPF_PROG_TYPE_LIRC_MODE2:
+               ret = lirc_prog_attach(attr, prog);
+               break;
+       default:
+               ret = cgroup_bpf_prog_attach(attr, ptype, prog);
        }
 
-       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
-                               attr->attach_flags);
        if (ret)
                bpf_prog_put(prog);
-       cgroup_put(cgrp);
-
        return ret;
 }
 
@@ -1622,9 +1586,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
-       struct bpf_prog *prog;
-       struct cgroup *cgrp;
-       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -1657,29 +1618,17 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
        case BPF_LIRC_MODE2:
                return lirc_prog_detach(attr);
        default:
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-
-       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
-       if (IS_ERR(prog))
-               prog = NULL;
-
-       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
-       if (prog)
-               bpf_prog_put(prog);
-       cgroup_put(cgrp);
-       return ret;
+       return cgroup_bpf_prog_detach(attr, ptype);
 }
 
 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -1687,9 +1636,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 static int bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
 {
-       struct cgroup *cgrp;
-       int ret;
-
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
        if (CHECK_ATTR(BPF_PROG_QUERY))
@@ -1717,14 +1663,9 @@ static int bpf_prog_query(const union bpf_attr *attr,
        default:
                return -EINVAL;
        }
-       cgrp = cgroup_get_from_fd(attr->query.target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-       ret = cgroup_bpf_query(cgrp, attr, uattr);
-       cgroup_put(cgrp);
-       return ret;
+
+       return cgroup_bpf_prog_query(attr, uattr);
 }
-#endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
 
@@ -2371,7 +2312,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
-#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
@@ -2381,7 +2321,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_QUERY:
                err = bpf_prog_query(&attr, uattr);
                break;
-#endif
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
index 9e2bf834f13a21090b566862a853e7567ee03ac9..63aaac52a26553fb29529790cf0350f6dafa504b 100644 (file)
@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                if (insn->code != (BPF_JMP | BPF_CALL) ||
                    insn->src_reg != BPF_PSEUDO_CALL)
                        continue;
+               /* Upon error here we cannot fall back to the interpreter, but
+                * need to hard-reject the program. Thus -EFAULT is
+                * propagated in any case.
+                */
                subprog = find_subprog(env, i + insn->imm + 1);
                if (subprog < 0) {
                        WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 
        func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
        if (!func)
-               return -ENOMEM;
+               goto out_undo_insn;
 
        for (i = 0; i < env->subprog_cnt; i++) {
                subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                tmp = bpf_int_jit_compile(func[i]);
                if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
                        verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
-                       err = -EFAULT;
+                       err = -ENOTSUPP;
                        goto out_free;
                }
                cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
                if (func[i])
                        bpf_jit_free(func[i]);
        kfree(func);
+out_undo_insn:
        /* cleanup main prog to be interpreted */
        prog->jit_requested = 0;
        for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
                err = jit_subprogs(env);
                if (err == 0)
                        return 0;
+               if (err == -EFAULT)
+                       return err;
        }
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
        for (i = 0; i < prog->len; i++, insn++) {
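
The two hunks above tighten the jit_subprogs() error contract: -EFAULT now always means a hard reject (a verifier bug or JIT image mismatch), while a JIT that merely lacks bpf-to-bpf call support returns -ENOTSUPP so that fixup_call_args() can still fall back to the interpreter. A minimal standalone model of that dispatch, with stub names invented for illustration:

    #include <errno.h>

    /* Stands in for jit_subprogs(); returns 0 or a negative errno. */
    static int jit_subprogs_stub(int simulated_err)
    {
            return simulated_err;
    }

    /* Mirrors the fixup_call_args() flow above: -EFAULT propagates as a
     * hard reject, any other failure may take the interpreter fallback. */
    static int fixup_call_args_model(int simulated_err, int have_interpreter)
    {
            int err = jit_subprogs_stub(simulated_err);

            if (err == 0)
                    return 0;           /* all subprogs JITed */
            if (err == -EFAULT)
                    return err;         /* never fall back */
            return have_interpreter ? 0 : err;
    }
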
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
new file mode 100644 (file)
index 0000000..9bd5430
--- /dev/null
@@ -0,0 +1,50 @@
+
+config HAS_DMA
+       bool
+       depends on !NO_DMA
+       default y
+
+config NEED_SG_DMA_LENGTH
+       bool
+
+config NEED_DMA_MAP_STATE
+       bool
+
+config ARCH_DMA_ADDR_T_64BIT
+       def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config HAVE_GENERIC_DMA_COHERENT
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+       bool
+       select NEED_DMA_MAP_STATE
+
+config DMA_DIRECT_OPS
+       bool
+       depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+       bool
+       depends on HAS_DMA
+       select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+       bool
+       depends on DMA_NONCOHERENT_OPS
+
+config DMA_VIRT_OPS
+       bool
+       depends on HAS_DMA
+
+config SWIOTLB
+       bool
+       select DMA_DIRECT_OPS
+       select NEED_DMA_MAP_STATE
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
new file mode 100644 (file)
index 0000000..6de44e4
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_HAS_DMA)                  += mapping.o
+obj-$(CONFIG_DMA_CMA)                  += contiguous.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DIRECT_OPS)           += direct.o
+obj-$(CONFIG_DMA_NONCOHERENT_OPS)      += noncoherent.o
+obj-$(CONFIG_DMA_VIRT_OPS)             += virt.o
+obj-$(CONFIG_DMA_API_DEBUG)            += debug.o
+obj-$(CONFIG_SWIOTLB)                  += swiotlb.o
+
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
new file mode 100644 (file)
index 0000000..597d408
--- /dev/null
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+       void            *virt_base;
+       dma_addr_t      device_base;
+       unsigned long   pfn_base;
+       int             size;
+       int             flags;
+       unsigned long   *bitmap;
+       spinlock_t      spinlock;
+       bool            use_dev_dma_pfn_offset;
+};
+
+static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
+
+static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
+{
+       if (dev && dev->dma_mem)
+               return dev->dma_mem;
+       return NULL;
+}
+
+static inline dma_addr_t dma_get_device_base(struct device *dev,
+                                            struct dma_coherent_mem *mem)
+{
+       if (mem->use_dev_dma_pfn_offset)
+               return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
+       else
+               return mem->device_base;
+}
+
+static int dma_init_coherent_memory(
+       phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
+       struct dma_coherent_mem **mem)
+{
+       struct dma_coherent_mem *dma_mem = NULL;
+       void __iomem *mem_base = NULL;
+       int pages = size >> PAGE_SHIFT;
+       int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+       int ret;
+
+       if (!size) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       mem_base = memremap(phys_addr, size, MEMREMAP_WC);
+       if (!mem_base) {
+               ret = -EINVAL;
+               goto out;
+       }
+       dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+       if (!dma_mem) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+       if (!dma_mem->bitmap) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       dma_mem->virt_base = mem_base;
+       dma_mem->device_base = device_addr;
+       dma_mem->pfn_base = PFN_DOWN(phys_addr);
+       dma_mem->size = pages;
+       dma_mem->flags = flags;
+       spin_lock_init(&dma_mem->spinlock);
+
+       *mem = dma_mem;
+       return 0;
+
+out:
+       kfree(dma_mem);
+       if (mem_base)
+               memunmap(mem_base);
+       return ret;
+}
+
+static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+{
+       if (!mem)
+               return;
+
+       memunmap(mem->virt_base);
+       kfree(mem->bitmap);
+       kfree(mem);
+}
+
+static int dma_assign_coherent_memory(struct device *dev,
+                                     struct dma_coherent_mem *mem)
+{
+       if (!dev)
+               return -ENODEV;
+
+       if (dev->dma_mem)
+               return -EBUSY;
+
+       dev->dma_mem = mem;
+       return 0;
+}
+
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+                               dma_addr_t device_addr, size_t size, int flags)
+{
+       struct dma_coherent_mem *mem;
+       int ret;
+
+       ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+       if (ret)
+               return ret;
+
+       ret = dma_assign_coherent_memory(dev, mem);
+       if (ret)
+               dma_release_coherent_memory(mem);
+       return ret;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
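
A hypothetical call site for the export above: a platform driver carving out on-chip SRAM as its device's coherent pool. The address and size are invented for illustration; in probe():

    int ret = dma_declare_coherent_memory(&pdev->dev,
                                          0x40000000,   /* CPU physical address */
                                          0x40000000,   /* device DMA address */
                                          SZ_1M,
                                          DMA_MEMORY_EXCLUSIVE);
    if (ret)
            dev_err(&pdev->dev, "coherent pool setup failed: %d\n", ret);
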
+
+void dma_release_declared_memory(struct device *dev)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+
+       if (!mem)
+               return;
+       dma_release_coherent_memory(mem);
+       dev->dma_mem = NULL;
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+                                       dma_addr_t device_addr, size_t size)
+{
+       struct dma_coherent_mem *mem = dev->dma_mem;
+       unsigned long flags;
+       int pos, err;
+
+       size += device_addr & ~PAGE_MASK;
+
+       if (!mem)
+               return ERR_PTR(-EINVAL);
+
+       spin_lock_irqsave(&mem->spinlock, flags);
+       pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
+       err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+
+       if (err != 0)
+               return ERR_PTR(err);
+       return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
+               ssize_t size, dma_addr_t *dma_handle)
+{
+       int order = get_order(size);
+       unsigned long flags;
+       int pageno;
+       void *ret;
+
+       spin_lock_irqsave(&mem->spinlock, flags);
+
+       if (unlikely(size > (mem->size << PAGE_SHIFT)))
+               goto err;
+
+       pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+       if (unlikely(pageno < 0))
+               goto err;
+
+       /*
+        * Memory was found in the coherent area.
+        */
+       *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+       ret = mem->virt_base + (pageno << PAGE_SHIFT);
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+       memset(ret, 0, size);
+       return ret;
+err:
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+       return NULL;
+}
+
+/**
+ * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
+ * @dev:       device from which we allocate memory
+ * @size:      size of requested memory area
+ * @dma_handle:        This will be filled with the correct dma handle
+ * @ret:       This pointer will be filled with the virtual address
+ *             to allocated area.
+ *
+ * This function should only be called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
+               dma_addr_t *dma_handle, void **ret)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       if (!mem)
+               return 0;
+
+       *ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+       if (*ret)
+               return 1;
+
+       /*
+        * In the case where the allocation cannot be satisfied from the
+        * per-device area, try to fall back to generic memory if the
+        * constraints allow it.
+        */
+       return mem->flags & DMA_MEMORY_EXCLUSIVE;
+}
+EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
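
A sketch of the caller contract documented above, as a per-arch dma_alloc_coherent() might honor it (surrounding allocation code elided):

    void *vaddr;

    /* Give the per-device pool first claim on the request. */
    if (dma_alloc_from_dev_coherent(dev, size, &dma_handle, &vaddr))
            return vaddr;   /* may be NULL if the pool is exhausted and
                             * flagged DMA_MEMORY_EXCLUSIVE */
    /* returned 0: fall through to a generic allocation */
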
+
+void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+{
+       if (!dma_coherent_default_memory)
+               return NULL;
+
+       return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
+                       dma_handle);
+}
+
+static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
+                                      int order, void *vaddr)
+{
+       if (mem && vaddr >= mem->virt_base && vaddr <
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+               unsigned long flags;
+
+               spin_lock_irqsave(&mem->spinlock, flags);
+               bitmap_release_region(mem->bitmap, page, order);
+               spin_unlock_irqrestore(&mem->spinlock, flags);
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * dma_release_from_dev_coherent() - free memory to device coherent memory pool
+ * @dev:       device from which the memory was allocated
+ * @order:     the order of pages allocated
+ * @vaddr:     virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if the caller should
+ * proceed with releasing memory from generic pools.
+ */
+int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       return __dma_release_from_coherent(mem, order, vaddr);
+}
+EXPORT_SYMBOL(dma_release_from_dev_coherent);
+
+int dma_release_from_global_coherent(int order, void *vaddr)
+{
+       if (!dma_coherent_default_memory)
+               return 0;
+
+       return __dma_release_from_coherent(dma_coherent_default_memory, order,
+                       vaddr);
+}
+
+static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
+               struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
+{
+       if (mem && vaddr >= mem->virt_base && vaddr + size <=
+                  (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+               unsigned long off = vma->vm_pgoff;
+               int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+               int user_count = vma_pages(vma);
+               int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+               *ret = -ENXIO;
+               if (off < count && user_count <= count - off) {
+                       unsigned long pfn = mem->pfn_base + start + off;
+                       *ret = remap_pfn_range(vma, vma->vm_start, pfn,
+                                              user_count << PAGE_SHIFT,
+                                              vma->vm_page_prot);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
+ * @dev:       device from which the memory was allocated
+ * @vma:       vm_area for the userspace memory
+ * @vaddr:     cpu address returned by dma_alloc_from_dev_coherent
+ * @size:      size of the memory buffer allocated
+ * @ret:       result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
+ */
+int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
+                          void *vaddr, size_t size, int *ret)
+{
+       struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);
+
+       return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
+}
+EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
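
The mmap path follows the same first-claim pattern; a sketch of a caller honoring the 1/0 return contract documented above:

    int ret;

    if (dma_mmap_from_dev_coherent(dev, vma, vaddr, size, &ret))
            return ret;     /* vaddr belonged to the device pool */
    /* returned 0: map the buffer from generic memory instead */
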
+
+int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
+                                  size_t size, int *ret)
+{
+       if (!dma_coherent_default_memory)
+               return 0;
+
+       return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
+                                       vaddr, size, ret);
+}
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static struct reserved_mem *dma_reserved_default_memory __initdata;
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+       struct dma_coherent_mem *mem = rmem->priv;
+       int ret;
+
+       if (!mem) {
+               ret = dma_init_coherent_memory(rmem->base, rmem->base,
+                                              rmem->size,
+                                              DMA_MEMORY_EXCLUSIVE, &mem);
+               if (ret) {
+                       pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+                               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+                       return ret;
+               }
+       }
+       mem->use_dev_dma_pfn_offset = true;
+       rmem->priv = mem;
+       dma_assign_coherent_memory(dev, mem);
+       return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+                                   struct device *dev)
+{
+       if (dev)
+               dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+       .device_init    = rmem_dma_device_init,
+       .device_release = rmem_dma_device_release,
+};
+
+static int __init rmem_dma_setup(struct reserved_mem *rmem)
+{
+       unsigned long node = rmem->fdt_node;
+
+       if (of_get_flat_dt_prop(node, "reusable", NULL))
+               return -EINVAL;
+
+#ifdef CONFIG_ARM
+       if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
+               pr_err("Reserved memory: regions without no-map are not yet supported\n");
+               return -EINVAL;
+       }
+
+       if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
+               WARN(dma_reserved_default_memory,
+                    "Reserved memory: region for default DMA coherent area is redefined\n");
+               dma_reserved_default_memory = rmem;
+       }
+#endif
+
+       rmem->ops = &rmem_dma_ops;
+       pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
+               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+       return 0;
+}
+
+static int __init dma_init_reserved_memory(void)
+{
+       const struct reserved_mem_ops *ops;
+       int ret;
+
+       if (!dma_reserved_default_memory)
+               return -ENOMEM;
+
+       ops = dma_reserved_default_memory->ops;
+
+       /*
+        * We rely on rmem_dma_device_init() not propagating an error from
+        * dma_assign_coherent_memory() for a NULL device.
+        */
+       ret = ops->device_init(dma_reserved_default_memory, NULL);
+
+       if (!ret) {
+               dma_coherent_default_memory = dma_reserved_default_memory->priv;
+               pr_info("DMA: default coherent area is set\n");
+       }
+
+       return ret;
+}
+
+core_initcall(dma_init_reserved_memory);
+
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+#endif
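
For reference, a device-tree node that rmem_dma_setup() above would accept (a hypothetical layout; on ARM the no-map property is mandatory, and linux,dma-default marks the node as the global default pool):

    reserved-memory {
            #address-cells = <1>;
            #size-cells = <1>;
            ranges;

            dma_pool: dma-pool@60000000 {
                    compatible = "shared-dma-pool";
                    reg = <0x60000000 0x100000>;    /* 1 MiB, illustrative */
                    no-map;
                    linux,dma-default;
            };
    };
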
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
new file mode 100644 (file)
index 0000000..d987dcd
--- /dev/null
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Contiguous Memory Allocator for DMA mapping framework
+ * Copyright (c) 2010-2011 by Samsung Electronics.
+ * Written by:
+ *     Marek Szyprowski <m.szyprowski@samsung.com>
+ *     Michal Nazarewicz <mina86@mina86.com>
+ */
+
+#define pr_fmt(fmt) "cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+#  define DEBUG
+#endif
+#endif
+
+#include <asm/page.h>
+#include <asm/dma-contiguous.h>
+
+#include <linux/memblock.h>
+#include <linux/err.h>
+#include <linux/sizes.h>
+#include <linux/dma-contiguous.h>
+#include <linux/cma.h>
+
+#ifdef CONFIG_CMA_SIZE_MBYTES
+#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
+#else
+#define CMA_SIZE_MBYTES 0
+#endif
+
+struct cma *dma_contiguous_default_area;
+
+/*
+ * The default global CMA area size can be defined in the kernel's .config.
+ * This is useful mainly for distro maintainers to create a kernel
+ * that works correctly for most supported systems.
+ * The size can be set in bytes or as a percentage of the total memory
+ * in the system.
+ *
+ * Users who want to set the size of the global CMA area for their system
+ * should use the cma= kernel parameter.
+ */
+static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
+
+static int __init early_cma(char *p)
+{
+       pr_debug("%s(%s)\n", __func__, p);
+       size_cmdline = memparse(p, &p);
+       if (*p != '@')
+               return 0;
+       base_cmdline = memparse(p + 1, &p);
+       if (*p != '-') {
+               limit_cmdline = base_cmdline + size_cmdline;
+               return 0;
+       }
+       limit_cmdline = memparse(p + 1, &p);
+
+       return 0;
+}
+early_param("cma", early_cma);
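
The parser above accepts a size, an optional fixed base, and an optional limit; some illustrative command lines (the addresses are invented):

    cma=64M                           reserve 64 MiB anywhere
    cma=64M@0x80000000                reserve 64 MiB at a fixed base
                                      (limit implied as base + size)
    cma=64M@0x80000000-0x90000000     reserve 64 MiB between base and limit
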
+
+#ifdef CONFIG_CMA_SIZE_PERCENTAGE
+
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
+{
+       struct memblock_region *reg;
+       unsigned long total_pages = 0;
+
+       /*
+        * We cannot use memblock_phys_mem_size() here, because
+        * memblock_analyze() has not been called yet.
+        */
+       for_each_memblock(memory, reg)
+               total_pages += memblock_region_memory_end_pfn(reg) -
+                              memblock_region_memory_base_pfn(reg);
+
+       return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
+}
+
+#else
+
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
+{
+       return 0;
+}
+
+#endif
+
+/**
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
+ */
+void __init dma_contiguous_reserve(phys_addr_t limit)
+{
+       phys_addr_t selected_size = 0;
+       phys_addr_t selected_base = 0;
+       phys_addr_t selected_limit = limit;
+       bool fixed = false;
+
+       pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
+
+       if (size_cmdline != -1) {
+               selected_size = size_cmdline;
+               selected_base = base_cmdline;
+               selected_limit = min_not_zero(limit_cmdline, limit);
+               if (base_cmdline + size_cmdline == limit_cmdline)
+                       fixed = true;
+       } else {
+#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
+               selected_size = size_bytes;
+#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
+               selected_size = cma_early_percent_memory();
+#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
+               selected_size = min(size_bytes, cma_early_percent_memory());
+#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
+               selected_size = max(size_bytes, cma_early_percent_memory());
+#endif
+       }
+
+       if (selected_size && !dma_contiguous_default_area) {
+               pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+                        (unsigned long)selected_size / SZ_1M);
+
+               dma_contiguous_reserve_area(selected_size, selected_base,
+                                           selected_limit,
+                                           &dma_contiguous_default_area,
+                                           fixed);
+       }
+}
+
+/**
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
+ *
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. This function allows creating custom reserved areas for specific
+ * devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base.  If false,
+ * reserve in range from @base to @limit.
+ */
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+                                      phys_addr_t limit, struct cma **res_cma,
+                                      bool fixed)
+{
+       int ret;
+
+       ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
+                                       "reserved", res_cma);
+       if (ret)
+               return ret;
+
+       /* Architecture specific contiguous memory fixup. */
+       dma_contiguous_early_fixup(cma_get_base(*res_cma),
+                               cma_get_size(*res_cma));
+
+       return 0;
+}
+
+/**
+ * dma_alloc_from_contiguous() - allocate pages from contiguous area
+ * @dev:   Pointer to device for which the allocation is performed.
+ * @count: Requested number of pages.
+ * @align: Requested alignment of pages (in PAGE_SIZE order).
+ * @gfp_mask: GFP flags to use for this allocation.
+ *
+ * This function allocates a memory buffer for the specified device. It uses
+ * the device-specific contiguous memory area if available, or the default
+ * global one. Requires the architecture-specific dev_get_cma_area() helper
+ * function.
+ */
+struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
+                                      unsigned int align, gfp_t gfp_mask)
+{
+       if (align > CONFIG_CMA_ALIGNMENT)
+               align = CONFIG_CMA_ALIGNMENT;
+
+       return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
+}
+
+/**
+ * dma_release_from_contiguous() - release allocated pages
+ * @dev:   Pointer to device for which the pages were allocated.
+ * @pages: Allocated pages.
+ * @count: Number of allocated pages.
+ *
+ * This function releases memory allocated by dma_alloc_from_contiguous().
+ * It returns false when provided pages do not belong to contiguous area and
+ * true otherwise.
+ */
+bool dma_release_from_contiguous(struct device *dev, struct page *pages,
+                                int count)
+{
+       return cma_release(dev_get_cma_area(dev), pages, count);
+}
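
A sketch of how the two helpers above pair up in a caller (the count is invented; a real user would hold the pages for the buffer's lifetime):

    size_t count = 16;      /* hypothetical 16-page buffer */
    struct page *page;

    page = dma_alloc_from_contiguous(dev, count,
                                     get_order(count << PAGE_SHIFT),
                                     GFP_KERNEL);
    if (page) {
            /* ... use the buffer ... */
            if (!dma_release_from_contiguous(dev, page, count))
                    pr_warn("pages did not come from the CMA area\n");
    }
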
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+       dev_set_cma_area(dev, rmem->priv);
+       return 0;
+}
+
+static void rmem_cma_device_release(struct reserved_mem *rmem,
+                                   struct device *dev)
+{
+       dev_set_cma_area(dev, NULL);
+}
+
+static const struct reserved_mem_ops rmem_cma_ops = {
+       .device_init    = rmem_cma_device_init,
+       .device_release = rmem_cma_device_release,
+};
+
+static int __init rmem_cma_setup(struct reserved_mem *rmem)
+{
+       phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+       phys_addr_t mask = align - 1;
+       unsigned long node = rmem->fdt_node;
+       struct cma *cma;
+       int err;
+
+       if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
+           of_get_flat_dt_prop(node, "no-map", NULL))
+               return -EINVAL;
+
+       if ((rmem->base & mask) || (rmem->size & mask)) {
+               pr_err("Reserved memory: incorrect alignment of CMA region\n");
+               return -EINVAL;
+       }
+
+       err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
+       if (err) {
+               pr_err("Reserved memory: unable to setup CMA region\n");
+               return err;
+       }
+       /* Architecture specific contiguous memory fixup. */
+       dma_contiguous_early_fixup(rmem->base, rmem->size);
+
+       if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+               dma_contiguous_set_default(cma);
+
+       rmem->ops = &rmem_cma_ops;
+       rmem->priv = cma;
+
+       pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
+               &rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+       return 0;
+}
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+#endif
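
In contrast to the coherent pool earlier, rmem_cma_setup() above requires the reusable property and rejects no-map; a hypothetical node:

    linux,cma {
            compatible = "shared-dma-pool";
            reusable;
            size = <0x4000000>;         /* 64 MiB, illustrative */
            alignment = <0x2000000>;
            linux,cma-default;
    };
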
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
new file mode 100644 (file)
index 0000000..c007d25
--- /dev/null
@@ -0,0 +1,1773 @@
+/*
+ * Copyright (C) 2008 Advanced Micro Devices, Inc.
+ *
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/sched/task_stack.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched/task.h>
+#include <linux/stacktrace.h>
+#include <linux/dma-debug.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <asm/sections.h>
+
+#define HASH_SIZE       1024ULL
+#define HASH_FN_SHIFT   13
+#define HASH_FN_MASK    (HASH_SIZE - 1)
+
+/* allow architectures to override this if absolutely required */
+#ifndef PREALLOC_DMA_DEBUG_ENTRIES
+#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+#endif
+
+enum {
+       dma_debug_single,
+       dma_debug_page,
+       dma_debug_sg,
+       dma_debug_coherent,
+       dma_debug_resource,
+};
+
+enum map_err_types {
+       MAP_ERR_CHECK_NOT_APPLICABLE,
+       MAP_ERR_NOT_CHECKED,
+       MAP_ERR_CHECKED,
+};
+
+#define DMA_DEBUG_STACKTRACE_ENTRIES 5
+
+/**
+ * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
+ * @list: node on pre-allocated free_entries list
+ * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
+ * @type: single, page, sg, coherent
+ * @pfn: page frame of the start address
+ * @offset: offset of mapping relative to pfn
+ * @size: length of the mapping
+ * @direction: enum dma_data_direction
+ * @sg_call_ents: 'nents' from dma_map_sg
+ * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
+ * @map_err_type: track whether dma_mapping_error() was checked
+ * @stacktrace: support backtraces when a violation is detected
+ */
+struct dma_debug_entry {
+       struct list_head list;
+       struct device    *dev;
+       int              type;
+       unsigned long    pfn;
+       size_t           offset;
+       u64              dev_addr;
+       u64              size;
+       int              direction;
+       int              sg_call_ents;
+       int              sg_mapped_ents;
+       enum map_err_types  map_err_type;
+#ifdef CONFIG_STACKTRACE
+       struct           stack_trace stacktrace;
+       unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
+#endif
+};
+
+typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
+
+struct hash_bucket {
+       struct list_head list;
+       spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
+/* Hash list to save the allocated dma addresses */
+static struct hash_bucket dma_entry_hash[HASH_SIZE];
+/* List of pre-allocated dma_debug_entry's */
+static LIST_HEAD(free_entries);
+/* Lock for the list above */
+static DEFINE_SPINLOCK(free_entries_lock);
+
+/* Global disable flag - will be set in case of an error */
+static bool global_disable __read_mostly;
+
+/* Early initialization disable flag, set at the end of dma_debug_init */
+static bool dma_debug_initialized __read_mostly;
+
+static inline bool dma_debug_disabled(void)
+{
+       return global_disable || !dma_debug_initialized;
+}
+
+/* Global error count */
+static u32 error_count;
+
+/* Global error show enable*/
+static u32 show_all_errors __read_mostly;
+/* Number of errors to show */
+static u32 show_num_errors = 1;
+
+static u32 num_free_entries;
+static u32 min_free_entries;
+static u32 nr_total_entries;
+
+/* number of preallocated entries requested by kernel cmdline */
+static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
+
+/* debugfs dentries for the stuff above */
+static struct dentry *dma_debug_dent        __read_mostly;
+static struct dentry *global_disable_dent   __read_mostly;
+static struct dentry *error_count_dent      __read_mostly;
+static struct dentry *show_all_errors_dent  __read_mostly;
+static struct dentry *show_num_errors_dent  __read_mostly;
+static struct dentry *num_free_entries_dent __read_mostly;
+static struct dentry *min_free_entries_dent __read_mostly;
+static struct dentry *filter_dent           __read_mostly;
+
+/* per-driver filter related state */
+
+#define NAME_MAX_LEN   64
+
+static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
+static struct device_driver *current_driver                    __read_mostly;
+
+static DEFINE_RWLOCK(driver_name_lock);
+
+static const char *const maperr2str[] = {
+       [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
+       [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
+       [MAP_ERR_CHECKED] = "dma map error checked",
+};
+
+static const char *type2name[5] = { "single", "page",
+                                   "scatter-gather", "coherent",
+                                   "resource" };
+
+static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
+                                  "DMA_FROM_DEVICE", "DMA_NONE" };
+
+/*
+ * The access to some variables in this macro is racy. We can't use atomic_t
+ * here because all these variables are exported to debugfs. Some of them are
+ * even writable. This is also the reason why a lock won't help much. But
+ * anyway, the races are no big deal. Here is why:
+ *
+ *   error_count: the addition is racy, but the worst thing that can happen is
+ *                that we don't count some errors
+ *   show_num_errors: the subtraction is racy. Also no big deal because in
+ *                    worst case this will result in one warning more in the
+ *                    system log than the user configured. This variable is
+ *                    writeable via debugfs.
+ */
+static inline void dump_entry_trace(struct dma_debug_entry *entry)
+{
+#ifdef CONFIG_STACKTRACE
+       if (entry) {
+               pr_warning("Mapped at:\n");
+               print_stack_trace(&entry->stacktrace, 0);
+       }
+#endif
+}
+
+static bool driver_filter(struct device *dev)
+{
+       struct device_driver *drv;
+       unsigned long flags;
+       bool ret;
+
+       /* driver filter off */
+       if (likely(!current_driver_name[0]))
+               return true;
+
+       /* driver filter on and initialized */
+       if (current_driver && dev && dev->driver == current_driver)
+               return true;
+
+       /* driver filter on, but we can't filter on a NULL device... */
+       if (!dev)
+               return false;
+
+       if (current_driver || !current_driver_name[0])
+               return false;
+
+       /* driver filter on but not yet initialized */
+       drv = dev->driver;
+       if (!drv)
+               return false;
+
+       /* lock to protect against change of current_driver_name */
+       read_lock_irqsave(&driver_name_lock, flags);
+
+       ret = false;
+       if (drv->name &&
+           strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
+               current_driver = drv;
+               ret = true;
+       }
+
+       read_unlock_irqrestore(&driver_name_lock, flags);
+
+       return ret;
+}
+
+#define err_printk(dev, entry, format, arg...) do {                    \
+               error_count += 1;                                       \
+               if (driver_filter(dev) &&                               \
+                   (show_all_errors || show_num_errors > 0)) {         \
+                       WARN(1, "%s %s: " format,                       \
+                            dev ? dev_driver_string(dev) : "NULL",     \
+                            dev ? dev_name(dev) : "NULL", ## arg);     \
+                       dump_entry_trace(entry);                        \
+               }                                                       \
+               if (!show_all_errors && show_num_errors > 0)            \
+                       show_num_errors -= 1;                           \
+       } while (0)
+
+/*
+ * Hash related functions
+ *
+ * Every DMA-API request is saved into a struct dma_debug_entry. To
+ * have quick access to these structs, they are stored in a hash table.
+ */
+static int hash_fn(struct dma_debug_entry *entry)
+{
+       /*
+        * The hash function is based on the dma address. With HASH_FN_SHIFT
+        * of 13 and a 1024-bucket table, bits 13-22 of the address form the
+        * index into the hash.
+        */
+       return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
+}
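
For instance, with the constants above (HASH_FN_SHIFT of 13, HASH_FN_MASK of 0x3ff), a made-up address hashes as:

    /* dev_addr = 0x12345678: (0x12345678 >> 13) & 0x3ff = 0x1a2 */
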
+
+/*
+ * Request exclusive access to a hash bucket for a given dma_debug_entry.
+ */
+static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
+                                          unsigned long *flags)
+       __acquires(&dma_entry_hash[idx].lock)
+{
+       int idx = hash_fn(entry);
+       unsigned long __flags;
+
+       spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
+       *flags = __flags;
+       return &dma_entry_hash[idx];
+}
+
+/*
+ * Give up exclusive access to the hash bucket
+ */
+static void put_hash_bucket(struct hash_bucket *bucket,
+                           unsigned long *flags)
+       __releases(&bucket->lock)
+{
+       unsigned long __flags = *flags;
+
+       spin_unlock_irqrestore(&bucket->lock, __flags);
+}
+
+static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
+{
+       return ((a->dev_addr == b->dev_addr) &&
+               (a->dev == b->dev)) ? true : false;
+}
+
+static bool containing_match(struct dma_debug_entry *a,
+                            struct dma_debug_entry *b)
+{
+       if (a->dev != b->dev)
+               return false;
+
+       if ((b->dev_addr <= a->dev_addr) &&
+           ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
+               return true;
+
+       return false;
+}
+
+/*
+ * Search a given entry in the hash bucket list
+ */
+static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
+                                                 struct dma_debug_entry *ref,
+                                                 match_fn match)
+{
+       struct dma_debug_entry *entry, *ret = NULL;
+       int matches = 0, match_lvl, last_lvl = -1;
+
+       list_for_each_entry(entry, &bucket->list, list) {
+               if (!match(ref, entry))
+                       continue;
+
+               /*
+                * Some drivers map the same physical address multiple
+                * times. Without a hardware IOMMU this results in the
+                * same device addresses being put into the dma-debug
+                * hash multiple times too. This can result in false
+                * positives being reported. Therefore we implement a
+                * best-fit algorithm here which returns the entry from
+                * the hash that best fits the reference value, instead
+                * of the first fit.
+                */
+               matches += 1;
+               match_lvl = 0;
+               entry->size         == ref->size         ? ++match_lvl : 0;
+               entry->type         == ref->type         ? ++match_lvl : 0;
+               entry->direction    == ref->direction    ? ++match_lvl : 0;
+               entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
+
+               if (match_lvl == 4) {
+                       /* perfect-fit - return the result */
+                       return entry;
+               } else if (match_lvl > last_lvl) {
+                       /*
+                        * We found an entry that fits better than the
+                        * previous one, or it is the first match.
+                        */
+                       last_lvl = match_lvl;
+                       ret      = entry;
+               }
+       }
+
+       /*
+        * If we have multiple matches but no perfect-fit, just return
+        * NULL.
+        */
+       ret = (matches == 1) ? ret : NULL;
+
+       return ret;
+}
+
+static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
+                                                struct dma_debug_entry *ref)
+{
+       return __hash_bucket_find(bucket, ref, exact_match);
+}
+
+static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
+                                                  struct dma_debug_entry *ref,
+                                                  unsigned long *flags)
+{
+
+       unsigned int max_range = dma_get_max_seg_size(ref->dev);
+       struct dma_debug_entry *entry, index = *ref;
+       unsigned int range = 0;
+
+       while (range <= max_range) {
+               entry = __hash_bucket_find(*bucket, ref, containing_match);
+
+               if (entry)
+                       return entry;
+
+               /*
+                * Nothing found, go back a hash bucket
+                */
+               put_hash_bucket(*bucket, flags);
+               range          += (1 << HASH_FN_SHIFT);
+               index.dev_addr -= (1 << HASH_FN_SHIFT);
+               *bucket = get_hash_bucket(&index, flags);
+       }
+
+       return NULL;
+}
+
+/*
+ * Add an entry to a hash bucket
+ */
+static void hash_bucket_add(struct hash_bucket *bucket,
+                           struct dma_debug_entry *entry)
+{
+       list_add_tail(&entry->list, &bucket->list);
+}
+
+/*
+ * Remove entry from a hash bucket list
+ */
+static void hash_bucket_del(struct dma_debug_entry *entry)
+{
+       list_del(&entry->list);
+}
+
+static unsigned long long phys_addr(struct dma_debug_entry *entry)
+{
+       if (entry->type == dma_debug_resource)
+               return __pfn_to_phys(entry->pfn) + entry->offset;
+
+       return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
+}
+
+/*
+ * Dump mapping entries for debugging purposes
+ */
+void debug_dma_dump_mappings(struct device *dev)
+{
+       int idx;
+
+       for (idx = 0; idx < HASH_SIZE; idx++) {
+               struct hash_bucket *bucket = &dma_entry_hash[idx];
+               struct dma_debug_entry *entry;
+               unsigned long flags;
+
+               spin_lock_irqsave(&bucket->lock, flags);
+
+               list_for_each_entry(entry, &bucket->list, list) {
+                       if (!dev || dev == entry->dev) {
+                               dev_info(entry->dev,
+                                        "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
+                                        type2name[entry->type], idx,
+                                        phys_addr(entry), entry->pfn,
+                                        entry->dev_addr, entry->size,
+                                        dir2name[entry->direction],
+                                        maperr2str[entry->map_err_type]);
+                       }
+               }
+
+               spin_unlock_irqrestore(&bucket->lock, flags);
+       }
+}
+
+/*
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
+ * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
+ * the entry already exists at insertion time add a tag as a reference
+ * count for the overlapping mappings.  For now, the overlap tracking
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
+ *
+ * Memory usage is mostly constrained by the maximum number of available
+ * dma-debug entries in that we need a free dma_debug_entry before
+ * inserting into the tree.  In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
+ *
+ * At any time debug_dma_assert_idle() can be called to trigger a
+ * warning if any cachelines in the given page are in the active set.
+ */
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static DEFINE_SPINLOCK(radix_lock);
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
+
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+       return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+               (entry->offset >> L1_CACHE_SHIFT);
+}
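
A worked instance of the cacheline mapping above, assuming 4 KiB pages and 64-byte cachelines (so CACHELINE_PER_PAGE_SHIFT is 6 and CACHELINES_PER_PAGE is 64):

    /* pfn = 0x10, offset = 0x80:
     * cln = (0x10 << 6) + (0x80 >> 6) = 0x400 + 0x2 = 0x402,
     * i.e. the third cacheline of page frame 0x10. */
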
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
+{
+       int overlap = 0, i;
+
+       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+               if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
+                       overlap |= 1 << i;
+       return overlap;
+}
+
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
+{
+       int i;
+
+       if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
+               return overlap;
+
+       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
+               if (overlap & 1 << i)
+                       radix_tree_tag_set(&dma_active_cacheline, cln, i);
+               else
+                       radix_tree_tag_clear(&dma_active_cacheline, cln, i);
+
+       return overlap;
+}
+
+static void active_cacheline_inc_overlap(phys_addr_t cln)
+{
+       int overlap = active_cacheline_read_overlap(cln);
+
+       overlap = active_cacheline_set_overlap(cln, ++overlap);
+
+       /* If we overflowed the overlap counter then we're potentially
+        * leaking dma-mappings.  Otherwise, if maps and unmaps are
+        * balanced then this overflow may cause false negatives in
+        * debug_dma_assert_idle() as the cacheline may be marked idle
+        * prematurely.
+        */
+       WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+                 "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+                 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
+}
+
+static int active_cacheline_dec_overlap(phys_addr_t cln)
+{
+       int overlap = active_cacheline_read_overlap(cln);
+
+       return active_cacheline_set_overlap(cln, --overlap);
+}
+
+static int active_cacheline_insert(struct dma_debug_entry *entry)
+{
+       phys_addr_t cln = to_cacheline_number(entry);
+       unsigned long flags;
+       int rc;
+
+       /* If the device is not writing memory then we don't have any
+        * concerns about the cpu consuming stale data.  This mitigates
+        * legitimate usages of overlapping mappings.
+        */
+       if (entry->direction == DMA_TO_DEVICE)
+               return 0;
+
+       spin_lock_irqsave(&radix_lock, flags);
+       rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
+       if (rc == -EEXIST)
+               active_cacheline_inc_overlap(cln);
+       spin_unlock_irqrestore(&radix_lock, flags);
+
+       return rc;
+}
+
+static void active_cacheline_remove(struct dma_debug_entry *entry)
+{
+       phys_addr_t cln = to_cacheline_number(entry);
+       unsigned long flags;
+
+       /* ...mirror the insert case */
+       if (entry->direction == DMA_TO_DEVICE)
+               return;
+
+       spin_lock_irqsave(&radix_lock, flags);
+       /* Since we are counting overlaps, the final put of the
+        * cacheline will occur when the overlap count reaches 0.
+        * active_cacheline_dec_overlap() returns -1 in that case.
+        */
+       if (active_cacheline_dec_overlap(cln) < 0)
+               radix_tree_delete(&dma_active_cacheline, cln);
+       spin_unlock_irqrestore(&radix_lock, flags);
+}
+
+/**
+ * debug_dma_assert_idle() - assert that a page is not undergoing dma
+ * @page: page to lookup in the dma_active_cacheline tree
+ *
+ * Place a call to this routine in code paths where the cpu touching the
+ * page before the dma completes (i.e. before the page is dma_unmapped)
+ * would lead to data corruption.
+ */
+void debug_dma_assert_idle(struct page *page)
+{
+       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+       struct dma_debug_entry *entry = NULL;
+       void **results = (void **) &ents;
+       unsigned int nents, i;
+       unsigned long flags;
+       phys_addr_t cln;
+
+       if (dma_debug_disabled())
+               return;
+
+       if (!page)
+               return;
+
+       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
+       spin_lock_irqsave(&radix_lock, flags);
+       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+                                      CACHELINES_PER_PAGE);
+       for (i = 0; i < nents; i++) {
+               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+               if (ent_cln == cln) {
+                       entry = ents[i];
+                       break;
+               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+                       break;
+       }
+       spin_unlock_irqrestore(&radix_lock, flags);
+
+       if (!entry)
+               return;
+
+       cln = to_cacheline_number(entry);
+       err_printk(entry->dev, entry,
+                  "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+                  &cln);
+}
+
+/*
+ * Wrapper function for adding an entry to the hash.
+ * This function takes care of locking itself.
+ */
+static void add_dma_entry(struct dma_debug_entry *entry)
+{
+       struct hash_bucket *bucket;
+       unsigned long flags;
+       int rc;
+
+       bucket = get_hash_bucket(entry, &flags);
+       hash_bucket_add(bucket, entry);
+       put_hash_bucket(bucket, &flags);
+
+       rc = active_cacheline_insert(entry);
+       if (rc == -ENOMEM) {
+               pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
+               global_disable = true;
+       }
+
+       /* TODO: report -EEXIST errors here as overlapping mappings are
+        * not supported by the DMA API
+        */
+}
+
+static struct dma_debug_entry *__dma_entry_alloc(void)
+{
+       struct dma_debug_entry *entry;
+
+       entry = list_entry(free_entries.next, struct dma_debug_entry, list);
+       list_del(&entry->list);
+       memset(entry, 0, sizeof(*entry));
+
+       num_free_entries -= 1;
+       if (num_free_entries < min_free_entries)
+               min_free_entries = num_free_entries;
+
+       return entry;
+}
+
+/* struct dma_entry allocator
+ *
+ * The next two functions implement the allocator for
+ * struct dma_debug_entries.
+ */
+static struct dma_debug_entry *dma_entry_alloc(void)
+{
+       struct dma_debug_entry *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(&free_entries_lock, flags);
+
+       if (list_empty(&free_entries)) {
+               global_disable = true;
+               spin_unlock_irqrestore(&free_entries_lock, flags);
+               pr_err("DMA-API: debugging out of memory - disabling\n");
+               return NULL;
+       }
+
+       entry = __dma_entry_alloc();
+
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
+#ifdef CONFIG_STACKTRACE
+       entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
+       entry->stacktrace.entries = entry->st_entries;
+       entry->stacktrace.skip = 2;
+       save_stack_trace(&entry->stacktrace);
+#endif
+
+       return entry;
+}
+
+static void dma_entry_free(struct dma_debug_entry *entry)
+{
+       unsigned long flags;
+
+       active_cacheline_remove(entry);
+
+       /*
+        * Add to the beginning of the list; this way the entries are
+        * more likely to be cache-hot when they are reallocated.
+        */
+       spin_lock_irqsave(&free_entries_lock, flags);
+       list_add(&entry->list, &free_entries);
+       num_free_entries += 1;
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+}
+
+int dma_debug_resize_entries(u32 num_entries)
+{
+       int i, delta, ret = 0;
+       unsigned long flags;
+       struct dma_debug_entry *entry;
+       LIST_HEAD(tmp);
+
+       spin_lock_irqsave(&free_entries_lock, flags);
+
+       if (nr_total_entries < num_entries) {
+               delta = num_entries - nr_total_entries;
+
+               spin_unlock_irqrestore(&free_entries_lock, flags);
+
+               for (i = 0; i < delta; i++) {
+                       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+                       if (!entry)
+                               break;
+
+                       list_add_tail(&entry->list, &tmp);
+               }
+
+               spin_lock_irqsave(&free_entries_lock, flags);
+
+               list_splice(&tmp, &free_entries);
+               nr_total_entries += i;
+               num_free_entries += i;
+       } else {
+               delta = nr_total_entries - num_entries;
+
+               for (i = 0; i < delta && !list_empty(&free_entries); i++) {
+                       entry = __dma_entry_alloc();
+                       kfree(entry);
+               }
+
+               nr_total_entries -= i;
+       }
+
+       if (nr_total_entries != num_entries)
+               ret = 1;
+
+       spin_unlock_irqrestore(&free_entries_lock, flags);
+
+       return ret;
+}
+
+/*
+ * DMA-API debugging init code
+ *
+ * The init code does two things:
+ *   1. Initialize core data structures
+ *   2. Preallocate a given number of dma_debug_entry structs
+ */
+
+static int prealloc_memory(u32 num_entries)
+{
+       struct dma_debug_entry *entry, *next_entry;
+       int i;
+
+       for (i = 0; i < num_entries; ++i) {
+               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+               if (!entry)
+                       goto out_err;
+
+               list_add_tail(&entry->list, &free_entries);
+       }
+
+       num_free_entries = num_entries;
+       min_free_entries = num_entries;
+
+       pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
+
+       return 0;
+
+out_err:
+
+       list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
+               list_del(&entry->list);
+               kfree(entry);
+       }
+
+       return -ENOMEM;
+}
+
+static ssize_t filter_read(struct file *file, char __user *user_buf,
+                          size_t count, loff_t *ppos)
+{
+       char buf[NAME_MAX_LEN + 1];
+       unsigned long flags;
+       int len;
+
+       if (!current_driver_name[0])
+               return 0;
+
+       /*
+        * We can't copy to userspace directly because current_driver_name can
+        * only be read under the driver_name_lock with irqs disabled. So
+        * create a temporary copy first.
+        */
+       read_lock_irqsave(&driver_name_lock, flags);
+       len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
+       read_unlock_irqrestore(&driver_name_lock, flags);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t filter_write(struct file *file, const char __user *userbuf,
+                           size_t count, loff_t *ppos)
+{
+       char buf[NAME_MAX_LEN];
+       unsigned long flags;
+       size_t len;
+       int i;
+
+       /*
+        * We can't copy from userspace directly. Access to
+        * current_driver_name is protected with a write_lock with irqs
+        * disabled. Since copy_from_user() can fault and may sleep, we
+        * need to copy to a temporary buffer first.
+        */
+       len = min(count, (size_t)(NAME_MAX_LEN - 1));
+       if (copy_from_user(buf, userbuf, len))
+               return -EFAULT;
+
+       buf[len] = 0;
+
+       write_lock_irqsave(&driver_name_lock, flags);
+
+       /*
+        * Now handle the string we got from userspace very carefully.
+        * The rules are:
+        *         - only use the first token we got
+        *         - token delimiter is everything looking like a space
+        *           character (' ', '\n', '\t' ...)
+        *
+        */
+       if (!isalnum(buf[0])) {
+               /*
+                * If the first character userspace gave us is not
+                * alphanumerical then assume the filter should be
+                * switched off.
+                */
+               if (current_driver_name[0])
+                       pr_info("DMA-API: switching off dma-debug driver filter\n");
+               current_driver_name[0] = 0;
+               current_driver = NULL;
+               goto out_unlock;
+       }
+
+       /*
+        * Now parse out the first token and use it as the name for the
+        * driver to filter for.
+        */
+       for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
+               current_driver_name[i] = buf[i];
+               if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
+                       break;
+       }
+       current_driver_name[i] = 0;
+       current_driver = NULL;
+
+       pr_info("DMA-API: enable driver filter for driver [%s]\n",
+               current_driver_name);
+
+out_unlock:
+       write_unlock_irqrestore(&driver_name_lock, flags);
+
+       return count;
+}
+
+static const struct file_operations filter_fops = {
+       .read  = filter_read,
+       .write = filter_write,
+       .llseek = default_llseek,
+};
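
A hypothetical shell session against the debugfs file implemented above (assuming debugfs is mounted at /sys/kernel/debug; the driver name is just an example):

    echo e1000e > /sys/kernel/debug/dma-api/driver_filter   # report only this driver
    cat /sys/kernel/debug/dma-api/driver_filter             # show the active filter
    echo ""     > /sys/kernel/debug/dma-api/driver_filter   # non-alnum first char: filter off
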
+
+static int dma_debug_fs_init(void)
+{
+       dma_debug_dent = debugfs_create_dir("dma-api", NULL);
+       if (!dma_debug_dent) {
+               pr_err("DMA-API: can not create debugfs directory\n");
+               return -ENOMEM;
+       }
+
+       global_disable_dent = debugfs_create_bool("disabled", 0444,
+                       dma_debug_dent,
+                       &global_disable);
+       if (!global_disable_dent)
+               goto out_err;
+
+       error_count_dent = debugfs_create_u32("error_count", 0444,
+                       dma_debug_dent, &error_count);
+       if (!error_count_dent)
+               goto out_err;
+
+       show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
+                       dma_debug_dent,
+                       &show_all_errors);
+       if (!show_all_errors_dent)
+               goto out_err;
+
+       show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
+                       dma_debug_dent,
+                       &show_num_errors);
+       if (!show_num_errors_dent)
+               goto out_err;
+
+       num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
+                       dma_debug_dent,
+                       &num_free_entries);
+       if (!num_free_entries_dent)
+               goto out_err;
+
+       min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
+                       dma_debug_dent,
+                       &min_free_entries);
+       if (!min_free_entries_dent)
+               goto out_err;
+
+       filter_dent = debugfs_create_file("driver_filter", 0644,
+                                         dma_debug_dent, NULL, &filter_fops);
+       if (!filter_dent)
+               goto out_err;
+
+       return 0;
+
+out_err:
+       debugfs_remove_recursive(dma_debug_dent);
+
+       return -ENOMEM;
+}
+
+static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
+{
+       struct dma_debug_entry *entry;
+       unsigned long flags;
+       int count = 0, i;
+
+       for (i = 0; i < HASH_SIZE; ++i) {
+               spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
+               list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
+                       if (entry->dev == dev) {
+                               count += 1;
+                               *out_entry = entry;
+                       }
+               }
+               spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
+       }
+
+       return count;
+}
+
+static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
+{
+       struct device *dev = data;
+       struct dma_debug_entry *uninitialized_var(entry);
+       int count;
+
+       if (dma_debug_disabled())
+               return 0;
+
+       switch (action) {
+       case BUS_NOTIFY_UNBOUND_DRIVER:
+               count = device_dma_allocations(dev, &entry);
+               if (count == 0)
+                       break;
+               err_printk(dev, entry, "DMA-API: device driver has pending "
+                               "DMA allocations while released from device "
+                               "[count=%d]\n"
+                               "One of leaked entries details: "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [mapped as %s]\n",
+                       count, entry->dev_addr, entry->size,
+                       dir2name[entry->direction], type2name[entry->type]);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+void dma_debug_add_bus(struct bus_type *bus)
+{
+       struct notifier_block *nb;
+
+       if (dma_debug_disabled())
+               return;
+
+       nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+       if (nb == NULL) {
+               pr_err("dma_debug_add_bus: out of memory\n");
+               return;
+       }
+
+       nb->notifier_call = dma_debug_device_change;
+
+       bus_register_notifier(bus, nb);
+}
+
+static int dma_debug_init(void)
+{
+       int i;
+
+       /* Do not use dma_debug_initialized here, since we really want to be
+        * called to set dma_debug_initialized
+        */
+       if (global_disable)
+               return 0;
+
+       for (i = 0; i < HASH_SIZE; ++i) {
+               INIT_LIST_HEAD(&dma_entry_hash[i].list);
+               spin_lock_init(&dma_entry_hash[i].lock);
+       }
+
+       if (dma_debug_fs_init() != 0) {
+               pr_err("DMA-API: error creating debugfs entries - disabling\n");
+               global_disable = true;
+
+               return 0;
+       }
+
+       if (prealloc_memory(nr_prealloc_entries) != 0) {
+               pr_err("DMA-API: debugging out of memory error - disabled\n");
+               global_disable = true;
+
+               return 0;
+       }
+
+       nr_total_entries = num_free_entries;
+
+       dma_debug_initialized = true;
+
+       pr_info("DMA-API: debugging enabled by kernel config\n");
+       return 0;
+}
+core_initcall(dma_debug_init);
+
+static __init int dma_debug_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (strncmp(str, "off", 3) == 0) {
+               pr_info("DMA-API: debugging disabled on kernel command line\n");
+               global_disable = true;
+       }
+
+       return 0;
+}
+
+static __init int dma_debug_entries_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+       if (!get_option(&str, &nr_prealloc_entries))
+               nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
+       return 0;
+}
+
+__setup("dma_debug=", dma_debug_cmdline);
+__setup("dma_debug_entries=", dma_debug_entries_cmdline);
+
+static void check_unmap(struct dma_debug_entry *ref)
+{
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       bucket = get_hash_bucket(ref, &flags);
+       entry = bucket_find_exact(bucket, ref);
+
+       if (!entry) {
+               /* must drop lock before calling dma_mapping_error */
+               put_hash_bucket(bucket, &flags);
+
+               if (dma_mapping_error(ref->dev, ref->dev_addr)) {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free an "
+                                  "invalid DMA memory address\n");
+               } else {
+                       err_printk(ref->dev, NULL,
+                                  "DMA-API: device driver tries to free DMA "
+                                  "memory it has not allocated [device "
+                                  "address=0x%016llx] [size=%llu bytes]\n",
+                                  ref->dev_addr, ref->size);
+               }
+               return;
+       }
+
+       if (ref->size != entry->size) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with different size "
+                          "[device address=0x%016llx] [map size=%llu bytes] "
+                          "[unmap size=%llu bytes]\n",
+                          ref->dev_addr, entry->size, ref->size);
+       }
+
+       if (ref->type != entry->type) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with wrong function "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped as %s] [unmapped as %s]\n",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type], type2name[ref->type]);
+       } else if ((entry->type == dma_debug_coherent) &&
+                  (phys_addr(ref) != phys_addr(entry))) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with different CPU address "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[cpu alloc address=0x%016llx] "
+                          "[cpu free address=0x%016llx]",
+                          ref->dev_addr, ref->size,
+                          phys_addr(entry),
+                          phys_addr(ref));
+       }
+
+       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
+           ref->sg_call_ents != entry->sg_call_ents) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA sg list with different entry count "
+                          "[map count=%d] [unmap count=%d]\n",
+                          entry->sg_call_ents, ref->sg_call_ents);
+       }
+
+       /*
+        * This may not be a bug in reality - but most implementations of the
+        * DMA API don't handle this properly, so check for it here
+        */
+       if (ref->direction != entry->direction) {
+               err_printk(ref->dev, entry, "DMA-API: device driver frees "
+                          "DMA memory with different direction "
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped with %s] [unmapped with %s]\n",
+                          ref->dev_addr, ref->size,
+                          dir2name[entry->direction],
+                          dir2name[ref->direction]);
+       }
+
+       /*
+        * Drivers should use dma_mapping_error() to check the returned
+        * addresses of dma_map_single() and dma_map_page().
+        * If not, print this warning message. See Documentation/DMA-API.txt.
+        */
+       if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+               err_printk(ref->dev, entry,
+                          "DMA-API: device driver failed to check map error"
+                          "[device address=0x%016llx] [size=%llu bytes] "
+                          "[mapped as %s]",
+                          ref->dev_addr, ref->size,
+                          type2name[entry->type]);
+       }
+
+       hash_bucket_del(entry);
+       dma_entry_free(entry);
+
+       put_hash_bucket(bucket, &flags);
+}
+
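+/*
+ * Sketch of a mismatch the above would flag, in hypothetical driver code:
+ *
+ *   addr = dma_map_single(dev, buf, 64, DMA_TO_DEVICE);
+ *   ...
+ *   dma_unmap_single(dev, addr, 128, DMA_TO_DEVICE);
+ *
+ * triggers the "different size" report; unmapping with dma_unmap_page()
+ * instead would trigger "wrong function", since the entry was recorded
+ * with type dma_debug_single.
+ */
+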
+static void check_for_stack(struct device *dev,
+                           struct page *page, size_t offset)
+{
+       void *addr;
+       struct vm_struct *stack_vm_area = task_stack_vm_area(current);
+
+       if (!stack_vm_area) {
+               /* Stack is direct-mapped. */
+               if (PageHighMem(page))
+                       return;
+               addr = page_address(page) + offset;
+               if (object_is_on_stack(addr))
+                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
+       } else {
+               /* Stack is vmalloced. */
+               int i;
+
+               for (i = 0; i < stack_vm_area->nr_pages; i++) {
+                       if (page != stack_vm_area->pages[i])
+                               continue;
+
+                       addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
+                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
+                       break;
+               }
+       }
+}
+
+static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
+{
+       unsigned long a1 = (unsigned long)addr;
+       unsigned long b1 = a1 + len;
+       unsigned long a2 = (unsigned long)start;
+       unsigned long b2 = (unsigned long)end;
+
+       return !(b1 <= a2 || a1 >= b2);
+}
+
+static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
+{
+       if (overlap(addr, len, _stext, _etext) ||
+           overlap(addr, len, __start_rodata, __end_rodata))
+               err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
+}
+
+static void check_sync(struct device *dev,
+                      struct dma_debug_entry *ref,
+                      bool to_cpu)
+{
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       bucket = get_hash_bucket(ref, &flags);
+
+       entry = bucket_find_contain(&bucket, ref, &flags);
+
+       if (!entry) {
+               err_printk(dev, NULL, "DMA-API: device driver tries "
+                               "to sync DMA memory it has not allocated "
+                               "[device address=0x%016llx] [size=%llu bytes]\n",
+                               (unsigned long long)ref->dev_addr, ref->size);
+               goto out;
+       }
+
+       if (ref->size > entry->size) {
+               err_printk(dev, entry, "DMA-API: device driver syncs"
+                               " DMA memory outside allocated range "
+                               "[device address=0x%016llx] "
+                               "[allocation size=%llu bytes] "
+                               "[sync offset+size=%llu]\n",
+                               entry->dev_addr, entry->size,
+                               ref->size);
+       }
+
+       if (entry->direction == DMA_BIDIRECTIONAL)
+               goto out;
+
+       if (ref->direction != entry->direction) {
+               err_printk(dev, entry, "DMA-API: device driver syncs "
+                               "DMA memory with different direction "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [synced with %s]\n",
+                               (unsigned long long)ref->dev_addr, entry->size,
+                               dir2name[entry->direction],
+                               dir2name[ref->direction]);
+       }
+
+       if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
+                     !(ref->direction == DMA_TO_DEVICE))
+               err_printk(dev, entry, "DMA-API: device driver syncs "
+                               "device read-only DMA memory for cpu "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [synced with %s]\n",
+                               (unsigned long long)ref->dev_addr, entry->size,
+                               dir2name[entry->direction],
+                               dir2name[ref->direction]);
+
+       if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
+                      !(ref->direction == DMA_FROM_DEVICE))
+               err_printk(dev, entry, "DMA-API: device driver syncs "
+                               "device write-only DMA memory to device "
+                               "[device address=0x%016llx] [size=%llu bytes] "
+                               "[mapped with %s] [synced with %s]\n",
+                               (unsigned long long)ref->dev_addr, entry->size,
+                               dir2name[entry->direction],
+                               dir2name[ref->direction]);
+
+       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
+           ref->sg_call_ents != entry->sg_call_ents) {
+               err_printk(ref->dev, entry, "DMA-API: device driver syncs "
+                          "DMA sg list with different entry count "
+                          "[map count=%d] [sync count=%d]\n",
+                          entry->sg_call_ents, ref->sg_call_ents);
+       }
+
+out:
+       put_hash_bucket(bucket, &flags);
+}
+
+static void check_sg_segment(struct device *dev, struct scatterlist *sg)
+{
+#ifdef CONFIG_DMA_API_DEBUG_SG
+       unsigned int max_seg = dma_get_max_seg_size(dev);
+       u64 start, end, boundary = dma_get_seg_boundary(dev);
+
+       /*
+        * Either the driver forgot to set dma_parms appropriately, or
+        * whoever generated the list forgot to check them.
+        */
+       if (sg->length > max_seg)
+               err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
+                          sg->length, max_seg);
+       /*
+        * In some cases this could potentially be the DMA API
+        * implementation's fault, but it would usually imply that
+        * the scatterlist was built inappropriately to begin with.
+        */
+       start = sg_dma_address(sg);
+       end = start + sg_dma_len(sg) - 1;
+       if ((start ^ end) & ~boundary)
+               err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
+                          start, end, boundary);
+#endif
+}
+
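+/*
+ * The limits checked above come from dev->dma_parms; a driver that owns
+ * them would declare its capabilities with something like (sketch):
+ *
+ *   dma_set_max_seg_size(dev, SZ_64K);
+ *   dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
+ */
+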
+void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
+                       size_t size, int direction, dma_addr_t dma_addr,
+                       bool map_single)
+{
+       struct dma_debug_entry *entry;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       if (dma_mapping_error(dev, dma_addr))
+               return;
+
+       entry = dma_entry_alloc();
+       if (!entry)
+               return;
+
+       entry->dev       = dev;
+       entry->type      = dma_debug_page;
+       entry->pfn       = page_to_pfn(page);
+       entry->offset    = offset;
+       entry->dev_addr  = dma_addr;
+       entry->size      = size;
+       entry->direction = direction;
+       entry->map_err_type = MAP_ERR_NOT_CHECKED;
+
+       if (map_single)
+               entry->type = dma_debug_single;
+
+       check_for_stack(dev, page, offset);
+
+       if (!PageHighMem(page)) {
+               void *addr = page_address(page) + offset;
+
+               check_for_illegal_area(dev, addr, size);
+       }
+
+       add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_page);
+
+void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       struct dma_debug_entry ref;
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.dev = dev;
+       ref.dev_addr = dma_addr;
+       bucket = get_hash_bucket(&ref, &flags);
+
+       list_for_each_entry(entry, &bucket->list, list) {
+               if (!exact_match(&ref, entry))
+                       continue;
+
+               /*
+                * The same physical address can be mapped multiple
+                * times. Without a hardware IOMMU this results in the
+                * same device addresses being put into the dma-debug
+                * hash multiple times too. This can result in false
+                * positives being reported. Therefore we implement a
+                * best-fit algorithm here which updates the first entry
+                * from the hash which fits the reference value and is
+                * not currently listed as being checked.
+                */
+               if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+                       entry->map_err_type = MAP_ERR_CHECKED;
+                       break;
+               }
+       }
+
+       put_hash_bucket(bucket, &flags);
+}
+EXPORT_SYMBOL(debug_dma_mapping_error);
+
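+/*
+ * MAP_ERR_NOT_CHECKED is cleared above when a driver does the right thing
+ * after mapping (sketch):
+ *
+ *   dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *   if (dma_mapping_error(dev, addr))
+ *           return -ENOMEM;
+ */
+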
+void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
+                         size_t size, int direction, bool map_single)
+{
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_page,
+               .dev            = dev,
+               .dev_addr       = addr,
+               .size           = size,
+               .direction      = direction,
+       };
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       if (map_single)
+               ref.type = dma_debug_single;
+
+       check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_page);
+
+void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
+                     int nents, int mapped_ents, int direction)
+{
+       struct dma_debug_entry *entry;
+       struct scatterlist *s;
+       int i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sg, s, mapped_ents, i) {
+               entry = dma_entry_alloc();
+               if (!entry)
+                       return;
+
+               entry->type           = dma_debug_sg;
+               entry->dev            = dev;
+               entry->pfn            = page_to_pfn(sg_page(s));
+               entry->offset         = s->offset;
+               entry->size           = sg_dma_len(s);
+               entry->dev_addr       = sg_dma_address(s);
+               entry->direction      = direction;
+               entry->sg_call_ents   = nents;
+               entry->sg_mapped_ents = mapped_ents;
+
+               check_for_stack(dev, sg_page(s), s->offset);
+
+               if (!PageHighMem(sg_page(s))) {
+                       check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
+               }
+
+               check_sg_segment(dev, s);
+
+               add_dma_entry(entry);
+       }
+}
+EXPORT_SYMBOL(debug_dma_map_sg);
+
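+/*
+ * The sg_call_ents/sg_mapped_ents bookkeeping above matches the usual
+ * caller pattern (sketch; program_hw() is hypothetical): dma_map_sg()
+ * may coalesce entries, so the hardware is programmed from its return
+ * value while dma_unmap_sg() takes the original nents:
+ *
+ *   count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ *   for_each_sg(sgl, sg, count, i)
+ *           program_hw(sg_dma_address(sg), sg_dma_len(sg));
+ *   dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
+ */
+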
+static int get_nr_mapped_entries(struct device *dev,
+                                struct dma_debug_entry *ref)
+{
+       struct dma_debug_entry *entry;
+       struct hash_bucket *bucket;
+       unsigned long flags;
+       int mapped_ents;
+
+       bucket       = get_hash_bucket(ref, &flags);
+       entry        = bucket_find_exact(bucket, ref);
+       mapped_ents  = 0;
+
+       if (entry)
+               mapped_ents = entry->sg_mapped_ents;
+       put_hash_bucket(bucket, &flags);
+
+       return mapped_ents;
+}
+
+void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+                       int nelems, int dir)
+{
+       struct scatterlist *s;
+       int mapped_ents = 0, i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sglist, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .pfn            = page_to_pfn(sg_page(s)),
+                       .offset         = s->offset,
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = dir,
+                       .sg_call_ents   = nelems,
+               };
+
+               if (mapped_ents && i >= mapped_ents)
+                       break;
+
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+               check_unmap(&ref);
+       }
+}
+EXPORT_SYMBOL(debug_dma_unmap_sg);
+
+void debug_dma_alloc_coherent(struct device *dev, size_t size,
+                             dma_addr_t dma_addr, void *virt)
+{
+       struct dma_debug_entry *entry;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       if (unlikely(virt == NULL))
+               return;
+
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
+               return;
+
+       entry = dma_entry_alloc();
+       if (!entry)
+               return;
+
+       entry->type      = dma_debug_coherent;
+       entry->dev       = dev;
+       entry->offset    = offset_in_page(virt);
+       entry->size      = size;
+       entry->dev_addr  = dma_addr;
+       entry->direction = DMA_BIDIRECTIONAL;
+
+       if (is_vmalloc_addr(virt))
+               entry->pfn = vmalloc_to_pfn(virt);
+       else
+               entry->pfn = page_to_pfn(virt_to_page(virt));
+
+       add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_alloc_coherent);
+
+void debug_dma_free_coherent(struct device *dev, size_t size,
+                        void *virt, dma_addr_t addr)
+{
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_coherent,
+               .dev            = dev,
+               .offset         = offset_in_page(virt),
+               .dev_addr       = addr,
+               .size           = size,
+               .direction      = DMA_BIDIRECTIONAL,
+       };
+
+       /* handle vmalloc and linear addresses */
+       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
+               return;
+
+       if (is_vmalloc_addr(virt))
+               ref.pfn = vmalloc_to_pfn(virt);
+       else
+               ref.pfn = page_to_pfn(virt_to_page(virt));
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_free_coherent);
+
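+/*
+ * The pair tracked by the two helpers above (sketch):
+ *
+ *   void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
+ *   ...
+ *   dma_free_coherent(dev, size, cpu, handle);
+ *
+ * check_unmap() compares the size, the device address and (via the
+ * recorded pfn + offset) the CPU address of the two calls.
+ */
+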
+void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
+                           int direction, dma_addr_t dma_addr)
+{
+       struct dma_debug_entry *entry;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       entry = dma_entry_alloc();
+       if (!entry)
+               return;
+
+       entry->type             = dma_debug_resource;
+       entry->dev              = dev;
+       entry->pfn              = PHYS_PFN(addr);
+       entry->offset           = offset_in_page(addr);
+       entry->size             = size;
+       entry->dev_addr         = dma_addr;
+       entry->direction        = direction;
+       entry->map_err_type     = MAP_ERR_NOT_CHECKED;
+
+       add_dma_entry(entry);
+}
+EXPORT_SYMBOL(debug_dma_map_resource);
+
+void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
+                             size_t size, int direction)
+{
+       struct dma_debug_entry ref = {
+               .type           = dma_debug_resource,
+               .dev            = dev,
+               .dev_addr       = dma_addr,
+               .size           = size,
+               .direction      = direction,
+       };
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       check_unmap(&ref);
+}
+EXPORT_SYMBOL(debug_dma_unmap_resource);
+
+void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+                                  size_t size, int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, true);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
+
+void debug_dma_sync_single_for_device(struct device *dev,
+                                     dma_addr_t dma_handle, size_t size,
+                                     int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, false);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_for_device);
+
+void debug_dma_sync_single_range_for_cpu(struct device *dev,
+                                        dma_addr_t dma_handle,
+                                        unsigned long offset, size_t size,
+                                        int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = offset + size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, true);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
+
+void debug_dma_sync_single_range_for_device(struct device *dev,
+                                           dma_addr_t dma_handle,
+                                           unsigned long offset,
+                                           size_t size, int direction)
+{
+       struct dma_debug_entry ref;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       ref.type         = dma_debug_single;
+       ref.dev          = dev;
+       ref.dev_addr     = dma_handle;
+       ref.size         = offset + size;
+       ref.direction    = direction;
+       ref.sg_call_ents = 0;
+
+       check_sync(dev, &ref, false);
+}
+EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
+
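+/*
+ * Note the convention in the two *_range helpers above: ref.size is set
+ * to offset + size, so check_sync() can verify against the size of the
+ * original mapping, e.g. (sketch):
+ *
+ *   dma_sync_single_range_for_cpu(dev, handle, 256, 512, DMA_FROM_DEVICE);
+ *
+ * is valid for a mapping of at least 768 bytes.
+ */
+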
+void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+                              int nelems, int direction)
+{
+       struct scatterlist *s;
+       int mapped_ents = 0, i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sg, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .pfn            = page_to_pfn(sg_page(s)),
+                       .offset         = s->offset,
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+                       .sg_call_ents   = nelems,
+               };
+
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+               if (i >= mapped_ents)
+                       break;
+
+               check_sync(dev, &ref, true);
+       }
+}
+EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
+
+void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+                                 int nelems, int direction)
+{
+       struct scatterlist *s;
+       int mapped_ents = 0, i;
+
+       if (unlikely(dma_debug_disabled()))
+               return;
+
+       for_each_sg(sg, s, nelems, i) {
+
+               struct dma_debug_entry ref = {
+                       .type           = dma_debug_sg,
+                       .dev            = dev,
+                       .pfn            = page_to_pfn(sg_page(s)),
+                       .offset         = s->offset,
+                       .dev_addr       = sg_dma_address(s),
+                       .size           = sg_dma_len(s),
+                       .direction      = direction,
+                       .sg_call_ents   = nelems,
+               };
+               if (!i)
+                       mapped_ents = get_nr_mapped_entries(dev, &ref);
+
+               if (i >= mapped_ents)
+                       break;
+
+               check_sync(dev, &ref, false);
+       }
+}
+EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
+
+static int __init dma_debug_driver_setup(char *str)
+{
+       int i;
+
+       for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
+               current_driver_name[i] = *str;
+               if (*str == 0)
+                       break;
+       }
+
+       if (current_driver_name[0])
+               pr_info("DMA-API: enable driver filter for driver [%s]\n",
+                       current_driver_name);
+
+       return 1;
+}
+__setup("dma_debug_driver=", dma_debug_driver_setup);
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
new file mode 100644 (file)
index 0000000..8be8106
--- /dev/null
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map physical memory directly without using an IOMMU or
+ * flushing caches.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
+#include <linux/pfn.h>
+#include <linux/set_memory.h>
+
+#define DIRECT_MAPPING_ERROR           0
+
+/*
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but
+ * some use it for entirely different regions.
+ */
+#ifndef ARCH_ZONE_DMA_BITS
+#define ARCH_ZONE_DMA_BITS 24
+#endif
+
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+       return sev_active();
+}
+
+static bool
+check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
+               const char *caller)
+{
+       if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+               if (!dev->dma_mask) {
+                       dev_err(dev,
+                               "%s: call on device without dma_mask\n",
+                               caller);
+                       return false;
+               }
+
+               if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+                       dev_err(dev,
+                               "%s: overflow %pad+%zu of device mask %llx\n",
+                               caller, &dma_addr, size, *dev->dma_mask);
+               }
+               return false;
+       }
+       return true;
+}
+
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+{
+       dma_addr_t addr = force_dma_unencrypted() ?
+               __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+       return addr + size - 1 <= dev->coherent_dma_mask;
+}
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       int page_order = get_order(size);
+       struct page *page = NULL;
+       void *ret;
+
+       /* we always manually zero the memory once we are done: */
+       gfp &= ~__GFP_ZERO;
+
+       /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
+       if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+               gfp |= GFP_DMA;
+       if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+               gfp |= GFP_DMA32;
+
+again:
+       /* CMA can be used only in a context which permits sleeping */
+       if (gfpflags_allow_blocking(gfp)) {
+               page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+               if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+                       dma_release_from_contiguous(dev, page, count);
+                       page = NULL;
+               }
+       }
+       if (!page)
+               page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
+
+       if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+               __free_pages(page, page_order);
+               page = NULL;
+
+               if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+                   dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
+                   !(gfp & (GFP_DMA32 | GFP_DMA))) {
+                       gfp |= GFP_DMA32;
+                       goto again;
+               }
+
+               if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+                   dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+                   !(gfp & GFP_DMA)) {
+                       gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+                       goto again;
+               }
+       }
+
+       if (!page)
+               return NULL;
+       ret = page_address(page);
+       if (force_dma_unencrypted()) {
+               set_memory_decrypted((unsigned long)ret, 1 << page_order);
+               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
+       } else {
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
+       }
+       memset(ret, 0, size);
+       return ret;
+}
+
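+/*
+ * Worked example of the retry logic above, assuming both ZONE_DMA and
+ * ZONE_DMA32 are configured: for coherent_dma_mask = DMA_BIT_MASK(30)
+ * the first attempt already carries GFP_DMA32; if the page it yields
+ * still fails dma_coherent_ok(), the allocation is retried once more
+ * with GFP_DMA.
+ */
+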
+/*
+ * NOTE: this function must never look at the dma_addr argument, because we want
+ * to be able to use it as a helper for iommu implementations as well.
+ */
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_addr, unsigned long attrs)
+{
+       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned int page_order = get_order(size);
+
+       if (force_dma_unencrypted())
+               set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+       if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+               free_pages((unsigned long)cpu_addr, page_order);
+}
+
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs)
+{
+       dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+
+       if (!check_addr(dev, dma_addr, size, __func__))
+               return DIRECT_MAPPING_ERROR;
+       return dma_addr;
+}
+
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+               enum dma_data_direction dir, unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sgl, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+
+               sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
+               if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
+                       return 0;
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nents;
+}
+
+int dma_direct_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_ZONE_DMA
+       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+               return 0;
+#else
+       /*
+        * Because 32-bit DMA masks are so common we expect every architecture
+        * to be able to satisfy them - either by not supporting more physical
+        * memory, or by providing a ZONE_DMA32.  If neither is the case, the
+        * architecture needs to use an IOMMU instead of the direct mapping.
+        */
+       if (mask < DMA_BIT_MASK(32))
+               return 0;
+#endif
+       /*
+        * Various PCI/PCIe bridges have broken support for > 32bit DMA even
+        * if the device itself might support it.
+        */
+       if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
+               return 0;
+       return 1;
+}
+
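+/*
+ * This hook is what ultimately answers a driver's mask negotiation
+ * (sketch):
+ *
+ *   if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
+ *       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
+ *           return -EIO;
+ *
+ * dma_set_mask() fails with -EIO when .dma_supported returns 0.
+ */
+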
+int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+       return dma_addr == DIRECT_MAPPING_ERROR;
+}
+
+const struct dma_map_ops dma_direct_ops = {
+       .alloc                  = dma_direct_alloc,
+       .free                   = dma_direct_free,
+       .map_page               = dma_direct_map_page,
+       .map_sg                 = dma_direct_map_sg,
+       .dma_supported          = dma_direct_supported,
+       .mapping_error          = dma_direct_mapping_error,
+};
+EXPORT_SYMBOL(dma_direct_ops);
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
new file mode 100644 (file)
index 0000000..d2a92dd
--- /dev/null
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch-independent dma-mapping routines
+ *
+ * Copyright (c) 2006  SUSE Linux Products GmbH
+ * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
+ */
+
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include <linux/export.h>
+#include <linux/gfp.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*
+ * Managed DMA API
+ */
+struct dma_devres {
+       size_t          size;
+       void            *vaddr;
+       dma_addr_t      dma_handle;
+       unsigned long   attrs;
+};
+
+static void dmam_release(struct device *dev, void *res)
+{
+       struct dma_devres *this = res;
+
+       dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
+                       this->attrs);
+}
+
+static int dmam_match(struct device *dev, void *res, void *match_data)
+{
+       struct dma_devres *this = res, *match = match_data;
+
+       if (this->vaddr == match->vaddr) {
+               WARN_ON(this->size != match->size ||
+                       this->dma_handle != match->dma_handle);
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * dmam_alloc_coherent - Managed dma_alloc_coherent()
+ * @dev: Device to allocate coherent memory for
+ * @size: Size of allocation
+ * @dma_handle: Out argument for allocated DMA handle
+ * @gfp: Allocation flags
+ *
+ * Managed dma_alloc_coherent().  Memory allocated using this function
+ * will be automatically released on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *dmam_alloc_coherent(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t gfp)
+{
+       struct dma_devres *dr;
+       void *vaddr;
+
+       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
+       if (!dr)
+               return NULL;
+
+       vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
+       if (!vaddr) {
+               devres_free(dr);
+               return NULL;
+       }
+
+       dr->vaddr = vaddr;
+       dr->dma_handle = *dma_handle;
+       dr->size = size;
+
+       devres_add(dev, dr);
+
+       return vaddr;
+}
+EXPORT_SYMBOL(dmam_alloc_coherent);
+
+/**
+ * dmam_free_coherent - Managed dma_free_coherent()
+ * @dev: Device to free coherent memory for
+ * @size: Size of allocation
+ * @vaddr: Virtual address of the memory to free
+ * @dma_handle: DMA handle of the memory to free
+ *
+ * Managed dma_free_coherent().
+ */
+void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+                       dma_addr_t dma_handle)
+{
+       struct dma_devres match_data = { size, vaddr, dma_handle };
+
+       dma_free_coherent(dev, size, vaddr, dma_handle);
+       WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
+}
+EXPORT_SYMBOL(dmam_free_coherent);
+
+/**
+ * dmam_alloc_attrs - Managed dma_alloc_attrs()
+ * @dev: Device to allocate non-coherent memory for
+ * @size: Size of allocation
+ * @dma_handle: Out argument for allocated DMA handle
+ * @gfp: Allocation flags
+ * @attrs: Flags in the DMA_ATTR_* namespace.
+ *
+ * Managed dma_alloc_attrs().  Memory allocated using this function will be
+ * automatically released on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated memory on success, NULL on failure.
+ */
+void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       struct dma_devres *dr;
+       void *vaddr;
+
+       dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
+       if (!dr)
+               return NULL;
+
+       vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
+       if (!vaddr) {
+               devres_free(dr);
+               return NULL;
+       }
+
+       dr->vaddr = vaddr;
+       dr->dma_handle = *dma_handle;
+       dr->size = size;
+       dr->attrs = attrs;
+
+       devres_add(dev, dr);
+
+       return vaddr;
+}
+EXPORT_SYMBOL(dmam_alloc_attrs);
+
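+/*
+ * Sketch of managed use in a hypothetical probe(): the buffer is freed
+ * automatically on driver detach, so no explicit error-path or remove()
+ * cleanup is needed:
+ *
+ *   buf = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma, GFP_KERNEL,
+ *                          DMA_ATTR_WRITE_COMBINE);
+ *   if (!buf)
+ *           return -ENOMEM;
+ */
+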
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+
+static void dmam_coherent_decl_release(struct device *dev, void *res)
+{
+       dma_release_declared_memory(dev);
+}
+
+/**
+ * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
+ * @dev: Device to declare coherent memory for
+ * @phys_addr: Physical address of coherent memory to be declared
+ * @device_addr: Device address of coherent memory to be declared
+ * @size: Size of coherent memory to be declared
+ * @flags: Flags
+ *
+ * Managed dma_declare_coherent_memory().
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+                                dma_addr_t device_addr, size_t size, int flags)
+{
+       void *res;
+       int rc;
+
+       res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
+       if (!res)
+               return -ENOMEM;
+
+       rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
+                                        flags);
+       if (!rc)
+               devres_add(dev, res);
+       else
+               devres_free(res);
+
+       return rc;
+}
+EXPORT_SYMBOL(dmam_declare_coherent_memory);
+
+/**
+ * dmam_release_declared_memory - Managed dma_release_declared_memory()
+ * @dev: Device to release declared coherent memory for
+ *
+ * Managed dma_release_declared_memory().
+ */
+void dmam_release_declared_memory(struct device *dev)
+{
+       WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
+}
+EXPORT_SYMBOL(dmam_release_declared_memory);
+
+#endif
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ */
+int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+                void *cpu_addr, dma_addr_t handle, size_t size)
+{
+       struct page *page = virt_to_page(cpu_addr);
+       int ret;
+
+       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+       if (unlikely(ret))
+               return ret;
+
+       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+       return 0;
+}
+EXPORT_SYMBOL(dma_common_get_sgtable);
+
+/*
+ * Create userspace mapping for the DMA-coherent memory.
+ */
+int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+                   void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+       int ret = -ENXIO;
+#ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
+       unsigned long user_count = vma_pages(vma);
+       unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
+
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+       if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+               return ret;
+
+       if (off < count && user_count <= (count - off))
+               ret = remap_pfn_range(vma, vma->vm_start,
+                                     page_to_pfn(virt_to_page(cpu_addr)) + off,
+                                     user_count << PAGE_SHIFT,
+                                     vma->vm_page_prot);
+#endif /* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+
+       return ret;
+}
+EXPORT_SYMBOL(dma_common_mmap);
+
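+/*
+ * Typically reached via dma_mmap_coherent() from a driver's .mmap handler
+ * (sketch; foo is a hypothetical driver context):
+ *
+ *   static int foo_mmap(struct file *file, struct vm_area_struct *vma)
+ *   {
+ *           return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
+ *                                    foo->dma_handle, foo->size);
+ *   }
+ */
+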
+#ifdef CONFIG_MMU
+static struct vm_struct *__dma_common_pages_remap(struct page **pages,
+                       size_t size, unsigned long vm_flags, pgprot_t prot,
+                       const void *caller)
+{
+       struct vm_struct *area;
+
+       area = get_vm_area_caller(size, vm_flags, caller);
+       if (!area)
+               return NULL;
+
+       if (map_vm_area(area, prot, pages)) {
+               vunmap(area->addr);
+               return NULL;
+       }
+
+       return area;
+}
+
+/*
+ * remaps an array of PAGE_SIZE pages into another vm_area
+ * Cannot be used in non-sleeping contexts.
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                       unsigned long vm_flags, pgprot_t prot,
+                       const void *caller)
+{
+       struct vm_struct *area;
+
+       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+       if (!area)
+               return NULL;
+
+       area->pages = pages;
+
+       return area->addr;
+}
+
+/*
+ * remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts.
+ */
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                       unsigned long vm_flags,
+                       pgprot_t prot, const void *caller)
+{
+       int i;
+       struct page **pages;
+       struct vm_struct *area;
+
+       pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       for (i = 0; i < (size >> PAGE_SHIFT); i++)
+               pages[i] = nth_page(page, i);
+
+       area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+       kfree(pages);
+
+       if (!area)
+               return NULL;
+       return area->addr;
+}
+
+/*
+ * unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+       struct vm_struct *area = find_vm_area(cpu_addr);
+
+       if (!area || (area->flags & vm_flags) != vm_flags) {
+               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+               return;
+       }
+
+       unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
+       vunmap(cpu_addr);
+}
+#endif
+
+/*
+ * enables DMA API use for a device
+ */
+int dma_configure(struct device *dev)
+{
+       if (dev->bus->dma_configure)
+               return dev->bus->dma_configure(dev);
+       return 0;
+}
+
+void dma_deconfigure(struct device *dev)
+{
+       of_dma_deconfigure(dev);
+       acpi_dma_deconfigure(dev);
+}
diff --git a/kernel/dma/noncoherent.c b/kernel/dma/noncoherent.c
new file mode 100644 (file)
index 0000000..79e9a75
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Christoph Hellwig.
+ *
+ * DMA operations that map physical memory directly without providing cache
+ * coherence.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/scatterlist.h>
+
+static void dma_noncoherent_sync_single_for_device(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_device(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
+}
+
+static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t size, enum dma_data_direction dir,
+               unsigned long attrs)
+{
+       dma_addr_t addr;
+
+       addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+       if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
+                               size, dir);
+       return addr;
+}
+
+static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
+       if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
+       return nents;
+}
+
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
+               dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+       arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
+}
+
+static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
+               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nents, i)
+               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
+}
+
+static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
+               size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+               dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
+}
+#endif
+
+const struct dma_map_ops dma_noncoherent_ops = {
+       .alloc                  = arch_dma_alloc,
+       .free                   = arch_dma_free,
+       .mmap                   = arch_dma_mmap,
+       .sync_single_for_device = dma_noncoherent_sync_single_for_device,
+       .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
+       .map_page               = dma_noncoherent_map_page,
+       .map_sg                 = dma_noncoherent_map_sg,
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
+       .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
+       .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
+       .unmap_page             = dma_noncoherent_unmap_page,
+       .unmap_sg               = dma_noncoherent_unmap_sg,
+#endif
+       .dma_supported          = dma_direct_supported,
+       .mapping_error          = dma_direct_mapping_error,
+       .cache_sync             = arch_dma_cache_sync,
+};
+EXPORT_SYMBOL(dma_noncoherent_ops);
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
new file mode 100644 (file)
index 0000000..9045410
--- /dev/null
@@ -0,0 +1,1088 @@
+/*
+ * Dynamic DMA mapping support.
+ *
+ * This implementation is a fallback for platforms that do not support
+ * I/O TLBs (aka DMA address translation hardware).
+ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
+ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
+ * Copyright (C) 2000, 2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 03/05/07 davidm     Switch from PCI-DMA to generic device DMA API.
+ * 00/12/13 davidm     Rename to swiotlb.c and add mark_clean() to avoid
+ *                     unnecessary i-cache flushing.
+ * 04/07/.. ak         Better overflow handling. Assorted fixes.
+ * 05/09/10 linville   Add support for syncing ranges, support syncing for
+ *                     DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
+ * 08/12/11 beckyb     Add highmem support
+ */
+
+#include <linux/cache.h>
+#include <linux/dma-direct.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/swiotlb.h>
+#include <linux/pfn.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+#include <linux/scatterlist.h>
+#include <linux/mem_encrypt.h>
+#include <linux/set_memory.h>
+
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/swiotlb.h>
+
+#define OFFSET(val,align) ((unsigned long)     \
+                          ( (val) & ( (align) - 1)))
+
+#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
+
+/*
+ * Minimum IO TLB size to bother booting with.  Systems with mainly
+ * 64-bit capable cards will only lightly use the swiotlb.  If we can't
+ * allocate a contiguous 1MB, we're probably in trouble anyway.
+ */
+#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
+
+enum swiotlb_force swiotlb_force;
+
+/*
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
+ * API.
+ */
+static phys_addr_t io_tlb_start, io_tlb_end;
+
+/*
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
+ * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
+ */
+static unsigned long io_tlb_nslabs;
+
+/*
+ * When the IOMMU overflows we return a fallback buffer. This sets the size.
+ */
+static unsigned long io_tlb_overflow = 32*1024;
+
+static phys_addr_t io_tlb_overflow_buffer;
+
+/*
+ * This is a free list describing the number of free entries available from
+ * each index
+ */
+static unsigned int *io_tlb_list;
+static unsigned int io_tlb_index;
+
+/*
+ * Max segment that we can provide which (if pages are contiguous) will
+ * not be bounced (unless SWIOTLB_FORCE is set).
+ */
+unsigned int max_segment;
+
+/*
+ * We need to save away the original address corresponding to a mapped entry
+ * for the sync operations.
+ */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+static phys_addr_t *io_tlb_orig_addr;
+
+/*
+ * Protect the above data structures in the map and unmap calls
+ */
+static DEFINE_SPINLOCK(io_tlb_lock);
+
+static int late_alloc;
+
+static int __init
+setup_io_tlb_npages(char *str)
+{
+       if (isdigit(*str)) {
+               io_tlb_nslabs = simple_strtoul(str, &str, 0);
+               /* avoid tail segment of size < IO_TLB_SEGSIZE */
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+       if (*str == ',')
+               ++str;
+       if (!strcmp(str, "force")) {
+               swiotlb_force = SWIOTLB_FORCE;
+       } else if (!strcmp(str, "noforce")) {
+               swiotlb_force = SWIOTLB_NO_FORCE;
+               io_tlb_nslabs = 1;
+       }
+
+       return 0;
+}
+early_param("swiotlb", setup_io_tlb_npages);
+/* make io_tlb_overflow tunable too? */
+
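+/*
+ * Example (illustrative): "swiotlb=32768" reserves 32768 slabs (64MB at
+ * the 2KB slab size implied by IO_TLB_SHIFT), while "swiotlb=32768,force"
+ * additionally bounces all DMA through the IO TLB even for devices that
+ * could address the memory directly.
+ */
+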
+unsigned long swiotlb_nr_tbl(void)
+{
+       return io_tlb_nslabs;
+}
+EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
+
+unsigned int swiotlb_max_segment(void)
+{
+       return max_segment;
+}
+EXPORT_SYMBOL_GPL(swiotlb_max_segment);
+
+void swiotlb_set_max_segment(unsigned int val)
+{
+       if (swiotlb_force == SWIOTLB_FORCE)
+               max_segment = 1;
+       else
+               max_segment = rounddown(val, PAGE_SIZE);
+}
+
+/* default to 64MB */
+#define IO_TLB_DEFAULT_SIZE (64UL<<20)
+unsigned long swiotlb_size_or_default(void)
+{
+       unsigned long size;
+
+       size = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       return size ? size : (IO_TLB_DEFAULT_SIZE);
+}
+
+static bool no_iotlb_memory;
+
+void swiotlb_print_info(void)
+{
+       unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+       unsigned char *vstart, *vend;
+
+       if (no_iotlb_memory) {
+               pr_warn("software IO TLB: No low mem\n");
+               return;
+       }
+
+       vstart = phys_to_virt(io_tlb_start);
+       vend = phys_to_virt(io_tlb_end);
+
+       printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+              (unsigned long long)io_tlb_start,
+              (unsigned long long)io_tlb_end,
+              bytes >> 20, vstart, vend - 1);
+}
+
+/*
+ * Early SWIOTLB allocation may be too early to allow an architecture to
+ * perform the desired operations.  This function allows the architecture to
+ * call SWIOTLB when the operations are possible.  It needs to be called
+ * before the SWIOTLB memory is used.
+ */
+void __init swiotlb_update_mem_attributes(void)
+{
+       void *vaddr;
+       unsigned long bytes;
+
+       if (no_iotlb_memory || late_alloc)
+               return;
+
+       vaddr = phys_to_virt(io_tlb_start);
+       bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
+       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+       memset(vaddr, 0, bytes);
+
+       vaddr = phys_to_virt(io_tlb_overflow_buffer);
+       bytes = PAGE_ALIGN(io_tlb_overflow);
+       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+       memset(vaddr, 0, bytes);
+}
+
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+{
+       void *v_overflow_buffer;
+       unsigned long i, bytes;
+
+       bytes = nslabs << IO_TLB_SHIFT;
+
+       io_tlb_nslabs = nslabs;
+       io_tlb_start = __pa(tlb);
+       io_tlb_end = io_tlb_start + bytes;
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       v_overflow_buffer = memblock_virt_alloc_low_nopanic(
+                                               PAGE_ALIGN(io_tlb_overflow),
+                                               PAGE_SIZE);
+       if (!v_overflow_buffer)
+               return -ENOMEM;
+
+       io_tlb_overflow_buffer = __pa(v_overflow_buffer);
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between io_tlb_start and io_tlb_end.
+        */
+       io_tlb_list = memblock_virt_alloc(
+                               PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
+                               PAGE_SIZE);
+       io_tlb_orig_addr = memblock_virt_alloc(
+                               PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
+                               PAGE_SIZE);
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
+
+       if (verbose)
+               swiotlb_print_info();
+
+       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+       return 0;
+}
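
The initialization loop above seeds io_tlb_list[] so that each entry holds the count of free slabs from that index to the end of its segment. A toy reproduction, assuming the kernel's IO_TLB_SEGSIZE of 128 and its power-of-two OFFSET() macro:

#include <stdio.h>

#define IO_TLB_SEGSIZE		128
#define OFFSET(val, align)	((val) & ((align) - 1))	/* align is a power of two */

int main(void)
{
	unsigned int i;

	/* Prints 128, 127, 126, 125: one descending run per segment. */
	for (i = 0; i < 4; i++)
		printf("io_tlb_list[%u] = %u\n", i,
		       IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE));
	return 0;
}
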
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void  __init
+swiotlb_init(int verbose)
+{
+       size_t default_size = IO_TLB_DEFAULT_SIZE;
+       unsigned char *vstart;
+       unsigned long bytes;
+
+       if (!io_tlb_nslabs) {
+               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+
+       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       /* Get IO TLB memory from the low pages */
+       vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+       if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
+               return;
+
+       if (io_tlb_start)
+               memblock_free_early(io_tlb_start,
+                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+       pr_warn("Cannot allocate SWIOTLB buffer");
+       no_iotlb_memory = true;
+}
+
+/*
+ * Systems with larger DMA zones (those that don't support ISA) can
+ * initialize the swiotlb later using the slab allocator if needed.
+ * This should be just like above, but with some error catching.
+ */
+int
+swiotlb_late_init_with_default_size(size_t default_size)
+{
+       unsigned long bytes, req_nslabs = io_tlb_nslabs;
+       unsigned char *vstart = NULL;
+       unsigned int order;
+       int rc = 0;
+
+       if (!io_tlb_nslabs) {
+               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+       }
+
+       /*
+        * Get IO TLB memory from the low pages
+        */
+       order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
+       io_tlb_nslabs = SLABS_PER_PAGE << order;
+       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
+               vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
+                                                 order);
+               if (vstart)
+                       break;
+               order--;
+       }
+
+       if (!vstart) {
+               io_tlb_nslabs = req_nslabs;
+               return -ENOMEM;
+       }
+       if (order != get_order(bytes)) {
+               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
+                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+               io_tlb_nslabs = SLABS_PER_PAGE << order;
+       }
+       rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
+       if (rc)
+               free_pages((unsigned long)vstart, order);
+
+       return rc;
+}
+
+int
+swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+{
+       unsigned long i, bytes;
+       unsigned char *v_overflow_buffer;
+
+       bytes = nslabs << IO_TLB_SHIFT;
+
+       io_tlb_nslabs = nslabs;
+       io_tlb_start = virt_to_phys(tlb);
+       io_tlb_end = io_tlb_start + bytes;
+
+       set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
+       memset(tlb, 0, bytes);
+
+       /*
+        * Get the overflow emergency buffer
+        */
+       v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
+                                                    get_order(io_tlb_overflow));
+       if (!v_overflow_buffer)
+               goto cleanup2;
+
+       set_memory_decrypted((unsigned long)v_overflow_buffer,
+                       io_tlb_overflow >> PAGE_SHIFT);
+       memset(v_overflow_buffer, 0, io_tlb_overflow);
+       io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
+
+       /*
+        * Allocate and initialize the free list array.  This array is used
+        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
+        * between io_tlb_start and io_tlb_end.
+        */
+       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
+                                     get_order(io_tlb_nslabs * sizeof(int)));
+       if (!io_tlb_list)
+               goto cleanup3;
+
+       io_tlb_orig_addr = (phys_addr_t *)
+               __get_free_pages(GFP_KERNEL,
+                                get_order(io_tlb_nslabs *
+                                          sizeof(phys_addr_t)));
+       if (!io_tlb_orig_addr)
+               goto cleanup4;
+
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
+
+       swiotlb_print_info();
+
+       late_alloc = 1;
+
+       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
+
+       return 0;
+
+cleanup4:
+       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                        sizeof(int)));
+       io_tlb_list = NULL;
+cleanup3:
+       free_pages((unsigned long)v_overflow_buffer,
+                  get_order(io_tlb_overflow));
+       io_tlb_overflow_buffer = 0;
+cleanup2:
+       io_tlb_end = 0;
+       io_tlb_start = 0;
+       io_tlb_nslabs = 0;
+       max_segment = 0;
+       return -ENOMEM;
+}
+
+void __init swiotlb_exit(void)
+{
+       if (!io_tlb_orig_addr)
+               return;
+
+       if (late_alloc) {
+               free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
+                          get_order(io_tlb_overflow));
+               free_pages((unsigned long)io_tlb_orig_addr,
+                          get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
+               free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
+                                                                sizeof(int)));
+               free_pages((unsigned long)phys_to_virt(io_tlb_start),
+                          get_order(io_tlb_nslabs << IO_TLB_SHIFT));
+       } else {
+               memblock_free_late(io_tlb_overflow_buffer,
+                                  PAGE_ALIGN(io_tlb_overflow));
+               memblock_free_late(__pa(io_tlb_orig_addr),
+                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
+               memblock_free_late(__pa(io_tlb_list),
+                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
+               memblock_free_late(io_tlb_start,
+                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+       }
+       io_tlb_nslabs = 0;
+       max_segment = 0;
+}
+
+int is_swiotlb_buffer(phys_addr_t paddr)
+{
+       return paddr >= io_tlb_start && paddr < io_tlb_end;
+}
+
+/*
+ * Bounce: copy the swiotlb buffer back to the original dma location
+ */
+static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
+                          size_t size, enum dma_data_direction dir)
+{
+       unsigned long pfn = PFN_DOWN(orig_addr);
+       unsigned char *vaddr = phys_to_virt(tlb_addr);
+
+       if (PageHighMem(pfn_to_page(pfn))) {
+               /* The buffer does not have a mapping.  Map it in and copy */
+               unsigned int offset = orig_addr & ~PAGE_MASK;
+               char *buffer;
+               unsigned int sz = 0;
+               unsigned long flags;
+
+               while (size) {
+                       sz = min_t(size_t, PAGE_SIZE - offset, size);
+
+                       local_irq_save(flags);
+                       buffer = kmap_atomic(pfn_to_page(pfn));
+                       if (dir == DMA_TO_DEVICE)
+                               memcpy(vaddr, buffer + offset, sz);
+                       else
+                               memcpy(buffer + offset, vaddr, sz);
+                       kunmap_atomic(buffer);
+                       local_irq_restore(flags);
+
+                       size -= sz;
+                       pfn++;
+                       vaddr += sz;
+                       offset = 0;
+               }
+       } else if (dir == DMA_TO_DEVICE) {
+               memcpy(vaddr, phys_to_virt(orig_addr), size);
+       } else {
+               memcpy(phys_to_virt(orig_addr), vaddr, size);
+       }
+}
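
In the highmem branch above, the copy proceeds in at most page-sized chunks, with only the first chunk shortened by the intra-page offset. A minimal model of that chunking arithmetic (PAGE_SIZE and the sample sizes are illustrative):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	size_t size = 10000, offset = 3000, sz;

	while (size) {
		/* clip to the page boundary; later chunks start at offset 0 */
		sz = PAGE_SIZE - offset < size ? PAGE_SIZE - offset : size;
		printf("copy %zu bytes\n", sz);	/* 1096, 4096, 4096, 712 */
		size -= sz;
		offset = 0;
	}
	return 0;
}
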
+
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+                                  dma_addr_t tbl_dma_addr,
+                                  phys_addr_t orig_addr, size_t size,
+                                  enum dma_data_direction dir,
+                                  unsigned long attrs)
+{
+       unsigned long flags;
+       phys_addr_t tlb_addr;
+       unsigned int nslots, stride, index, wrap;
+       int i;
+       unsigned long mask;
+       unsigned long offset_slots;
+       unsigned long max_slots;
+
+       if (no_iotlb_memory)
+               panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
+       if (mem_encrypt_active())
+               pr_warn_once("%s is active and system is using DMA bounce buffers\n",
+                            sme_active() ? "SME" : "SEV");
+
+       mask = dma_get_seg_boundary(hwdev);
+
+       tbl_dma_addr &= mask;
+
+       offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+
+       /*
+        * Carefully handle integer overflow which can occur when mask == ~0UL.
+        */
+       max_slots = mask + 1
+                   ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
+                   : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
+
+       /*
+        * For mappings greater than or equal to a page, we limit the stride
+        * (and hence alignment) to a page size.
+        */
+       nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+       if (size >= PAGE_SIZE)
+               stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+       else
+               stride = 1;
+
+       BUG_ON(!nslots);
+
+       /*
+        * Find a suitable number of IO TLB entries that will fit this
+        * request, and allocate a buffer from that IO TLB pool.
+        */
+       spin_lock_irqsave(&io_tlb_lock, flags);
+       index = ALIGN(io_tlb_index, stride);
+       if (index >= io_tlb_nslabs)
+               index = 0;
+       wrap = index;
+
+       do {
+               while (iommu_is_span_boundary(index, nslots, offset_slots,
+                                             max_slots)) {
+                       index += stride;
+                       if (index >= io_tlb_nslabs)
+                               index = 0;
+                       if (index == wrap)
+                               goto not_found;
+               }
+
+               /*
+                * If we find a slot that indicates we have 'nslots' contiguous
+                * buffers available, we allocate the buffers from that slot
+                * and mark the entries as '0' to indicate they are unavailable.
+                */
+               if (io_tlb_list[index] >= nslots) {
+                       int count = 0;
+
+                       for (i = index; i < (int) (index + nslots); i++)
+                               io_tlb_list[i] = 0;
+                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+                               io_tlb_list[i] = ++count;
+                       tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+                       /*
+                        * Update the indices to avoid searching in the next
+                        * round.
+                        */
+                       io_tlb_index = ((index + nslots) < io_tlb_nslabs
+                                       ? (index + nslots) : 0);
+
+                       goto found;
+               }
+               index += stride;
+               if (index >= io_tlb_nslabs)
+                       index = 0;
+       } while (index != wrap);
+
+not_found:
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
+               dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
+       return SWIOTLB_MAP_ERROR;
+found:
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+
+       /*
+        * Save away the mapping from the original address to the DMA address.
+        * This is needed when we sync the memory.  Then we sync the buffer if
+        * needed.
+        */
+       for (i = 0; i < nslots; i++)
+               io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
+       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+           (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
+
+       return tlb_addr;
+}
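
One subtlety worth unpacking is the max_slots computation: when a device has no segment boundary, dma_get_seg_boundary() yields ~0UL, so mask + 1 wraps to zero and the code falls back to the widest possible slot count. A stand-alone check of that arithmetic (the helper name and sample masks are illustrative):

#include <stdio.h>

#define IO_TLB_SHIFT	11
#define BITS_PER_LONG	((int)(8 * sizeof(long)))
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned long max_slots_for(unsigned long mask)
{
	/* mask + 1 overflows to 0 for mask == ~0UL; use the fallback then */
	return mask + 1
		? ALIGN_UP(mask + 1, 1UL << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
}

int main(void)
{
	printf("64 KiB boundary -> %lu slots\n", max_slots_for(0xffffUL));
	printf("no boundary     -> %lu slots\n", max_slots_for(~0UL));
	return 0;
}
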
+
+/*
+ * Allocates bounce buffer and returns its physical address.
+ */
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+          enum dma_data_direction dir, unsigned long attrs)
+{
+       dma_addr_t start_dma_addr;
+
+       if (swiotlb_force == SWIOTLB_NO_FORCE) {
+               dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
+                                    &phys);
+               return SWIOTLB_MAP_ERROR;
+       }
+
+       start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
+       return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
+                                     dir, attrs);
+}
+
+/*
+ * tlb_addr is the physical address of the bounce buffer to unmap.
+ */
+void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
+                             size_t size, enum dma_data_direction dir,
+                             unsigned long attrs)
+{
+       unsigned long flags;
+       int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+       /*
+        * First, sync the memory before unmapping the entry
+        */
+       if (orig_addr != INVALID_PHYS_ADDR &&
+           !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
+
+       /*
+        * Return the buffer to the free list by setting the corresponding
+        * entries to indicate the number of contiguous entries available.
+        * While returning the entries to the free list, we merge the entries
+        * with slots below and above the pool being returned.
+        */
+       spin_lock_irqsave(&io_tlb_lock, flags);
+       {
+               count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
+                        io_tlb_list[index + nslots] : 0);
+               /*
+                * Step 1: return the slots to the free list, merging the
+                * slots with succeeding slots
+                */
+               for (i = index + nslots - 1; i >= index; i--) {
+                       io_tlb_list[i] = ++count;
+                       io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+               }
+               /*
+                * Step 2: merge the returned slots with the preceding slots,
+                * if available (non-zero)
+                */
+               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+                       io_tlb_list[i] = ++count;
+       }
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+}
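
A worked instance of the two-step merge may help. Below, slots 4-7 are freed while 0-3 and 8-15 are already free; the counts collapse into one descending run over the whole region. This is a toy array — the real code also checks the segment boundary before peeking at the following slot:

#include <stdio.h>

#define IO_TLB_SEGSIZE		128
#define OFFSET(val, align)	((val) & ((align) - 1))

static unsigned int list[16] = {
	4, 3, 2, 1,			/* slots 0-3: free run of 4 */
	0, 0, 0, 0,			/* slots 4-7: in use (freed below) */
	8, 7, 6, 5, 4, 3, 2, 1,		/* slots 8-15: free run of 8 */
};

int main(void)
{
	int index = 4, nslots = 4, i;
	unsigned int count = list[index + nslots];	/* seed from following run */

	for (i = index + nslots - 1; i >= index; i--)
		list[i] = ++count;		/* step 1: free + merge upward */
	for (i = index - 1;
	     OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1 && list[i]; i--)
		list[i] = ++count;		/* step 2: extend preceding run */

	for (i = 0; i < 16; i++)
		printf("%u ", list[i]);		/* 16 15 14 ... 2 1 */
	printf("\n");
	return 0;
}
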
+
+void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
+                            size_t size, enum dma_data_direction dir,
+                            enum dma_sync_target target)
+{
+       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
+       phys_addr_t orig_addr = io_tlb_orig_addr[index];
+
+       if (orig_addr == INVALID_PHYS_ADDR)
+               return;
+       orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
+
+       switch (target) {
+       case SYNC_FOR_CPU:
+               if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
+                       swiotlb_bounce(orig_addr, tlb_addr,
+                                      size, DMA_FROM_DEVICE);
+               else
+                       BUG_ON(dir != DMA_TO_DEVICE);
+               break;
+       case SYNC_FOR_DEVICE:
+               if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+                       swiotlb_bounce(orig_addr, tlb_addr,
+                                      size, DMA_TO_DEVICE);
+               else
+                       BUG_ON(dir != DMA_FROM_DEVICE);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+               size_t size)
+{
+       u64 mask = DMA_BIT_MASK(32);
+
+       if (dev && dev->coherent_dma_mask)
+               mask = dev->coherent_dma_mask;
+       return addr + size - 1 <= mask;
+}
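
As a worked example of this range check: with the default 32-bit mask, a 64 KiB allocation whose bus address starts at 0xFFFF0000 passes (it ends exactly at 0xFFFFFFFF), while one starting at 0xFFFF0001 fails by a single byte.
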
+
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               unsigned long attrs)
+{
+       phys_addr_t phys_addr;
+
+       if (swiotlb_force == SWIOTLB_NO_FORCE)
+               goto out_warn;
+
+       phys_addr = swiotlb_tbl_map_single(dev,
+                       __phys_to_dma(dev, io_tlb_start),
+                       0, size, DMA_FROM_DEVICE, attrs);
+       if (phys_addr == SWIOTLB_MAP_ERROR)
+               goto out_warn;
+
+       *dma_handle = __phys_to_dma(dev, phys_addr);
+       if (!dma_coherent_ok(dev, *dma_handle, size))
+               goto out_unmap;
+
+       memset(phys_to_virt(phys_addr), 0, size);
+       return phys_to_virt(phys_addr);
+
+out_unmap:
+       dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+               (unsigned long long)dev->coherent_dma_mask,
+               (unsigned long long)*dma_handle);
+
+       /*
+        * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+        * DMA_ATTR_SKIP_CPU_SYNC is optional.
+        */
+       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+                       DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+               dev_warn(dev,
+                       "swiotlb: coherent allocation failed, size=%zu\n",
+                       size);
+               dump_stack();
+       }
+       return NULL;
+}
+
+static bool swiotlb_free_buffer(struct device *dev, size_t size,
+               dma_addr_t dma_addr)
+{
+       phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
+
+       WARN_ON_ONCE(irqs_disabled());
+
+       if (!is_swiotlb_buffer(phys_addr))
+               return false;
+
+       /*
+        * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+        * DMA_ATTR_SKIP_CPU_SYNC is optional.
+        */
+       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+                                DMA_ATTR_SKIP_CPU_SYNC);
+       return true;
+}
+
+static void
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+            int do_panic)
+{
+       if (swiotlb_force == SWIOTLB_NO_FORCE)
+               return;
+
+       /*
+        * Ran out of IOMMU space for this operation. This is very bad.
+        * Unfortunately the drivers cannot handle this operation properly
+        * unless they check for dma_mapping_error() (most don't).
+        * When the mapping is small enough, return a static buffer to limit
+        * the damage, or panic when the transfer is too big.
+        */
+       dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
+                           size);
+
+       if (size <= io_tlb_overflow || !do_panic)
+               return;
+
+       if (dir == DMA_BIDIRECTIONAL)
+               panic("DMA: Random memory could be DMA accessed\n");
+       if (dir == DMA_FROM_DEVICE)
+               panic("DMA: Random memory could be DMA written\n");
+       if (dir == DMA_TO_DEVICE)
+               panic("DMA: Random memory could be DMA read\n");
+}
+
+/*
+ * Map a single buffer of the indicated size for DMA in streaming mode.  The
+ * physical address to use is returned.
+ *
+ * Once the device is given the dma address, the device owns this memory until
+ * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
+ */
+dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
+                           unsigned long offset, size_t size,
+                           enum dma_data_direction dir,
+                           unsigned long attrs)
+{
+       phys_addr_t map, phys = page_to_phys(page) + offset;
+       dma_addr_t dev_addr = phys_to_dma(dev, phys);
+
+       BUG_ON(dir == DMA_NONE);
+       /*
+        * If the address happens to be in the device's DMA window,
+        * we can safely return the device addr and not worry about bounce
+        * buffering it.
+        */
+       if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
+               return dev_addr;
+
+       trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+
+       /* Oh well, have to allocate and map a bounce buffer. */
+       map = map_single(dev, phys, size, dir, attrs);
+       if (map == SWIOTLB_MAP_ERROR) {
+               swiotlb_full(dev, size, dir, 1);
+               return __phys_to_dma(dev, io_tlb_overflow_buffer);
+       }
+
+       dev_addr = __phys_to_dma(dev, map);
+
+       /* Ensure that the address returned is DMA'ble */
+       if (dma_capable(dev, dev_addr, size))
+               return dev_addr;
+
+       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+       swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
+
+       return __phys_to_dma(dev, io_tlb_overflow_buffer);
+}
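
The failure contract above is worth making explicit: on pool exhaustion the returned handle is the overflow buffer's bus address, which is exactly what swiotlb_dma_mapping_error() tests further down. A hedged sketch of the expected driver-side usage (the helper and device names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver helper: map one page and honor the error contract. */
static int softdev_map_page(struct device *dev, struct page *page,
			    size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* bounce pool full; don't program the device */
	*out = addr;
	return 0;
}
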
+
+/*
+ * Unmap a single streaming mode DMA translation.  The dma_addr and size must
+ * match what was provided for in a previous swiotlb_map_page call.  All
+ * other usages are undefined.
+ *
+ * After this call, reads by the cpu from the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
+                        size_t size, enum dma_data_direction dir,
+                        unsigned long attrs)
+{
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
+       BUG_ON(dir == DMA_NONE);
+
+       if (is_swiotlb_buffer(paddr)) {
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
+               return;
+       }
+
+       if (dir != DMA_FROM_DEVICE)
+               return;
+
+       /*
+        * phys_to_virt() doesn't work with a highmem page, but we could
+        * end up calling dma_mark_clean() with a highmem page here. However,
+        * we are fine since dma_mark_clean() is a no-op on POWERPC. We can
+        * make dma_mark_clean() take a physical address if necessary.
+        */
+       dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+                       size_t size, enum dma_data_direction dir,
+                       unsigned long attrs)
+{
+       unmap_single(hwdev, dev_addr, size, dir, attrs);
+}
+
+/*
+ * Make physical memory consistent for a single streaming mode DMA translation
+ * after a transfer.
+ *
+ * If you perform a swiotlb_map_page() but wish to interrogate the buffer
+ * using the cpu, yet do not wish to tear down the dma mapping, you must
+ * call this function before doing so.  At the next point you give the dma
+ * address back to the card, you must first perform a
+ * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
+ */
+static void
+swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
+                   size_t size, enum dma_data_direction dir,
+                   enum dma_sync_target target)
+{
+       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
+
+       BUG_ON(dir == DMA_NONE);
+
+       if (is_swiotlb_buffer(paddr)) {
+               swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
+               return;
+       }
+
+       if (dir != DMA_FROM_DEVICE)
+               return;
+
+       dma_mark_clean(phys_to_virt(paddr), size);
+}
+
+void
+swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
+                           size_t size, enum dma_data_direction dir)
+{
+       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
+                              size_t size, enum dma_data_direction dir)
+{
+       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
+}
+
+/*
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * This is the scatter-gather version of the above swiotlb_map_page
+ * interface.  Here the scatter gather list elements are each tagged with the
+ * appropriate dma address and length.  They are obtained via
+ * sg_dma_{address,length}(SG).
+ *
+ * NOTE: An implementation may be able to use a smaller number of
 + *       DMA address/length pairs than there are SG table elements
 + *       (for example via virtual mapping capabilities).
 + *       The routine returns the number of addr/length pairs actually
+ *       used, at most nents.
+ *
+ * Device ownership issues as mentioned above for swiotlb_map_page are the
+ * same here.
+ */
+int
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+                    enum dma_data_direction dir, unsigned long attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       BUG_ON(dir == DMA_NONE);
+
+       for_each_sg(sgl, sg, nelems, i) {
+               phys_addr_t paddr = sg_phys(sg);
+               dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
+
+               if (swiotlb_force == SWIOTLB_FORCE ||
+                   !dma_capable(hwdev, dev_addr, sg->length)) {
+                       phys_addr_t map = map_single(hwdev, sg_phys(sg),
+                                                    sg->length, dir, attrs);
+                       if (map == SWIOTLB_MAP_ERROR) {
+                               /*
+                                * Don't panic here, we expect map_sg users
+                                * to do proper error handling.
+                                */
+                               swiotlb_full(hwdev, sg->length, dir, 0);
+                               attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+                               swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+                                                      attrs);
+                               sg_dma_len(sgl) = 0;
+                               return 0;
+                       }
+                       sg->dma_address = __phys_to_dma(hwdev, map);
+               } else
+                       sg->dma_address = dev_addr;
+               sg_dma_len(sg) = sg->length;
+       }
+       return nelems;
+}
+
+/*
+ * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
+ * concerning calls here are the same as for swiotlb_unmap_page() above.
+ */
+void
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+                      int nelems, enum dma_data_direction dir,
+                      unsigned long attrs)
+{
+       struct scatterlist *sg;
+       int i;
+
+       BUG_ON(dir == DMA_NONE);
+
+       for_each_sg(sgl, sg, nelems, i)
+               unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+                            attrs);
+}
+
+/*
+ * Make physical memory consistent for a set of streaming mode DMA translations
+ * after a transfer.
+ *
+ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
+ * and usage.
+ */
+static void
+swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
+               int nelems, enum dma_data_direction dir,
+               enum dma_sync_target target)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nelems, i)
+               swiotlb_sync_single(hwdev, sg->dma_address,
+                                   sg_dma_len(sg), dir, target);
+}
+
+void
+swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
+                       int nelems, enum dma_data_direction dir)
+{
+       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
+}
+
+void
+swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+                          int nelems, enum dma_data_direction dir)
+{
+       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
+}
+
+int
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+       return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
+}
+
+/*
+ * Return whether the given device DMA address mask can be supported
+ * properly.  For example, if your device can only drive the low 24-bits
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
+ * this function.
+ */
+int
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
+{
+       return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
+}
+
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       void *vaddr;
+
+       /* temporary workaround: */
+       if (gfp & __GFP_NOWARN)
+               attrs |= DMA_ATTR_NO_WARN;
+
+       /*
+        * Don't print a warning when the first allocation attempt fails.
+        * swiotlb_alloc_coherent() will print a warning when the DMA memory
+        * allocation ultimately fails.
+        */
+       gfp |= __GFP_NOWARN;
+
+       vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+       if (!vaddr)
+               vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
+       return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+               dma_addr_t dma_addr, unsigned long attrs)
+{
+       if (!swiotlb_free_buffer(dev, size, dma_addr))
+               dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+       .mapping_error          = swiotlb_dma_mapping_error,
+       .alloc                  = swiotlb_alloc,
+       .free                   = swiotlb_free,
+       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = swiotlb_sync_single_for_device,
+       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
+       .map_sg                 = swiotlb_map_sg_attrs,
+       .unmap_sg               = swiotlb_unmap_sg_attrs,
+       .map_page               = swiotlb_map_page,
+       .unmap_page             = swiotlb_unmap_page,
+       .dma_supported          = dma_direct_supported,
+};
+EXPORT_SYMBOL(swiotlb_dma_ops);
diff --git a/kernel/dma/virt.c b/kernel/dma/virt.c
new file mode 100644 (file)
index 0000000..631ddec
--- /dev/null
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map to virtual addresses without flushing memory.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+
+static void *dma_virt_alloc(struct device *dev, size_t size,
+                           dma_addr_t *dma_handle, gfp_t gfp,
+                           unsigned long attrs)
+{
+       void *ret;
+
+       ret = (void *)__get_free_pages(gfp, get_order(size));
+       if (ret)
+               *dma_handle = (uintptr_t)ret;
+       return ret;
+}
+
+static void dma_virt_free(struct device *dev, size_t size,
+                         void *cpu_addr, dma_addr_t dma_addr,
+                         unsigned long attrs)
+{
+       free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
+                                   unsigned long offset, size_t size,
+                                   enum dma_data_direction dir,
+                                   unsigned long attrs)
+{
+       return (uintptr_t)(page_address(page) + offset);
+}
+
+static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
+                          int nents, enum dma_data_direction dir,
+                          unsigned long attrs)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sgl, sg, nents, i) {
+               BUG_ON(!sg_page(sg));
+               sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
+               sg_dma_len(sg) = sg->length;
+       }
+
+       return nents;
+}
+
+const struct dma_map_ops dma_virt_ops = {
+       .alloc                  = dma_virt_alloc,
+       .free                   = dma_virt_free,
+       .map_page               = dma_virt_map_page,
+       .map_sg                 = dma_virt_map_sg,
+};
+EXPORT_SYMBOL(dma_virt_ops);
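
These ops suit purely software "devices" (soft RDMA drivers are the classic consumers), where a DMA handle is just a kernel virtual address. A hedged sketch of how a driver might attach them — the probe function is hypothetical; set_dma_ops() is the usual helper:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int softdev_probe(struct device *dev)
{
	/* No IOMMU, no cache flushing: DMA handles are virtual addresses. */
	set_dma_ops(dev, &dma_virt_ops);
	return 0;
}
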
index 80cca2b30c4fe02c1baca59b08c0cae158a7368e..8f0434a9951af00bce3f009c21ddbd3c5cd61e01 100644 (file)
@@ -6482,7 +6482,7 @@ void perf_prepare_sample(struct perf_event_header *header,
                data->phys_addr = perf_virt_to_phys(data->addr);
 }
 
-static void __always_inline
+static __always_inline void
 __perf_event_output(struct perf_event *event,
                    struct perf_sample_data *data,
                    struct pt_regs *regs,
index 045a37e9ddee3255fac6ab34b80682f4e2e968e1..5d3cf407e37469a7b1cafab8c4af303d074bbdf8 100644 (file)
@@ -103,7 +103,7 @@ out:
        preempt_enable();
 }
 
-static bool __always_inline
+static __always_inline bool
 ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
@@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
                return CIRC_SPACE(tail, head, data_size) >= size;
 }
 
-static int __always_inline
+static __always_inline int
 __perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
@@ -414,7 +414,7 @@ err:
 }
 EXPORT_SYMBOL_GPL(perf_aux_output_begin);
 
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
 {
        if (rb->aux_overwrite)
                return false;
index 4dadeb3d666621239a7273f7651847fa7099dacf..6f636136cccc05993e20034e92effc0c0fc3e7e2 100644 (file)
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
        BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
        BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
        BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+       BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
 };
 
 static void
index 481951bf091d49fbe4378bb21504b6482e11919f..750cb8082694d481a7fbce92999757e754023f65 100644 (file)
@@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task)
 static void __kthread_parkme(struct kthread *self)
 {
        for (;;) {
-               set_current_state(TASK_PARKED);
+               /*
+                * TASK_PARKED is a special state; we must serialize against
+                * possible pending wakeups to avoid store-store collisions on
+                * task->state.
+                *
+                * Such a collision might possibly result in the task state
+                * changing from TASK_PARKED and us failing the
+                * wait_task_inactive() in kthread_park().
+                */
+               set_special_state(TASK_PARKED);
                if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
                        break;
+
+               complete_all(&self->parked);
                schedule();
        }
        __set_current_state(TASK_RUNNING);
@@ -191,11 +202,6 @@ void kthread_parkme(void)
 }
 EXPORT_SYMBOL_GPL(kthread_parkme);
 
-void kthread_park_complete(struct task_struct *k)
-{
-       complete_all(&to_kthread(k)->parked);
-}
-
 static int kthread(void *_create)
 {
        /* Copy data: it's on kthread's stack */
@@ -461,6 +467,9 @@ void kthread_unpark(struct task_struct *k)
 
        reinit_completion(&kthread->parked);
        clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+       /*
+        * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
+        */
        wake_up_state(k, TASK_PARKED);
 }
 EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -487,7 +496,16 @@ int kthread_park(struct task_struct *k)
        set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
        if (k != current) {
                wake_up_process(k);
+               /*
+                * Wait for __kthread_parkme() to complete(), this means we
+                * _will_ have TASK_PARKED and are about to call schedule().
+                */
                wait_for_completion(&kthread->parked);
+               /*
+                * Now wait for that schedule() to complete and the task to
+                * get scheduled out.
+                */
+               WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
        }
 
        return 0;
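
Condensing the ordering that the comments above spell out, the park handshake now looks like this (a sketch of the protocol, not kernel code):

/*
 * kthread_park()                          __kthread_parkme()
 * --------------                          ------------------
 * set_bit(KTHREAD_SHOULD_PARK)
 * wake_up_process(k)  ------------------> set_special_state(TASK_PARKED)
 *                                         sees SHOULD_PARK still set
 * wait_for_completion(&parked) <--------- complete_all(&parked)
 * wait_task_inactive(k, TASK_PARKED) <--- schedule()  (now off the CPU)
 */
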
index edcac5de7ebcdb489113800c941274d8887f9b56..5fa4d3138bf106cd87f822636652c144e846f3aa 100644 (file)
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
        this.parent = NULL;
        this.class = class;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 
        return ret;
 }
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
        if (unlikely(!debug_locks))
                return;
 
-       local_irq_save(flags);
+       raw_local_irq_save(flags);
        for (i = 0; i < curr->lockdep_depth; i++) {
                hlock = curr->held_locks + i;
 
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
                print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
                break;
        }
-       local_irq_restore(flags);
+       raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
index bc1e507be9ff7aea311261e78002d53375f9a6d7..776308d2fa9e9468116f0174eed4b8062475a83f 100644 (file)
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
        might_sleep();
 
        __down_read(sem);
+       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_non_owner);
index ae306f90c51484fae6bb583733ca5e8f8b3e76be..c6242d8594dc7c0fab52de9df7f9cf01e49e5d0f 100644 (file)
@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t)
 {
        u32 cpu_id = raw_smp_processor_id();
 
-       if (__put_user(cpu_id, &t->rseq->cpu_id_start))
+       if (put_user(cpu_id, &t->rseq->cpu_id_start))
                return -EFAULT;
-       if (__put_user(cpu_id, &t->rseq->cpu_id))
+       if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        trace_rseq_update(t);
        return 0;
@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
        /*
         * Reset cpu_id_start to its initial state (0).
         */
-       if (__put_user(cpu_id_start, &t->rseq->cpu_id_start))
+       if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
                return -EFAULT;
        /*
         * Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
         * in after unregistration can figure out that rseq needs to be
         * registered again.
         */
-       if (__put_user(cpu_id, &t->rseq->cpu_id))
+       if (put_user(cpu_id, &t->rseq->cpu_id))
                return -EFAULT;
        return 0;
 }
@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
 static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
 {
        struct rseq_cs __user *urseq_cs;
-       unsigned long ptr;
+       u64 ptr;
        u32 __user *usig;
        u32 sig;
        int ret;
 
-       ret = __get_user(ptr, &t->rseq->rseq_cs);
-       if (ret)
-               return ret;
+       if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
+               return -EFAULT;
        if (!ptr) {
                memset(rseq_cs, 0, sizeof(*rseq_cs));
                return 0;
        }
-       urseq_cs = (struct rseq_cs __user *)ptr;
+       if (ptr >= TASK_SIZE)
+               return -EINVAL;
+       urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
        if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
                return -EFAULT;
-       if (rseq_cs->version > 0)
-               return -EINVAL;
 
+       if (rseq_cs->start_ip >= TASK_SIZE ||
+           rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
+           rseq_cs->abort_ip >= TASK_SIZE ||
+           rseq_cs->version > 0)
+               return -EINVAL;
+       /* Check for overflow. */
+       if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
+               return -EINVAL;
        /* Ensure that abort_ip is not in the critical section. */
        if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
                return -EINVAL;
 
-       usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32));
+       usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
        ret = get_user(sig, usig);
        if (ret)
                return ret;
@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
                printk_ratelimited(KERN_WARNING
                        "Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
                        sig, current->rseq_sig, current->pid, usig);
-               return -EPERM;
+               return -EINVAL;
        }
        return 0;
 }
@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
        int ret;
 
        /* Get thread flags. */
-       ret = __get_user(flags, &t->rseq->flags);
+       ret = get_user(flags, &t->rseq->flags);
        if (ret)
                return ret;
 
@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t)
         * of code outside of the rseq assembly block. This performs
         * a lazy clear of the rseq_cs field.
         *
-        * Set rseq_cs to NULL with single-copy atomicity.
+        * Set rseq_cs to NULL.
         */
-       return __put_user(0UL, &t->rseq->rseq_cs);
+       if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
+               return -EFAULT;
+       return 0;
 }
 
 /*
@@ -251,10 +260,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
  * respect to other threads scheduled on the same CPU, and with respect
  * to signal handlers.
  */
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
 {
        struct task_struct *t = current;
-       int ret;
+       int ret, sig;
 
        if (unlikely(t->flags & PF_EXITING))
                return;
@@ -268,7 +277,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
        return;
 
 error:
-       force_sig(SIGSEGV, t);
+       sig = ksig ? ksig->sig : 0;
+       force_sigsegv(sig, t);
 }
 
 #ifdef CONFIG_DEBUG_RSEQ
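
The new overflow check in rseq_get_rseq_cs() rejects critical sections whose [start_ip, start_ip + post_commit_offset) range wraps around the address space. A stand-alone instance of the comparison (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A hostile descriptor near the top of the address space: the sum
	 * wraps below start_ip, which the overflow comparison catches. */
	uint64_t start_ip = UINT64_MAX - 8, post_commit_offset = 64;

	if (start_ip + post_commit_offset < start_ip)
		printf("rejected: critical section wraps\n");
	return 0;
}
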
index 78d8facba456c2fc44c7024ae1a1c8d9db6f0692..fe365c9a08e98392d17a0f54ac1c7209db5f90b2 100644 (file)
@@ -7,7 +7,6 @@
  */
 #include "sched.h"
 
-#include <linux/kthread.h>
 #include <linux/nospec.h>
 
 #include <linux/kcov.h>
@@ -2724,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev)
                membarrier_mm_sync_core_before_usermode(mm);
                mmdrop(mm);
        }
-       if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
-               switch (prev_state) {
-               case TASK_DEAD:
-                       if (prev->sched_class->task_dead)
-                               prev->sched_class->task_dead(prev);
+       if (unlikely(prev_state == TASK_DEAD)) {
+               if (prev->sched_class->task_dead)
+                       prev->sched_class->task_dead(prev);
 
-                       /*
-                        * Remove function-return probe instances associated with this
-                        * task and put them back on the free list.
-                        */
-                       kprobe_flush_task(prev);
-
-                       /* Task is done with its stack. */
-                       put_task_stack(prev);
+               /*
+                * Remove function-return probe instances associated with this
+                * task and put them back on the free list.
+                */
+               kprobe_flush_task(prev);
 
-                       put_task_struct(prev);
-                       break;
+               /* Task is done with its stack. */
+               put_task_stack(prev);
 
-               case TASK_PARKED:
-                       kthread_park_complete(prev);
-                       break;
-               }
+               put_task_struct(prev);
        }
 
        tick_nohz_task_switch();
@@ -3113,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work)
        struct tick_work *twork = container_of(dwork, struct tick_work, work);
        int cpu = twork->cpu;
        struct rq *rq = cpu_rq(cpu);
+       struct task_struct *curr;
        struct rq_flags rf;
+       u64 delta;
 
        /*
         * Handle the tick only if it appears the remote CPU is running in full
@@ -3122,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work)
         * statistics and checks timeslices in a time-independent way, regardless
         * of when exactly it is running.
         */
-       if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
-               struct task_struct *curr;
-               u64 delta;
+       if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+               goto out_requeue;
 
-               rq_lock_irq(rq, &rf);
-               update_rq_clock(rq);
-               curr = rq->curr;
-               delta = rq_clock_task(rq) - curr->se.exec_start;
+       rq_lock_irq(rq, &rf);
+       curr = rq->curr;
+       if (is_idle_task(curr))
+               goto out_unlock;
 
-               /*
-                * Make sure the next tick runs within a reasonable
-                * amount of time.
-                */
-               WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
-               curr->sched_class->task_tick(rq, curr, 0);
-               rq_unlock_irq(rq, &rf);
-       }
+       update_rq_clock(rq);
+       delta = rq_clock_task(rq) - curr->se.exec_start;
+
+       /*
+        * Make sure the next tick runs within a reasonable
+        * amount of time.
+        */
+       WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+       curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+       rq_unlock_irq(rq, &rf);
 
+out_requeue:
        /*
         * Run the remote tick once per second (1Hz). This arbitrary
         * frequency is large enough to avoid overload but short enough
index 3cde46483f0aa5e57ea14502322fd90741e88ac0..c907fde01eaa65fcf784074466a17f8a64ab0203 100644 (file)
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
        struct rq *rq = cpu_rq(sg_cpu->cpu);
 
-       if (rq->rt.rt_nr_running)
+       if (rt_rq_is_runnable(&rq->rt))
                return sg_cpu->max;
 
        /*
index 1866e64792a791f8737128c88ae691d7453ff117..2f0a0be4d344d7de76211b01c6883c10fbb2ba89 100644 (file)
@@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
        if (!sched_feat(UTIL_EST))
                return;
 
-       /*
-        * Update root cfs_rq's estimated utilization
-        *
-        * If *p is the last task then the root cfs_rq's estimated utilization
-        * of a CPU is 0 by definition.
-        */
-       ue.enqueued = 0;
-       if (cfs_rq->nr_running) {
-               ue.enqueued  = cfs_rq->avg.util_est.enqueued;
-               ue.enqueued -= min_t(unsigned int, ue.enqueued,
-                                    (_task_util_est(p) | UTIL_AVG_UNCHANGED));
-       }
+       /* Update root cfs_rq's estimated utilization */
+       ue.enqueued  = cfs_rq->avg.util_est.enqueued;
+       ue.enqueued -= min_t(unsigned int, ue.enqueued,
+                            (_task_util_est(p) | UTIL_AVG_UNCHANGED));
        WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
 
        /*
@@ -4590,6 +4582,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
        now = sched_clock_cpu(smp_processor_id());
        cfs_b->runtime = cfs_b->quota;
        cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
+       cfs_b->expires_seq++;
 }
 
 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4612,6 +4605,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        struct task_group *tg = cfs_rq->tg;
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
        u64 amount = 0, min_amount, expires;
+       int expires_seq;
 
        /* note: this is a positive sum as runtime_remaining <= 0 */
        min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
@@ -4628,6 +4622,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
                        cfs_b->idle = 0;
                }
        }
+       expires_seq = cfs_b->expires_seq;
        expires = cfs_b->runtime_expires;
        raw_spin_unlock(&cfs_b->lock);
 
@@ -4637,8 +4632,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * spread between our sched_clock and the one on which runtime was
         * issued.
         */
-       if ((s64)(expires - cfs_rq->runtime_expires) > 0)
+       if (cfs_rq->expires_seq != expires_seq) {
+               cfs_rq->expires_seq = expires_seq;
                cfs_rq->runtime_expires = expires;
+       }
 
        return cfs_rq->runtime_remaining > 0;
 }
@@ -4664,12 +4661,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
         * has not truly expired.
         *
         * Fortunately we can determine whether this is the case by checking
-        * whether the global deadline has advanced. It is valid to compare
-        * cfs_b->runtime_expires without any locks since we only care about
-        * exact equality, so a partial write will still work.
+        * whether the global deadline (cfs_b->expires_seq) has advanced.
         */
-
-       if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
+       if (cfs_rq->expires_seq == cfs_b->expires_seq) {
                /* extend local deadline, drift is bounded above by 2 ticks */
                cfs_rq->runtime_expires += TICK_NSEC;
        } else {
@@ -5202,13 +5196,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+       u64 overrun;
+
        lockdep_assert_held(&cfs_b->lock);
 
-       if (!cfs_b->period_active) {
-               cfs_b->period_active = 1;
-               hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
-               hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
-       }
+       if (cfs_b->period_active)
+               return;
+
+       cfs_b->period_active = 1;
+       overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+       cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
+       cfs_b->expires_seq++;
+       hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
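
The expires_seq pattern above replaces a racy timestamp comparison with a generation counter: the global pool bumps it whenever the period timer refills runtime, and a local runqueue treats any mismatch as "the deadline advanced". A toy model of the handshake (the struct names are illustrative):

#include <stdio.h>

struct pool  { int expires_seq; };	/* stands in for cfs_bandwidth */
struct rqloc { int expires_seq; };	/* stands in for a per-rq cfs_rq */

int main(void)
{
	struct pool b = { .expires_seq = 1 };
	struct rqloc rq = { .expires_seq = 1 };

	b.expires_seq++;	/* period timer fired: runtime refilled */

	if (rq.expires_seq == b.expires_seq)
		printf("same generation: only clock drift, extend deadline\n");
	else
		printf("new generation: adopt the new expiry\n");	/* taken */
	return 0;
}
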
index 47556b0c9a95faff3e827f6ffd690646cff38224..572567078b60b59dd14e3a608c6e9207f1c7e024 100644 (file)
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
-       if (!rt_se)
+       if (!rt_se) {
                dequeue_top_rt_rq(rt_rq);
+               /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+               cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+       }
        else if (on_rt_rq(rt_se))
                dequeue_rt_entity(rt_se, 0);
 }
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
        sub_nr_running(rq, rt_rq->rt_nr_running);
        rt_rq->rt_queued = 0;
 
-       /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-       cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
        if (rt_rq->rt_queued)
                return;
-       if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+       if (rt_rq_throttled(rt_rq))
                return;
 
-       add_nr_running(rq, rt_rq->rt_nr_running);
-       rt_rq->rt_queued = 1;
+       if (rt_rq->rt_nr_running) {
+               add_nr_running(rq, rt_rq->rt_nr_running);
+               rt_rq->rt_queued = 1;
+       }
 
        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq, 0);
index 6601baf2361c04605ce198091bdfe8d56aa8a7b7..c7742dcc136c6aa3ba95f0874c02c96e1eaccde8 100644 (file)
@@ -334,9 +334,10 @@ struct cfs_bandwidth {
        u64                     runtime;
        s64                     hierarchical_quota;
        u64                     runtime_expires;
+       int                     expires_seq;
 
-       int                     idle;
-       int                     period_active;
+       short                   idle;
+       short                   period_active;
        struct hrtimer          period_timer;
        struct hrtimer          slack_timer;
        struct list_head        throttled_cfs_rq;
@@ -551,6 +552,7 @@ struct cfs_rq {
 
 #ifdef CONFIG_CFS_BANDWIDTH
        int                     runtime_enabled;
+       int                     expires_seq;
        u64                     runtime_expires;
        s64                     runtime_remaining;
 
@@ -609,6 +611,11 @@ struct rt_rq {
 #endif
 };
 
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+       return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
        /* runqueue is an rbtree, ordered by deadline */
index de2f57fddc04ed85f5d419fe64e51cdcbb93193b..75ffc1d1a2e06e9d08f1e121e7dfb86d7758b3ed 100644 (file)
@@ -79,12 +79,16 @@ static void wakeup_softirqd(void)
 
 /*
  * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+ * unless we're doing some of the synchronous softirqs.
  */
-static bool ksoftirqd_running(void)
+#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
+static bool ksoftirqd_running(unsigned long pending)
 {
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
+       if (pending & SOFTIRQ_NOW_MASK)
+               return false;
        return tsk && (tsk->state == TASK_RUNNING);
 }
 
@@ -139,9 +143,13 @@ static void __local_bh_enable(unsigned int cnt)
 {
        lockdep_assert_irqs_disabled();
 
+       if (preempt_count() == cnt)
+               trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
-       preempt_count_sub(cnt);
+
+       __preempt_count_sub(cnt);
 }
 
 /*
@@ -324,7 +332,7 @@ asmlinkage __visible void do_softirq(void)
 
        pending = local_softirq_pending();
 
-       if (pending && !ksoftirqd_running())
+       if (pending && !ksoftirqd_running(pending))
                do_softirq_own_stack();
 
        local_irq_restore(flags);
@@ -351,7 +359,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-       if (ksoftirqd_running())
+       if (ksoftirqd_running(local_softirq_pending()))
                return;
 
        if (!force_irqthreads) {
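
[Editor's note] The reworked ksoftirqd_running() treats HI_SOFTIRQ and TASKLET_SOFTIRQ as synchronous: when one of those bits is pending, do_softirq() and invoke_softirq() process softirqs inline instead of deferring to a runnable ksoftirqd. A compilable model of the new decision; the bit numbers follow include/linux/interrupt.h:

#include <stdbool.h>
#include <stdio.h>

enum { HI_SOFTIRQ = 0, TIMER_SOFTIRQ = 1, TASKLET_SOFTIRQ = 6 };
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))

/* Mirrors ksoftirqd_running(pending): synchronous softirqs are never
 * deferred; everything else defers while ksoftirqd is runnable. */
static bool defer_to_ksoftirqd(unsigned long pending, bool ksoftirqd_runnable)
{
        if (pending & SOFTIRQ_NOW_MASK)
                return false;
        return ksoftirqd_runnable;
}

int main(void)
{
        printf("%d\n", defer_to_ksoftirqd(1UL << TASKLET_SOFTIRQ, true)); /* 0 */
        printf("%d\n", defer_to_ksoftirqd(1UL << TIMER_SOFTIRQ,   true)); /* 1 */
        return 0;
}
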
index 055a4a728c00cce3945afc04b9bee692b243896b..3e93c54bd3a16b7fc282a20064f5d75f7c812ee8 100644 (file)
@@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
 int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
 {
        switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
        case TT_COMPAT:
                if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
                        return -EFAULT;
index 5a6251ac6f7acd183c35a51d9d55fb680fda64dd..9cdf54b04ca8860b7aa2eec2e3625de076b5f7e2 100644 (file)
@@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
        /*
         * Disarm any old timer after extracting its expiry time.
         */
-       lockdep_assert_irqs_disabled();
 
        ret = 0;
        old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
        /*
         * Now re-arm for the new expiry time.
         */
-       lockdep_assert_irqs_disabled();
        arm_timer(timer);
 unlock:
        unlock_task_sighand(p, &flags);
index b7005dd21ec16ce5fa92e33b3b46f04bedbbf7f0..14de3727b18e6ca5c21780aa37af22fbc4d29739 100644 (file)
@@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
         */
        return !curdev ||
                newdev->rating > curdev->rating ||
-              (!cpumask_equal(curdev->cpumask, newdev->cpumask) &&
-               !tick_check_percpu(curdev, newdev, smp_processor_id()));
+              !cpumask_equal(curdev->cpumask, newdev->cpumask);
 }
 
 /*
index 6fa99213fc720e4b77c467ae69a87007c22b37d2..2b41e8e2d31db26faaaf905543af749463939b9c 100644 (file)
@@ -28,6 +28,7 @@
  */
 
 #include <linux/export.h>
+#include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/capability.h>
 #include <linux/timekeeper_internal.h>
@@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
        return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
 #else
 # if BITS_PER_LONG == 32
-       return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+       return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+              HZ_TO_MSEC_SHR32;
 # else
-       return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+       return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
 # endif
 #endif
 }
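
[Editor's note] Both branches of jiffies_to_msecs() now round up instead of truncating, so any nonzero jiffies value reports at least 1 ms. A worked example for the 64-bit branch: with HZ=1024 the ratio HZ_TO_MSEC_NUM/HZ_TO_MSEC_DEN reduces to 125/128 (about 0.977 ms per jiffy), and one jiffy used to truncate to 0 ms:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* HZ = 1024: 1000/1024 reduces to 125/128 */
        unsigned long j = 1, num = 125, den = 128;

        printf("truncated: %lu ms\n", (j * num) / den);             /* 0 */
        printf("rounded:   %lu ms\n", DIV_ROUND_UP(j * num, den));  /* 1 */
        return 0;
}
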
index efed9c1cfb7ea4ea12182e711dacf01623f73452..caf9cbf3581683ace69577fd2f365ba039138cea 100644 (file)
@@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
        op->saved_func(ip, parent_ip, op, regs);
 }
 
-/**
- * clear_ftrace_function - reset the ftrace function
- *
- * This NULLs the ftrace function and in essence stops
- * tracing.  There may be lag
- */
-void clear_ftrace_function(void)
-{
-       ftrace_trace_function = ftrace_stub;
-}
-
 static void ftrace_sync(struct work_struct *work)
 {
        /*
@@ -6689,7 +6678,7 @@ void ftrace_kill(void)
 {
        ftrace_disabled = 1;
        ftrace_enabled = 0;
-       clear_ftrace_function();
+       ftrace_trace_function = ftrace_stub;
 }
 
 /**
index c9336e98ac59a778d31c16a9ac72b184477e7177..87cf25171fb8dfde4ea96f001a8e28e64abd17f2 100644 (file)
@@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-       struct ring_buffer *buf;
-
        if (tr->stop_count)
                return;
 
@@ -1375,9 +1373,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
        arch_spin_lock(&tr->max_lock);
 
-       buf = tr->trace_buffer.buffer;
-       tr->trace_buffer.buffer = tr->max_buffer.buffer;
-       tr->max_buffer.buffer = buf;
+       swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
 
        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
@@ -2957,6 +2953,7 @@ out_nobuffer:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
+__printf(3, 0)
 static int
 __trace_array_vprintk(struct ring_buffer *buffer,
                      unsigned long ip, const char *fmt, va_list args)
@@ -3011,12 +3008,14 @@ out_nobuffer:
        return len;
 }
 
+__printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
        return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
 }
 
+__printf(3, 0)
 int trace_array_printk(struct trace_array *tr,
                       unsigned long ip, const char *fmt, ...)
 {
@@ -3032,6 +3031,7 @@ int trace_array_printk(struct trace_array *tr,
        return ret;
 }
 
+__printf(3, 4)
 int trace_array_printk_buf(struct ring_buffer *buffer,
                           unsigned long ip, const char *fmt, ...)
 {
@@ -3047,6 +3047,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
        return ret;
 }
 
+__printf(2, 0)
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
        return trace_array_vprintk(&global_trace, ip, fmt, args);
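
[Editor's note] The __printf(m, n) annotations being added let the compiler type-check format strings: argument m is the format and argument n is where the variadic arguments begin (0 for va_list variants). A self-contained demonstration of the underlying GCC/Clang attribute; the demo_* names are invented:

#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

/* va_list consumer: annotated __printf(3, 0) like __trace_array_vprintk(). */
__printf(3, 0)
static int demo_vprintk(void *buf, unsigned long ip, const char *fmt,
                        va_list args)
{
        (void)buf; (void)ip;
        return vprintf(fmt, args);
}

/* Variadic wrapper: annotated __printf(3, 4) like trace_array_printk_buf(). */
__printf(3, 4)
static int demo_printk(void *buf, unsigned long ip, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = demo_vprintk(buf, ip, fmt, args);
        va_end(args);
        return ret;
}

int main(void)
{
        /* demo_printk(NULL, 0, "%s", 42) would now warn at compile time. */
        return demo_printk(NULL, 0, "pid=%d\n", 42) < 0;
}
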
@@ -3364,8 +3365,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
 
        print_event_info(buf, m);
 
-       seq_printf(m, "#           TASK-PID   CPU#   %s  TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
-       seq_printf(m, "#              | |       |    %s     |         |\n",      tgid ? "  |      " : "");
+       seq_printf(m, "#           TASK-PID   %s  CPU#   TIMESTAMP  FUNCTION\n", tgid ? "TGID     " : "");
+       seq_printf(m, "#              | |     %s    |       |         |\n",      tgid ? "  |      " : "");
 }
 
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
@@ -3385,9 +3386,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
                   tgid ? tgid_space : space);
        seq_printf(m, "#                          %s||| /     delay\n",
                   tgid ? tgid_space : space);
-       seq_printf(m, "#           TASK-PID   CPU#%s||||    TIMESTAMP  FUNCTION\n",
+       seq_printf(m, "#           TASK-PID %sCPU#  ||||    TIMESTAMP  FUNCTION\n",
                   tgid ? "   TGID   " : space);
-       seq_printf(m, "#              | |       | %s||||       |         |\n",
+       seq_printf(m, "#              | |   %s  |   ||||       |         |\n",
                   tgid ? "     |    " : space);
 }
 
index 630c5a24b2b255bf7cd35ef1524e25508835679f..f8f86231ad90e48b73b7eb2dc8d1def48071219a 100644 (file)
@@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit)
 static inline struct ring_buffer_iter *
 trace_buffer_iter(struct trace_iterator *iter, int cpu)
 {
-       if (iter->buffer_iter && iter->buffer_iter[cpu])
-               return iter->buffer_iter[cpu];
-       return NULL;
+       return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
 }
 
 int tracer_init(struct tracer *t, struct trace_array *tr);
index e1c818dbc0d724c603be39463b83de8f021cf79f..893a206bcba4f76dd504e246d9667475fd7ed456 100644 (file)
@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
        C(TOO_MANY_PREDS,       "Too many terms in predicate expression"), \
        C(INVALID_FILTER,       "Meaningless filter expression"),       \
        C(IP_FIELD_ONLY,        "Only 'ip' field is supported for function trace"), \
-       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"),
+       C(INVALID_VALUE,        "Invalid value (did you forget quotes)?"), \
+       C(NO_FILTER,            "No filter found"),
 
 #undef C
 #define C(a, b)                FILT_ERR_##a
@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
                goto out_free;
        }
 
+       if (!N) {
+               /* No program? */
+               ret = -EINVAL;
+               parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+               goto out_free;
+       }
+
        prog[N].pred = NULL;                                    /* #13 */
        prog[N].target = 1;             /* TRUE */
        prog[N+1].pred = NULL;
@@ -1693,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe)
  * @filter_str: filter string
  * @set_str: remember @filter_str and enable detailed error in filter
  * @filterp: out param for created filter (always updated on return)
+ *           Must be a pointer that references a NULL pointer.
  *
  * Creates a filter for @call with @filter_str.  If @set_str is %true,
  * @filter_str is copied and recorded in the new filter.
@@ -1710,6 +1719,10 @@ static int create_filter(struct trace_event_call *call,
        struct filter_parse_error *pe = NULL;
        int err;
 
+       /* filterp must point to NULL */
+       if (WARN_ON(*filterp))
+               *filterp = NULL;
+
        err = create_filter_start(filter_string, set_str, &pe, filterp);
        if (err)
                return err;
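
[Editor's note] create_filter() now enforces the contract spelled out in its updated kerneldoc: @filterp must reference a NULL pointer, because the function writes through it unconditionally and a pre-existing filter would be leaked. A small userspace model of warn-and-reset for such an out parameter; struct filter here is a stand-in, not the trace filter:

#include <stdio.h>
#include <stdlib.h>

struct filter { char *str; };

static int create(struct filter **filterp)
{
        /* A non-NULL *filterp means the caller is about to leak it:
         * warn loudly, then reset so the bug is at least visible. */
        if (*filterp) {
                fprintf(stderr, "WARN: *filterp must be NULL on entry\n");
                *filterp = NULL;
        }

        *filterp = calloc(1, sizeof(**filterp));
        return *filterp ? 0 : -1;
}

int main(void)
{
        struct filter *f = NULL;    /* correct usage: starts out NULL */

        if (create(&f) == 0)
                free(f);
        return 0;
}
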
index 046c716a6536ba4f504df2523a420b92920d4e52..aae18af94c94e61967063ac7534fe2979fdcec45 100644 (file)
@@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var)
        else if (system)
                snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
        else
-               strncpy(err, var, MAX_FILTER_STR_VAL);
+               strscpy(err, var, MAX_FILTER_STR_VAL);
 
        hist_err(str, err);
 }
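
[Editor's note] strscpy() replaces strncpy() in hist_err_event() because strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, while strscpy() always NUL-terminates and reports truncation. A userspace approximation of the difference; strscpy_model() imitates the kernel helper and -7 stands in for -E2BIG:

#include <stdio.h>
#include <string.h>

static long strscpy_model(char *dst, const char *src, size_t size)
{
        size_t len = 0;

        if (!size)
                return -7;

        while (len < size && src[len])
                len++;

        if (len == size) {                      /* no room for the NUL */
                memcpy(dst, src, size - 1);
                dst[size - 1] = '\0';
                return -7;
        }
        memcpy(dst, src, len + 1);
        return (long)len;
}

int main(void)
{
        char buf[4];

        strncpy(buf, "abcdef", sizeof(buf));    /* buf is NOT terminated */
        strscpy_model(buf, "abcdef", sizeof(buf));
        printf("%s\n", buf);                    /* "abc", terminated */
        return 0;
}
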
index 23c0b0cb5fb95c9875fb35cbd0d22f027430343c..169b3c44ee97f3cf00bc574b185f16fa572a12d5 100644 (file)
@@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
+       int cpu = iter->cpu;
        int i;
 
        graph_ret = &ret_entry->ret;
@@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
        if (data) {
                struct fgraph_cpu_data *cpu_data;
-               int cpu = iter->cpu;
 
                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 
@@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
 
        trace_seq_printf(s, "%ps();\n", (void *)call->func);
 
+       print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
+                       cpu, iter->ent->pid, flags);
+
        return trace_handle_return(s);
 }
 
index daa81571b22a4646bcc6400ccee0fe638dda2515..21f71847294222297b92ea1bfcada6ac1ddf6537 100644 (file)
@@ -1480,8 +1480,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
        }
 
        ret = __register_trace_kprobe(tk);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(tk->tp.call.print_fmt);
                goto error;
+       }
 
        return &tk->tp.call;
 error:
@@ -1501,6 +1503,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
        }
 
        __unregister_trace_kprobe(tk);
+
+       kfree(tk->tp.call.print_fmt);
        free_trace_kprobe(tk);
 }
 #endif /* CONFIG_PERF_EVENTS */
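
[Editor's note] Both trace_kprobe hunks plug the same leak: tk->tp.call.print_fmt is allocated during event setup, so the failure path of create_local_trace_kprobe() and the teardown in destroy_local_trace_kprobe() each need a matching kfree(). A hedged userspace sketch of the paired error-path cleanup shape, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct event { char *print_fmt; };

static int event_register(struct event *ev) { (void)ev; return -1; /* simulate failure */ }

static struct event *event_create(void)
{
        struct event *ev = calloc(1, sizeof(*ev));

        if (!ev)
                return NULL;

        ev->print_fmt = strdup("\"fmt\"");
        if (!ev->print_fmt)
                goto err_free_ev;

        if (event_register(ev) < 0)
                goto err_free_fmt;      /* the free the patch adds */

        return ev;

err_free_fmt:
        free(ev->print_fmt);
err_free_ev:
        free(ev);
        return NULL;
}

int main(void)
{
        struct event *ev = event_create();

        printf("ev=%p\n", (void *)ev);  /* NULL: failure path, nothing leaked */
        return 0;
}
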
index 90db994ac9004d2fc7163eeb95b8c79e121e991a..1c8e30fda46a8a4abab5c748868e52c06a8b30ea 100644 (file)
@@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter)
 
        trace_find_cmdline(entry->pid, comm);
 
-       trace_seq_printf(s, "%16s-%-5d [%03d] ",
-                              comm, entry->pid, iter->cpu);
+       trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 
        if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
                unsigned int tgid = trace_find_tgid(entry->pid);
@@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter)
                        trace_seq_printf(s, "(%5d) ", tgid);
        }
 
+       trace_seq_printf(s, "[%03d] ", iter->cpu);
+
        if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
                trace_print_lat_fmt(s, entry);
 
index e34b04b56057a86cd0ade5cb9fcb4919730f80a2..706836ec314d2add83b84ebe59dec8fd7e13a7ad 100644 (file)
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
-config HAS_DMA
-       bool
-       depends on !NO_DMA
-       default y
+source "kernel/dma/Kconfig"
 
 config SGL_ALLOC
        bool
        default n
 
-config NEED_SG_DMA_LENGTH
-       bool
-
-config NEED_DMA_MAP_STATE
-       bool
-
-config ARCH_DMA_ADDR_T_64BIT
-       def_bool 64BIT || PHYS_ADDR_T_64BIT
-
 config IOMMU_HELPER
        bool
 
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
-       bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
-       bool
-       select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
-       bool
-       depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
-       bool
-       depends on HAS_DMA
-       select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
-       bool
-       depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
-       bool
-       depends on HAS_DMA
-
-config SWIOTLB
-       bool
-       select DMA_DIRECT_OPS
-       select NEED_DMA_MAP_STATE
-
 config CHECK_SIGNATURE
        bool
 
index 3d35d062970d2459ecee5573cf512a999061b3ab..c253c1b46c6b12b2a7f2f3879bba0cf698a47866 100644 (file)
@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
 config KASAN
        bool "KASan: runtime memory debugger"
        depends on SLUB || (SLAB && !DEBUG_SLAB)
+       select SLUB_DEBUG if SLUB
        select CONSTRUCTORS
        select STACKDEPOT
        help
index 956b320292fef9a4055a1a955f37b6f41c2a4b71..90dc5520b7849dc69dc4c3df3ea419c45e9451cc 100644 (file)
@@ -23,15 +23,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         sha1.o chacha20.o irq_regs.o argv_split.o \
         flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-        earlycpio.o seq_buf.o siphash.o \
+        earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
         nmi_backtrace.o nodemask.o win_minmax.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y  += kobject.o klist.o
 obj-y  += lockref.o
@@ -98,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
-  lib-y += dec_and_lock.o
-endif
-
 obj-$(CONFIG_BITREVERSE) += bitrev.o
 obj-$(CONFIG_RATIONAL) += rational.o
 obj-$(CONFIG_CRC_CCITT)        += crc-ccitt.o
@@ -148,7 +141,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
 obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
 obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
 obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +161,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
 
 obj-$(CONFIG_LRU_CACHE) += lru_cache.o
 
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
 obj-$(CONFIG_GENERIC_CSUM) += checksum.o
 
 obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
index 347fa7ac2e8a858827415d44725d256d2a9e96a3..9555b68bb774cc3277dca434d19880286d71df0e 100644 (file)
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 }
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+                                unsigned long *flags)
+{
+       /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+       if (atomic_add_unless(atomic, -1, 1))
+               return 0;
+
+       /* Otherwise do it the slow way */
+       spin_lock_irqsave(lock, *flags);
+       if (atomic_dec_and_test(atomic))
+               return 1;
+       spin_unlock_irqrestore(lock, *flags);
+       return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
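
[Editor's note] _atomic_dec_and_lock_irqsave() extends the classic dec-and-lock pattern to callers whose lock must be taken with interrupts disabled: the lock-free fast path decrements unless the count would hit zero, and only the final put takes the lock (saving the IRQ flags through the extra pointer parameter). A userspace pthread model of the same pattern; IRQ disabling has no userspace analogue, so only the locking is modeled:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Returns 1 with the lock held if this was the final reference. */
static int dec_and_lock(atomic_int *count, pthread_mutex_t *lock)
{
        int old = atomic_load(count);

        /* Fast path: subtract 1 unless that would drop the count to 0. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(count, &old, old - 1))
                        return 0;
        }

        /* Slow path: serialize the final decrement under the lock. */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(count, 1) == 1)
                return 1;
        pthread_mutex_unlock(lock);
        return 0;
}

int main(void)
{
        atomic_int count = 1;
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

        if (dec_and_lock(&count, &lock)) {
                printf("last reference: tearing down under lock\n");
                pthread_mutex_unlock(&lock);
        }
        return 0;
}
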
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
deleted file mode 100644 (file)
index c007d25..0000000
+++ /dev/null
@@ -1,1773 +0,0 @@
-/*
- * Copyright (C) 2008 Advanced Micro Devices, Inc.
- *
- * Author: Joerg Roedel <joerg.roedel@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#include <linux/sched/task_stack.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched/task.h>
-#include <linux/stacktrace.h>
-#include <linux/dma-debug.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/ctype.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include <asm/sections.h>
-
-#define HASH_SIZE       1024ULL
-#define HASH_FN_SHIFT   13
-#define HASH_FN_MASK    (HASH_SIZE - 1)
-
-/* allow architectures to override this if absolutely required */
-#ifndef PREALLOC_DMA_DEBUG_ENTRIES
-#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-#endif
-
-enum {
-       dma_debug_single,
-       dma_debug_page,
-       dma_debug_sg,
-       dma_debug_coherent,
-       dma_debug_resource,
-};
-
-enum map_err_types {
-       MAP_ERR_CHECK_NOT_APPLICABLE,
-       MAP_ERR_NOT_CHECKED,
-       MAP_ERR_CHECKED,
-};
-
-#define DMA_DEBUG_STACKTRACE_ENTRIES 5
-
-/**
- * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
- * @list: node on pre-allocated free_entries list
- * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
- * @type: single, page, sg, coherent
- * @pfn: page frame of the start address
- * @offset: offset of mapping relative to pfn
- * @size: length of the mapping
- * @direction: enum dma_data_direction
- * @sg_call_ents: 'nents' from dma_map_sg
- * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
- * @map_err_type: track whether dma_mapping_error() was checked
- * @stacktrace: support backtraces when a violation is detected
- */
-struct dma_debug_entry {
-       struct list_head list;
-       struct device    *dev;
-       int              type;
-       unsigned long    pfn;
-       size_t           offset;
-       u64              dev_addr;
-       u64              size;
-       int              direction;
-       int              sg_call_ents;
-       int              sg_mapped_ents;
-       enum map_err_types  map_err_type;
-#ifdef CONFIG_STACKTRACE
-       struct           stack_trace stacktrace;
-       unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
-#endif
-};
-
-typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
-
-struct hash_bucket {
-       struct list_head list;
-       spinlock_t lock;
-} ____cacheline_aligned_in_smp;
-
-/* Hash list to save the allocated dma addresses */
-static struct hash_bucket dma_entry_hash[HASH_SIZE];
-/* List of pre-allocated dma_debug_entry's */
-static LIST_HEAD(free_entries);
-/* Lock for the list above */
-static DEFINE_SPINLOCK(free_entries_lock);
-
-/* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
-
-/* Early initialization disable flag, set at the end of dma_debug_init */
-static bool dma_debug_initialized __read_mostly;
-
-static inline bool dma_debug_disabled(void)
-{
-       return global_disable || !dma_debug_initialized;
-}
-
-/* Global error count */
-static u32 error_count;
-
-/* Global error show enable*/
-static u32 show_all_errors __read_mostly;
-/* Number of errors to show */
-static u32 show_num_errors = 1;
-
-static u32 num_free_entries;
-static u32 min_free_entries;
-static u32 nr_total_entries;
-
-/* number of preallocated entries requested by kernel cmdline */
-static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-
-/* debugfs dentry's for the stuff above */
-static struct dentry *dma_debug_dent        __read_mostly;
-static struct dentry *global_disable_dent   __read_mostly;
-static struct dentry *error_count_dent      __read_mostly;
-static struct dentry *show_all_errors_dent  __read_mostly;
-static struct dentry *show_num_errors_dent  __read_mostly;
-static struct dentry *num_free_entries_dent __read_mostly;
-static struct dentry *min_free_entries_dent __read_mostly;
-static struct dentry *filter_dent           __read_mostly;
-
-/* per-driver filter related state */
-
-#define NAME_MAX_LEN   64
-
-static char                  current_driver_name[NAME_MAX_LEN] __read_mostly;
-static struct device_driver *current_driver                    __read_mostly;
-
-static DEFINE_RWLOCK(driver_name_lock);
-
-static const char *const maperr2str[] = {
-       [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
-       [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
-       [MAP_ERR_CHECKED] = "dma map error checked",
-};
-
-static const char *type2name[5] = { "single", "page",
-                                   "scather-gather", "coherent",
-                                   "resource" };
-
-static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
-                                  "DMA_FROM_DEVICE", "DMA_NONE" };
-
-/*
- * The access to some variables in this macro is racy. We can't use atomic_t
- * here because all these variables are exported to debugfs. Some of them even
- * writeable. This is also the reason why a lock won't help much. But anyway,
- * the races are no big deal. Here is why:
- *
- *   error_count: the addition is racy, but the worst thing that can happen is
- *                that we don't count some errors
- *   show_num_errors: the subtraction is racy. Also no big deal because in
- *                    worst case this will result in one warning more in the
- *                    system log than the user configured. This variable is
- *                    writeable via debugfs.
- */
-static inline void dump_entry_trace(struct dma_debug_entry *entry)
-{
-#ifdef CONFIG_STACKTRACE
-       if (entry) {
-               pr_warning("Mapped at:\n");
-               print_stack_trace(&entry->stacktrace, 0);
-       }
-#endif
-}
-
-static bool driver_filter(struct device *dev)
-{
-       struct device_driver *drv;
-       unsigned long flags;
-       bool ret;
-
-       /* driver filter off */
-       if (likely(!current_driver_name[0]))
-               return true;
-
-       /* driver filter on and initialized */
-       if (current_driver && dev && dev->driver == current_driver)
-               return true;
-
-       /* driver filter on, but we can't filter on a NULL device... */
-       if (!dev)
-               return false;
-
-       if (current_driver || !current_driver_name[0])
-               return false;
-
-       /* driver filter on but not yet initialized */
-       drv = dev->driver;
-       if (!drv)
-               return false;
-
-       /* lock to protect against change of current_driver_name */
-       read_lock_irqsave(&driver_name_lock, flags);
-
-       ret = false;
-       if (drv->name &&
-           strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
-               current_driver = drv;
-               ret = true;
-       }
-
-       read_unlock_irqrestore(&driver_name_lock, flags);
-
-       return ret;
-}
-
-#define err_printk(dev, entry, format, arg...) do {                    \
-               error_count += 1;                                       \
-               if (driver_filter(dev) &&                               \
-                   (show_all_errors || show_num_errors > 0)) {         \
-                       WARN(1, "%s %s: " format,                       \
-                            dev ? dev_driver_string(dev) : "NULL",     \
-                            dev ? dev_name(dev) : "NULL", ## arg);     \
-                       dump_entry_trace(entry);                        \
-               }                                                       \
-               if (!show_all_errors && show_num_errors > 0)            \
-                       show_num_errors -= 1;                           \
-       } while (0);
-
-/*
- * Hash related functions
- *
- * Every DMA-API request is saved into a struct dma_debug_entry. To
- * have quick access to these structs they are stored into a hash.
- */
-static int hash_fn(struct dma_debug_entry *entry)
-{
-       /*
-        * Hash function is based on the dma address.
-        * We use bits 20-27 here as the index into the hash
-        */
-       return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
-}
-
-/*
- * Request exclusive access to a hash bucket for a given dma_debug_entry.
- */
-static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
-                                          unsigned long *flags)
-       __acquires(&dma_entry_hash[idx].lock)
-{
-       int idx = hash_fn(entry);
-       unsigned long __flags;
-
-       spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
-       *flags = __flags;
-       return &dma_entry_hash[idx];
-}
-
-/*
- * Give up exclusive access to the hash bucket
- */
-static void put_hash_bucket(struct hash_bucket *bucket,
-                           unsigned long *flags)
-       __releases(&bucket->lock)
-{
-       unsigned long __flags = *flags;
-
-       spin_unlock_irqrestore(&bucket->lock, __flags);
-}
-
-static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
-{
-       return ((a->dev_addr == b->dev_addr) &&
-               (a->dev == b->dev)) ? true : false;
-}
-
-static bool containing_match(struct dma_debug_entry *a,
-                            struct dma_debug_entry *b)
-{
-       if (a->dev != b->dev)
-               return false;
-
-       if ((b->dev_addr <= a->dev_addr) &&
-           ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
-               return true;
-
-       return false;
-}
-
-/*
- * Search a given entry in the hash bucket list
- */
-static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
-                                                 struct dma_debug_entry *ref,
-                                                 match_fn match)
-{
-       struct dma_debug_entry *entry, *ret = NULL;
-       int matches = 0, match_lvl, last_lvl = -1;
-
-       list_for_each_entry(entry, &bucket->list, list) {
-               if (!match(ref, entry))
-                       continue;
-
-               /*
-                * Some drivers map the same physical address multiple
-                * times. Without a hardware IOMMU this results in the
-                * same device addresses being put into the dma-debug
-                * hash multiple times too. This can result in false
-                * positives being reported. Therefore we implement a
-                * best-fit algorithm here which returns the entry from
-                * the hash which fits best to the reference value
-                * instead of the first-fit.
-                */
-               matches += 1;
-               match_lvl = 0;
-               entry->size         == ref->size         ? ++match_lvl : 0;
-               entry->type         == ref->type         ? ++match_lvl : 0;
-               entry->direction    == ref->direction    ? ++match_lvl : 0;
-               entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
-
-               if (match_lvl == 4) {
-                       /* perfect-fit - return the result */
-                       return entry;
-               } else if (match_lvl > last_lvl) {
-                       /*
-                        * We found an entry that fits better then the
-                        * previous one or it is the 1st match.
-                        */
-                       last_lvl = match_lvl;
-                       ret      = entry;
-               }
-       }
-
-       /*
-        * If we have multiple matches but no perfect-fit, just return
-        * NULL.
-        */
-       ret = (matches == 1) ? ret : NULL;
-
-       return ret;
-}
-
-static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
-                                                struct dma_debug_entry *ref)
-{
-       return __hash_bucket_find(bucket, ref, exact_match);
-}
-
-static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
-                                                  struct dma_debug_entry *ref,
-                                                  unsigned long *flags)
-{
-
-       unsigned int max_range = dma_get_max_seg_size(ref->dev);
-       struct dma_debug_entry *entry, index = *ref;
-       unsigned int range = 0;
-
-       while (range <= max_range) {
-               entry = __hash_bucket_find(*bucket, ref, containing_match);
-
-               if (entry)
-                       return entry;
-
-               /*
-                * Nothing found, go back a hash bucket
-                */
-               put_hash_bucket(*bucket, flags);
-               range          += (1 << HASH_FN_SHIFT);
-               index.dev_addr -= (1 << HASH_FN_SHIFT);
-               *bucket = get_hash_bucket(&index, flags);
-       }
-
-       return NULL;
-}
-
-/*
- * Add an entry to a hash bucket
- */
-static void hash_bucket_add(struct hash_bucket *bucket,
-                           struct dma_debug_entry *entry)
-{
-       list_add_tail(&entry->list, &bucket->list);
-}
-
-/*
- * Remove entry from a hash bucket list
- */
-static void hash_bucket_del(struct dma_debug_entry *entry)
-{
-       list_del(&entry->list);
-}
-
-static unsigned long long phys_addr(struct dma_debug_entry *entry)
-{
-       if (entry->type == dma_debug_resource)
-               return __pfn_to_phys(entry->pfn) + entry->offset;
-
-       return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
-}
-
-/*
- * Dump mapping entries for debugging purposes
- */
-void debug_dma_dump_mappings(struct device *dev)
-{
-       int idx;
-
-       for (idx = 0; idx < HASH_SIZE; idx++) {
-               struct hash_bucket *bucket = &dma_entry_hash[idx];
-               struct dma_debug_entry *entry;
-               unsigned long flags;
-
-               spin_lock_irqsave(&bucket->lock, flags);
-
-               list_for_each_entry(entry, &bucket->list, list) {
-                       if (!dev || dev == entry->dev) {
-                               dev_info(entry->dev,
-                                        "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
-                                        type2name[entry->type], idx,
-                                        phys_addr(entry), entry->pfn,
-                                        entry->dev_addr, entry->size,
-                                        dir2name[entry->direction],
-                                        maperr2str[entry->map_err_type]);
-                       }
-               }
-
-               spin_unlock_irqrestore(&bucket->lock, flags);
-       }
-}
-
-/*
- * For each mapping (initial cacheline in the case of
- * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
- * scatterlist, or the cacheline specified in dma_map_single) insert
- * into this tree using the cacheline as the key. At
- * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry.  If
- * the entry already exists at insertion time add a tag as a reference
- * count for the overlapping mappings.  For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the
- * cacheline idle, but we should also be flagging overlaps as an API
- * violation.
- *
- * Memory usage is mostly constrained by the maximum number of available
- * dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree.  In the case of dma_map_page and
- * dma_alloc_coherent there is only one dma_debug_entry and one
- * dma_active_cacheline entry to track per event.  dma_map_sg(), on the
- * other hand, consumes a single dma_debug_entry, but inserts 'nents'
- * entries into the tree.
- *
- * At any time debug_dma_assert_idle() can be called to trigger a
- * warning if any cachelines in the given page are in the active set.
- */
-static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
-static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
-#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
-#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-
-static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
-{
-       return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
-               (entry->offset >> L1_CACHE_SHIFT);
-}
-
-static int active_cacheline_read_overlap(phys_addr_t cln)
-{
-       int overlap = 0, i;
-
-       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-               if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
-                       overlap |= 1 << i;
-       return overlap;
-}
-
-static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
-{
-       int i;
-
-       if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
-               return overlap;
-
-       for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
-               if (overlap & 1 << i)
-                       radix_tree_tag_set(&dma_active_cacheline, cln, i);
-               else
-                       radix_tree_tag_clear(&dma_active_cacheline, cln, i);
-
-       return overlap;
-}
-
-static void active_cacheline_inc_overlap(phys_addr_t cln)
-{
-       int overlap = active_cacheline_read_overlap(cln);
-
-       overlap = active_cacheline_set_overlap(cln, ++overlap);
-
-       /* If we overflowed the overlap counter then we're potentially
-        * leaking dma-mappings.  Otherwise, if maps and unmaps are
-        * balanced then this overflow may cause false negatives in
-        * debug_dma_assert_idle() as the cacheline may be marked idle
-        * prematurely.
-        */
-       WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
-                 "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
-                 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
-}
-
-static int active_cacheline_dec_overlap(phys_addr_t cln)
-{
-       int overlap = active_cacheline_read_overlap(cln);
-
-       return active_cacheline_set_overlap(cln, --overlap);
-}
-
-static int active_cacheline_insert(struct dma_debug_entry *entry)
-{
-       phys_addr_t cln = to_cacheline_number(entry);
-       unsigned long flags;
-       int rc;
-
-       /* If the device is not writing memory then we don't have any
-        * concerns about the cpu consuming stale data.  This mitigates
-        * legitimate usages of overlapping mappings.
-        */
-       if (entry->direction == DMA_TO_DEVICE)
-               return 0;
-
-       spin_lock_irqsave(&radix_lock, flags);
-       rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
-       if (rc == -EEXIST)
-               active_cacheline_inc_overlap(cln);
-       spin_unlock_irqrestore(&radix_lock, flags);
-
-       return rc;
-}
-
-static void active_cacheline_remove(struct dma_debug_entry *entry)
-{
-       phys_addr_t cln = to_cacheline_number(entry);
-       unsigned long flags;
-
-       /* ...mirror the insert case */
-       if (entry->direction == DMA_TO_DEVICE)
-               return;
-
-       spin_lock_irqsave(&radix_lock, flags);
-       /* since we are counting overlaps the final put of the
-        * cacheline will occur when the overlap count is 0.
-        * active_cacheline_dec_overlap() returns -1 in that case
-        */
-       if (active_cacheline_dec_overlap(cln) < 0)
-               radix_tree_delete(&dma_active_cacheline, cln);
-       spin_unlock_irqrestore(&radix_lock, flags);
-}
-
-/**
- * debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_cacheline tree
- *
- * Place a call to this routine in cases where the cpu touching the page
- * before the dma completes (page is dma_unmapped) will lead to data
- * corruption.
- */
-void debug_dma_assert_idle(struct page *page)
-{
-       static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
-       struct dma_debug_entry *entry = NULL;
-       void **results = (void **) &ents;
-       unsigned int nents, i;
-       unsigned long flags;
-       phys_addr_t cln;
-
-       if (dma_debug_disabled())
-               return;
-
-       if (!page)
-               return;
-
-       cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
-       spin_lock_irqsave(&radix_lock, flags);
-       nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
-                                      CACHELINES_PER_PAGE);
-       for (i = 0; i < nents; i++) {
-               phys_addr_t ent_cln = to_cacheline_number(ents[i]);
-
-               if (ent_cln == cln) {
-                       entry = ents[i];
-                       break;
-               } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
-                       break;
-       }
-       spin_unlock_irqrestore(&radix_lock, flags);
-
-       if (!entry)
-               return;
-
-       cln = to_cacheline_number(entry);
-       err_printk(entry->dev, entry,
-                  "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
-                  &cln);
-}
-
-/*
- * Wrapper function for adding an entry to the hash.
- * This function takes care of locking itself.
- */
-static void add_dma_entry(struct dma_debug_entry *entry)
-{
-       struct hash_bucket *bucket;
-       unsigned long flags;
-       int rc;
-
-       bucket = get_hash_bucket(entry, &flags);
-       hash_bucket_add(bucket, entry);
-       put_hash_bucket(bucket, &flags);
-
-       rc = active_cacheline_insert(entry);
-       if (rc == -ENOMEM) {
-               pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
-               global_disable = true;
-       }
-
-       /* TODO: report -EEXIST errors here as overlapping mappings are
-        * not supported by the DMA API
-        */
-}
-
-static struct dma_debug_entry *__dma_entry_alloc(void)
-{
-       struct dma_debug_entry *entry;
-
-       entry = list_entry(free_entries.next, struct dma_debug_entry, list);
-       list_del(&entry->list);
-       memset(entry, 0, sizeof(*entry));
-
-       num_free_entries -= 1;
-       if (num_free_entries < min_free_entries)
-               min_free_entries = num_free_entries;
-
-       return entry;
-}
-
-/* struct dma_entry allocator
- *
- * The next two functions implement the allocator for
- * struct dma_debug_entries.
- */
-static struct dma_debug_entry *dma_entry_alloc(void)
-{
-       struct dma_debug_entry *entry;
-       unsigned long flags;
-
-       spin_lock_irqsave(&free_entries_lock, flags);
-
-       if (list_empty(&free_entries)) {
-               global_disable = true;
-               spin_unlock_irqrestore(&free_entries_lock, flags);
-               pr_err("DMA-API: debugging out of memory - disabling\n");
-               return NULL;
-       }
-
-       entry = __dma_entry_alloc();
-
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-
-#ifdef CONFIG_STACKTRACE
-       entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
-       entry->stacktrace.entries = entry->st_entries;
-       entry->stacktrace.skip = 2;
-       save_stack_trace(&entry->stacktrace);
-#endif
-
-       return entry;
-}
-
-static void dma_entry_free(struct dma_debug_entry *entry)
-{
-       unsigned long flags;
-
-       active_cacheline_remove(entry);
-
-       /*
-        * add to beginning of the list - this way the entries are
-        * more likely cache hot when they are reallocated.
-        */
-       spin_lock_irqsave(&free_entries_lock, flags);
-       list_add(&entry->list, &free_entries);
-       num_free_entries += 1;
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-}
-
-int dma_debug_resize_entries(u32 num_entries)
-{
-       int i, delta, ret = 0;
-       unsigned long flags;
-       struct dma_debug_entry *entry;
-       LIST_HEAD(tmp);
-
-       spin_lock_irqsave(&free_entries_lock, flags);
-
-       if (nr_total_entries < num_entries) {
-               delta = num_entries - nr_total_entries;
-
-               spin_unlock_irqrestore(&free_entries_lock, flags);
-
-               for (i = 0; i < delta; i++) {
-                       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-                       if (!entry)
-                               break;
-
-                       list_add_tail(&entry->list, &tmp);
-               }
-
-               spin_lock_irqsave(&free_entries_lock, flags);
-
-               list_splice(&tmp, &free_entries);
-               nr_total_entries += i;
-               num_free_entries += i;
-       } else {
-               delta = nr_total_entries - num_entries;
-
-               for (i = 0; i < delta && !list_empty(&free_entries); i++) {
-                       entry = __dma_entry_alloc();
-                       kfree(entry);
-               }
-
-               nr_total_entries -= i;
-       }
-
-       if (nr_total_entries != num_entries)
-               ret = 1;
-
-       spin_unlock_irqrestore(&free_entries_lock, flags);
-
-       return ret;
-}
-
-/*
- * DMA-API debugging init code
- *
- * The init code does two things:
- *   1. Initialize core data structures
- *   2. Preallocate a given number of dma_debug_entry structs
- */
-
-static int prealloc_memory(u32 num_entries)
-{
-       struct dma_debug_entry *entry, *next_entry;
-       int i;
-
-       for (i = 0; i < num_entries; ++i) {
-               entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-               if (!entry)
-                       goto out_err;
-
-               list_add_tail(&entry->list, &free_entries);
-       }
-
-       num_free_entries = num_entries;
-       min_free_entries = num_entries;
-
-       pr_info("DMA-API: preallocated %d debug entries\n", num_entries);
-
-       return 0;
-
-out_err:
-
-       list_for_each_entry_safe(entry, next_entry, &free_entries, list) {
-               list_del(&entry->list);
-               kfree(entry);
-       }
-
-       return -ENOMEM;
-}
-
-static ssize_t filter_read(struct file *file, char __user *user_buf,
-                          size_t count, loff_t *ppos)
-{
-       char buf[NAME_MAX_LEN + 1];
-       unsigned long flags;
-       int len;
-
-       if (!current_driver_name[0])
-               return 0;
-
-       /*
-        * We can't copy to userspace directly because current_driver_name can
-        * only be read under the driver_name_lock with irqs disabled. So
-        * create a temporary copy first.
-        */
-       read_lock_irqsave(&driver_name_lock, flags);
-       len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
-       read_unlock_irqrestore(&driver_name_lock, flags);
-
-       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t filter_write(struct file *file, const char __user *userbuf,
-                           size_t count, loff_t *ppos)
-{
-       char buf[NAME_MAX_LEN];
-       unsigned long flags;
-       size_t len;
-       int i;
-
-       /*
-        * We can't copy from userspace directly. Access to
-        * current_driver_name is protected with a write_lock with irqs
-        * disabled. Since copy_from_user can fault and may sleep we
-        * need to copy to temporary buffer first
-        */
-       len = min(count, (size_t)(NAME_MAX_LEN - 1));
-       if (copy_from_user(buf, userbuf, len))
-               return -EFAULT;
-
-       buf[len] = 0;
-
-       write_lock_irqsave(&driver_name_lock, flags);
-
-       /*
-        * Now handle the string we got from userspace very carefully.
-        * The rules are:
-        *         - only use the first token we got
-        *         - token delimiter is everything looking like a space
-        *           character (' ', '\n', '\t' ...)
-        *
-        */
-       if (!isalnum(buf[0])) {
-               /*
-                * If the first character userspace gave us is not
-                * alphanumerical then assume the filter should be
-                * switched off.
-                */
-               if (current_driver_name[0])
-                       pr_info("DMA-API: switching off dma-debug driver filter\n");
-               current_driver_name[0] = 0;
-               current_driver = NULL;
-               goto out_unlock;
-       }
-
-       /*
-        * Now parse out the first token and use it as the name for the
-        * driver to filter for.
-        */
-       for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
-               current_driver_name[i] = buf[i];
-               if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
-                       break;
-       }
-       current_driver_name[i] = 0;
-       current_driver = NULL;
-
-       pr_info("DMA-API: enable driver filter for driver [%s]\n",
-               current_driver_name);
-
-out_unlock:
-       write_unlock_irqrestore(&driver_name_lock, flags);
-
-       return count;
-}
-
-static const struct file_operations filter_fops = {
-       .read  = filter_read,
-       .write = filter_write,
-       .llseek = default_llseek,
-};
-
-static int dma_debug_fs_init(void)
-{
-       dma_debug_dent = debugfs_create_dir("dma-api", NULL);
-       if (!dma_debug_dent) {
-               pr_err("DMA-API: can not create debugfs directory\n");
-               return -ENOMEM;
-       }
-
-       global_disable_dent = debugfs_create_bool("disabled", 0444,
-                       dma_debug_dent,
-                       &global_disable);
-       if (!global_disable_dent)
-               goto out_err;
-
-       error_count_dent = debugfs_create_u32("error_count", 0444,
-                       dma_debug_dent, &error_count);
-       if (!error_count_dent)
-               goto out_err;
-
-       show_all_errors_dent = debugfs_create_u32("all_errors", 0644,
-                       dma_debug_dent,
-                       &show_all_errors);
-       if (!show_all_errors_dent)
-               goto out_err;
-
-       show_num_errors_dent = debugfs_create_u32("num_errors", 0644,
-                       dma_debug_dent,
-                       &show_num_errors);
-       if (!show_num_errors_dent)
-               goto out_err;
-
-       num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444,
-                       dma_debug_dent,
-                       &num_free_entries);
-       if (!num_free_entries_dent)
-               goto out_err;
-
-       min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444,
-                       dma_debug_dent,
-                       &min_free_entries);
-       if (!min_free_entries_dent)
-               goto out_err;
-
-       filter_dent = debugfs_create_file("driver_filter", 0644,
-                                         dma_debug_dent, NULL, &filter_fops);
-       if (!filter_dent)
-               goto out_err;
-
-       return 0;
-
-out_err:
-       debugfs_remove_recursive(dma_debug_dent);
-
-       return -ENOMEM;
-}
-
-static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
-{
-       struct dma_debug_entry *entry;
-       unsigned long flags;
-       int count = 0, i;
-
-       for (i = 0; i < HASH_SIZE; ++i) {
-               spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
-               list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
-                       if (entry->dev == dev) {
-                               count += 1;
-                               *out_entry = entry;
-                       }
-               }
-               spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
-       }
-
-       return count;
-}
-
-static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
-{
-       struct device *dev = data;
-       struct dma_debug_entry *uninitialized_var(entry);
-       int count;
-
-       if (dma_debug_disabled())
-               return 0;
-
-       switch (action) {
-       case BUS_NOTIFY_UNBOUND_DRIVER:
-               count = device_dma_allocations(dev, &entry);
-               if (count == 0)
-                       break;
-               err_printk(dev, entry, "DMA-API: device driver has pending "
-                               "DMA allocations while released from device "
-                               "[count=%d]\n"
-                               "One of leaked entries details: "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [mapped as %s]\n",
-                       count, entry->dev_addr, entry->size,
-                       dir2name[entry->direction], type2name[entry->type]);
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-void dma_debug_add_bus(struct bus_type *bus)
-{
-       struct notifier_block *nb;
-
-       if (dma_debug_disabled())
-               return;
-
-       nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
-       if (nb == NULL) {
-               pr_err("dma_debug_add_bus: out of memory\n");
-               return;
-       }
-
-       nb->notifier_call = dma_debug_device_change;
-
-       bus_register_notifier(bus, nb);
-}
-
-static int dma_debug_init(void)
-{
-       int i;
-
-       /* Do not use dma_debug_initialized here, since we really want to be
-        * called to set dma_debug_initialized
-        */
-       if (global_disable)
-               return 0;
-
-       for (i = 0; i < HASH_SIZE; ++i) {
-               INIT_LIST_HEAD(&dma_entry_hash[i].list);
-               spin_lock_init(&dma_entry_hash[i].lock);
-       }
-
-       if (dma_debug_fs_init() != 0) {
-               pr_err("DMA-API: error creating debugfs entries - disabling\n");
-               global_disable = true;
-
-               return 0;
-       }
-
-       if (prealloc_memory(nr_prealloc_entries) != 0) {
-               pr_err("DMA-API: debugging out of memory error - disabled\n");
-               global_disable = true;
-
-               return 0;
-       }
-
-       nr_total_entries = num_free_entries;
-
-       dma_debug_initialized = true;
-
-       pr_info("DMA-API: debugging enabled by kernel config\n");
-       return 0;
-}
-core_initcall(dma_debug_init);
-
-static __init int dma_debug_cmdline(char *str)
-{
-       if (!str)
-               return -EINVAL;
-
-       if (strncmp(str, "off", 3) == 0) {
-               pr_info("DMA-API: debugging disabled on kernel command line\n");
-               global_disable = true;
-       }
-
-       return 0;
-}
-
-static __init int dma_debug_entries_cmdline(char *str)
-{
-       if (!str)
-               return -EINVAL;
-       if (!get_option(&str, &nr_prealloc_entries))
-               nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-       return 0;
-}
-
-__setup("dma_debug=", dma_debug_cmdline);
-__setup("dma_debug_entries=", dma_debug_entries_cmdline);
-
-static void check_unmap(struct dma_debug_entry *ref)
-{
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-
-       bucket = get_hash_bucket(ref, &flags);
-       entry = bucket_find_exact(bucket, ref);
-
-       if (!entry) {
-               /* must drop lock before calling dma_mapping_error */
-               put_hash_bucket(bucket, &flags);
-
-               if (dma_mapping_error(ref->dev, ref->dev_addr)) {
-                       err_printk(ref->dev, NULL,
-                                  "DMA-API: device driver tries to free an "
-                                  "invalid DMA memory address\n");
-               } else {
-                       err_printk(ref->dev, NULL,
-                                  "DMA-API: device driver tries to free DMA "
-                                  "memory it has not allocated [device "
-                                  "address=0x%016llx] [size=%llu bytes]\n",
-                                  ref->dev_addr, ref->size);
-               }
-               return;
-       }
-
-       if (ref->size != entry->size) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with different size "
-                          "[device address=0x%016llx] [map size=%llu bytes] "
-                          "[unmap size=%llu bytes]\n",
-                          ref->dev_addr, entry->size, ref->size);
-       }
-
-       if (ref->type != entry->type) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with wrong function "
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[mapped as %s] [unmapped as %s]\n",
-                          ref->dev_addr, ref->size,
-                          type2name[entry->type], type2name[ref->type]);
-       } else if ((entry->type == dma_debug_coherent) &&
-                  (phys_addr(ref) != phys_addr(entry))) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with different CPU address "
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[cpu alloc address=0x%016llx] "
-                          "[cpu free address=0x%016llx]",
-                          ref->dev_addr, ref->size,
-                          phys_addr(entry),
-                          phys_addr(ref));
-       }
-
-       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
-           ref->sg_call_ents != entry->sg_call_ents) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA sg list with different entry count "
-                          "[map count=%d] [unmap count=%d]\n",
-                          entry->sg_call_ents, ref->sg_call_ents);
-       }
-
-       /*
-        * This may be no bug in reality - but most implementations of the
-        * DMA API don't handle this properly, so check for it here
-        */
-       if (ref->direction != entry->direction) {
-               err_printk(ref->dev, entry, "DMA-API: device driver frees "
-                          "DMA memory with different direction "
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[mapped with %s] [unmapped with %s]\n",
-                          ref->dev_addr, ref->size,
-                          dir2name[entry->direction],
-                          dir2name[ref->direction]);
-       }
-
-       /*
-        * Drivers should use dma_mapping_error() to check the returned
-        * addresses of dma_map_single() and dma_map_page().
-        * If not, print this warning message. See Documentation/DMA-API.txt.
-        */
-       if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
-               err_printk(ref->dev, entry,
-                          "DMA-API: device driver failed to check map error"
-                          "[device address=0x%016llx] [size=%llu bytes] "
-                          "[mapped as %s]",
-                          ref->dev_addr, ref->size,
-                          type2name[entry->type]);
-       }
-
-       hash_bucket_del(entry);
-       dma_entry_free(entry);
-
-       put_hash_bucket(bucket, &flags);
-}
-
-static void check_for_stack(struct device *dev,
-                           struct page *page, size_t offset)
-{
-       void *addr;
-       struct vm_struct *stack_vm_area = task_stack_vm_area(current);
-
-       if (!stack_vm_area) {
-               /* Stack is direct-mapped. */
-               if (PageHighMem(page))
-                       return;
-               addr = page_address(page) + offset;
-               if (object_is_on_stack(addr))
-                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [addr=%p]\n", addr);
-       } else {
-               /* Stack is vmalloced. */
-               int i;
-
-               for (i = 0; i < stack_vm_area->nr_pages; i++) {
-                       if (page != stack_vm_area->pages[i])
-                               continue;
-
-                       addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
-                       err_printk(dev, NULL, "DMA-API: device driver maps memory from stack [probable addr=%p]\n", addr);
-                       break;
-               }
-       }
-}
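The stack check above guards against a class of bug that is never valid: mapping an on-stack buffer for DMA (with CONFIG_VMAP_STACK the stack is vmalloc'ed and not even guaranteed physically contiguous). A minimal sketch of the anti-pattern it flags, as a hypothetical driver fragment (dev is assumed to be a live struct device):

    #include <linux/dma-mapping.h>

    static int bad_stack_dma(struct device *dev)
    {
            char buf[64];   /* on-stack buffer: never a valid DMA target */
            dma_addr_t addr;

            /* dma-debug reports "device driver maps memory from stack" here */
            addr = dma_map_single(dev, buf, sizeof(buf), DMA_TO_DEVICE);
            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;
            dma_unmap_single(dev, addr, sizeof(buf), DMA_TO_DEVICE);
            return 0;
    }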
-
-static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
-{
-       unsigned long a1 = (unsigned long)addr;
-       unsigned long b1 = a1 + len;
-       unsigned long a2 = (unsigned long)start;
-       unsigned long b2 = (unsigned long)end;
-
-       return !(b1 <= a2 || a1 >= b2);
-}
-
-static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
-{
-       if (overlap(addr, len, _stext, _etext) ||
-           overlap(addr, len, __start_rodata, __end_rodata))
-               err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
-}
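overlap() above is the standard half-open interval test: [a1, b1) and [a2, b2) intersect unless one range ends before the other begins, and check_for_illegal_area() applies it to the kernel text and rodata sections, which must never be DMA targets. A standalone restatement of the same predicate, for illustration only:

    #include <stdbool.h>

    /* True iff the half-open ranges [a1, b1) and [a2, b2) intersect. */
    static bool ranges_overlap(unsigned long a1, unsigned long b1,
                               unsigned long a2, unsigned long b2)
    {
            return !(b1 <= a2 || a1 >= b2);
    }
    /* e.g. ranges_overlap(0x1000, 0x1100, 0x10ff, 0x2000) == true */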
-
-static void check_sync(struct device *dev,
-                      struct dma_debug_entry *ref,
-                      bool to_cpu)
-{
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-
-       bucket = get_hash_bucket(ref, &flags);
-
-       entry = bucket_find_contain(&bucket, ref, &flags);
-
-       if (!entry) {
-               err_printk(dev, NULL, "DMA-API: device driver tries "
-                               "to sync DMA memory it has not allocated "
-                               "[device address=0x%016llx] [size=%llu bytes]\n",
-                               (unsigned long long)ref->dev_addr, ref->size);
-               goto out;
-       }
-
-       if (ref->size > entry->size) {
-               err_printk(dev, entry, "DMA-API: device driver syncs"
-                               " DMA memory outside allocated range "
-                               "[device address=0x%016llx] "
-                               "[allocation size=%llu bytes] "
-                               "[sync offset+size=%llu]\n",
-                               entry->dev_addr, entry->size,
-                               ref->size);
-       }
-
-       if (entry->direction == DMA_BIDIRECTIONAL)
-               goto out;
-
-       if (ref->direction != entry->direction) {
-               err_printk(dev, entry, "DMA-API: device driver syncs "
-                               "DMA memory with different direction "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)ref->dev_addr, entry->size,
-                               dir2name[entry->direction],
-                               dir2name[ref->direction]);
-       }
-
-       if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-                     !(ref->direction == DMA_TO_DEVICE))
-               err_printk(dev, entry, "DMA-API: device driver syncs "
-                               "device read-only DMA memory for cpu "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)ref->dev_addr, entry->size,
-                               dir2name[entry->direction],
-                               dir2name[ref->direction]);
-
-       if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-                      !(ref->direction == DMA_FROM_DEVICE))
-               err_printk(dev, entry, "DMA-API: device driver syncs "
-                               "device write-only DMA memory to device "
-                               "[device address=0x%016llx] [size=%llu bytes] "
-                               "[mapped with %s] [synced with %s]\n",
-                               (unsigned long long)ref->dev_addr, entry->size,
-                               dir2name[entry->direction],
-                               dir2name[ref->direction]);
-
-       if (ref->sg_call_ents && ref->type == dma_debug_sg &&
-           ref->sg_call_ents != entry->sg_call_ents) {
-               err_printk(ref->dev, entry, "DMA-API: device driver syncs "
-                          "DMA sg list with different entry count "
-                          "[map count=%d] [sync count=%d]\n",
-                          entry->sg_call_ents, ref->sg_call_ents);
-       }
-
-out:
-       put_hash_bucket(bucket, &flags);
-}
-
-static void check_sg_segment(struct device *dev, struct scatterlist *sg)
-{
-#ifdef CONFIG_DMA_API_DEBUG_SG
-       unsigned int max_seg = dma_get_max_seg_size(dev);
-       u64 start, end, boundary = dma_get_seg_boundary(dev);
-
-       /*
-        * Either the driver forgot to set dma_parms appropriately, or
-        * whoever generated the list forgot to check them.
-        */
-       if (sg->length > max_seg)
-               err_printk(dev, NULL, "DMA-API: mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
-                          sg->length, max_seg);
-       /*
-        * In some cases this could potentially be the DMA API
-        * implementation's fault, but it would usually imply that
-        * the scatterlist was built inappropriately to begin with.
-        */
-       start = sg_dma_address(sg);
-       end = start + sg_dma_len(sg) - 1;
-       if ((start ^ end) & ~boundary)
-               err_printk(dev, NULL, "DMA-API: mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
-                          start, end, boundary);
-#endif
-}
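Both limits consulted here come from the device's dma_parms, which the driver must declare itself. A minimal probe-time sketch with illustrative values (it assumes dev->dma_parms has already been allocated, as 4.18-era drivers had to do by hand):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    static void my_set_dma_limits(struct device *dev)
    {
            dma_set_max_seg_size(dev, SZ_64K);      /* longest allowed sg segment */
            dma_set_seg_boundary(dev, SZ_4G - 1);   /* segments must not cross 4 GiB */
    }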
-
-void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-                       size_t size, int direction, dma_addr_t dma_addr,
-                       bool map_single)
-{
-       struct dma_debug_entry *entry;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       if (dma_mapping_error(dev, dma_addr))
-               return;
-
-       entry = dma_entry_alloc();
-       if (!entry)
-               return;
-
-       entry->dev       = dev;
-       entry->type      = dma_debug_page;
-       entry->pfn       = page_to_pfn(page);
-       entry->offset    = offset;
-       entry->dev_addr  = dma_addr;
-       entry->size      = size;
-       entry->direction = direction;
-       entry->map_err_type = MAP_ERR_NOT_CHECKED;
-
-       if (map_single)
-               entry->type = dma_debug_single;
-
-       check_for_stack(dev, page, offset);
-
-       if (!PageHighMem(page)) {
-               void *addr = page_address(page) + offset;
-
-               check_for_illegal_area(dev, addr, size);
-       }
-
-       add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_page);
-
-void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       struct dma_debug_entry ref;
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.dev = dev;
-       ref.dev_addr = dma_addr;
-       bucket = get_hash_bucket(&ref, &flags);
-
-       list_for_each_entry(entry, &bucket->list, list) {
-               if (!exact_match(&ref, entry))
-                       continue;
-
-               /*
-                * The same physical address can be mapped multiple
-                * times. Without a hardware IOMMU this results in the
-                * same device addresses being put into the dma-debug
-                * hash multiple times too. This can result in false
-                * positives being reported. Therefore we update the first
-                * entry from the hash which fits the reference value and is
-                * not currently listed as being checked.
-                */
-               if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
-                       entry->map_err_type = MAP_ERR_CHECKED;
-                       break;
-               }
-       }
-
-       put_hash_bucket(bucket, &flags);
-}
-EXPORT_SYMBOL(debug_dma_mapping_error);
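The MAP_ERR_NOT_CHECKED bookkeeping above is what enforces the rule the check_unmap() warning refers to: every address returned by dma_map_single() or dma_map_page() must be passed through dma_mapping_error() before use. A compliant fragment (hypothetical; dev, buf and len are assumed to be set up elsewhere):

            dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

            if (dma_mapping_error(dev, addr))   /* flips the entry to MAP_ERR_CHECKED */
                    return -ENOMEM;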
-
-void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
-                         size_t size, int direction, bool map_single)
-{
-       struct dma_debug_entry ref = {
-               .type           = dma_debug_page,
-               .dev            = dev,
-               .dev_addr       = addr,
-               .size           = size,
-               .direction      = direction,
-       };
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       if (map_single)
-               ref.type = dma_debug_single;
-
-       check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_page);
-
-void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                     int nents, int mapped_ents, int direction)
-{
-       struct dma_debug_entry *entry;
-       struct scatterlist *s;
-       int i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sg, s, mapped_ents, i) {
-               entry = dma_entry_alloc();
-               if (!entry)
-                       return;
-
-               entry->type           = dma_debug_sg;
-               entry->dev            = dev;
-               entry->pfn            = page_to_pfn(sg_page(s));
-               entry->offset         = s->offset;
-               entry->size           = sg_dma_len(s);
-               entry->dev_addr       = sg_dma_address(s);
-               entry->direction      = direction;
-               entry->sg_call_ents   = nents;
-               entry->sg_mapped_ents = mapped_ents;
-
-               check_for_stack(dev, sg_page(s), s->offset);
-
-               if (!PageHighMem(sg_page(s))) {
-                       check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
-               }
-
-               check_sg_segment(dev, s);
-
-               add_dma_entry(entry);
-       }
-}
-EXPORT_SYMBOL(debug_dma_map_sg);
-
-static int get_nr_mapped_entries(struct device *dev,
-                                struct dma_debug_entry *ref)
-{
-       struct dma_debug_entry *entry;
-       struct hash_bucket *bucket;
-       unsigned long flags;
-       int mapped_ents;
-
-       bucket       = get_hash_bucket(ref, &flags);
-       entry        = bucket_find_exact(bucket, ref);
-       mapped_ents  = 0;
-
-       if (entry)
-               mapped_ents = entry->sg_mapped_ents;
-       put_hash_bucket(bucket, &flags);
-
-       return mapped_ents;
-}
-
-void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
-                       int nelems, int dir)
-{
-       struct scatterlist *s;
-       int mapped_ents = 0, i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sglist, s, nelems, i) {
-
-               struct dma_debug_entry ref = {
-                       .type           = dma_debug_sg,
-                       .dev            = dev,
-                       .pfn            = page_to_pfn(sg_page(s)),
-                       .offset         = s->offset,
-                       .dev_addr       = sg_dma_address(s),
-                       .size           = sg_dma_len(s),
-                       .direction      = dir,
-                       .sg_call_ents   = nelems,
-               };
-
-               if (mapped_ents && i >= mapped_ents)
-                       break;
-
-               if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, &ref);
-
-               check_unmap(&ref);
-       }
-}
-EXPORT_SYMBOL(debug_dma_unmap_sg);
-
-void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                             dma_addr_t dma_addr, void *virt)
-{
-       struct dma_debug_entry *entry;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       if (unlikely(virt == NULL))
-               return;
-
-       /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
-               return;
-
-       entry = dma_entry_alloc();
-       if (!entry)
-               return;
-
-       entry->type      = dma_debug_coherent;
-       entry->dev       = dev;
-       entry->offset    = offset_in_page(virt);
-       entry->size      = size;
-       entry->dev_addr  = dma_addr;
-       entry->direction = DMA_BIDIRECTIONAL;
-
-       if (is_vmalloc_addr(virt))
-               entry->pfn = vmalloc_to_pfn(virt);
-       else
-               entry->pfn = page_to_pfn(virt_to_page(virt));
-
-       add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_alloc_coherent);
-
-void debug_dma_free_coherent(struct device *dev, size_t size,
-                        void *virt, dma_addr_t addr)
-{
-       struct dma_debug_entry ref = {
-               .type           = dma_debug_coherent,
-               .dev            = dev,
-               .offset         = offset_in_page(virt),
-               .dev_addr       = addr,
-               .size           = size,
-               .direction      = DMA_BIDIRECTIONAL,
-       };
-
-       /* handle vmalloc and linear addresses */
-       if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
-               return;
-
-       if (is_vmalloc_addr(virt))
-               ref.pfn = vmalloc_to_pfn(virt);
-       else
-               ref.pfn = page_to_pfn(virt_to_page(virt));
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_free_coherent);
-
-void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-                           int direction, dma_addr_t dma_addr)
-{
-       struct dma_debug_entry *entry;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       entry = dma_entry_alloc();
-       if (!entry)
-               return;
-
-       entry->type             = dma_debug_resource;
-       entry->dev              = dev;
-       entry->pfn              = PHYS_PFN(addr);
-       entry->offset           = offset_in_page(addr);
-       entry->size             = size;
-       entry->dev_addr         = dma_addr;
-       entry->direction        = direction;
-       entry->map_err_type     = MAP_ERR_NOT_CHECKED;
-
-       add_dma_entry(entry);
-}
-EXPORT_SYMBOL(debug_dma_map_resource);
-
-void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
-                             size_t size, int direction)
-{
-       struct dma_debug_entry ref = {
-               .type           = dma_debug_resource,
-               .dev            = dev,
-               .dev_addr       = dma_addr,
-               .size           = size,
-               .direction      = direction,
-       };
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       check_unmap(&ref);
-}
-EXPORT_SYMBOL(debug_dma_unmap_resource);
-
-void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
-                                  size_t size, int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
-
-void debug_dma_sync_single_for_device(struct device *dev,
-                                     dma_addr_t dma_handle, size_t size,
-                                     int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_for_device);
-
-void debug_dma_sync_single_range_for_cpu(struct device *dev,
-                                        dma_addr_t dma_handle,
-                                        unsigned long offset, size_t size,
-                                        int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = offset + size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, true);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
-
-void debug_dma_sync_single_range_for_device(struct device *dev,
-                                           dma_addr_t dma_handle,
-                                           unsigned long offset,
-                                           size_t size, int direction)
-{
-       struct dma_debug_entry ref;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       ref.type         = dma_debug_single;
-       ref.dev          = dev;
-       ref.dev_addr     = dma_handle;
-       ref.size         = offset + size;
-       ref.direction    = direction;
-       ref.sg_call_ents = 0;
-
-       check_sync(dev, &ref, false);
-}
-EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
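Note that both *_range_for_* helpers hand check_sync() a size of offset + size: a partial sync at an offset is legal as long as it stays inside the original mapping, which is exactly what the ref->size > entry->size test catches. The usual streaming pattern these calls validate looks like this hypothetical fragment:

            /* Give the buffer to the CPU, inspect it, hand it back. */
            dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
            /* ... CPU reads the freshly DMA'd data ... */
            dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);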
-
-void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-                              int nelems, int direction)
-{
-       struct scatterlist *s;
-       int mapped_ents = 0, i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sg, s, nelems, i) {
-
-               struct dma_debug_entry ref = {
-                       .type           = dma_debug_sg,
-                       .dev            = dev,
-                       .pfn            = page_to_pfn(sg_page(s)),
-                       .offset         = s->offset,
-                       .dev_addr       = sg_dma_address(s),
-                       .size           = sg_dma_len(s),
-                       .direction      = direction,
-                       .sg_call_ents   = nelems,
-               };
-
-               if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, &ref);
-
-               if (i >= mapped_ents)
-                       break;
-
-               check_sync(dev, &ref, true);
-       }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
-
-void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-                                 int nelems, int direction)
-{
-       struct scatterlist *s;
-       int mapped_ents = 0, i;
-
-       if (unlikely(dma_debug_disabled()))
-               return;
-
-       for_each_sg(sg, s, nelems, i) {
-
-               struct dma_debug_entry ref = {
-                       .type           = dma_debug_sg,
-                       .dev            = dev,
-                       .pfn            = page_to_pfn(sg_page(s)),
-                       .offset         = s->offset,
-                       .dev_addr       = sg_dma_address(s),
-                       .size           = sg_dma_len(s),
-                       .direction      = direction,
-                       .sg_call_ents   = nelems,
-               };
-               if (!i)
-                       mapped_ents = get_nr_mapped_entries(dev, &ref);
-
-               if (i >= mapped_ents)
-                       break;
-
-               check_sync(dev, &ref, false);
-       }
-}
-EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
-
-static int __init dma_debug_driver_setup(char *str)
-{
-       int i;
-
-       for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
-               current_driver_name[i] = *str;
-               if (*str == 0)
-                       break;
-       }
-
-       if (current_driver_name[0])
-               pr_info("DMA-API: enable driver filter for driver [%s]\n",
-                       current_driver_name);
-
-       return 1;
-}
-__setup("dma_debug_driver=", dma_debug_driver_setup);
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
deleted file mode 100644 (file)
index 8be8106..0000000
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DMA operations that map physical memory directly without using an IOMMU or
- * flushing caches.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-contiguous.h>
-#include <linux/pfn.h>
-#include <linux/set_memory.h>
-
-#define DIRECT_MAPPING_ERROR           0
-
-/*
- * Most architectures use ZONE_DMA for the first 16 Megabytes, but
- * some use it for entirely different regions:
- */
-#ifndef ARCH_ZONE_DMA_BITS
-#define ARCH_ZONE_DMA_BITS 24
-#endif
-
-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-       return sev_active();
-}
-
-static bool
-check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
-               const char *caller)
-{
-       if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
-               if (!dev->dma_mask) {
-                       dev_err(dev,
-                               "%s: call on device without dma_mask\n",
-                               caller);
-                       return false;
-               }
-
-               if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
-                       dev_err(dev,
-                               "%s: overflow %pad+%zu of device mask %llx\n",
-                               caller, &dma_addr, size, *dev->dma_mask);
-               }
-               return false;
-       }
-       return true;
-}
-
-static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
-{
-       dma_addr_t addr = force_dma_unencrypted() ?
-               __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
-       return addr + size - 1 <= dev->coherent_dma_mask;
-}
-
-void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
-{
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       int page_order = get_order(size);
-       struct page *page = NULL;
-       void *ret;
-
-       /* we always manually zero the memory once we are done: */
-       gfp &= ~__GFP_ZERO;
-
-       /* GFP_DMA32 and GFP_DMA are no-ops without the corresponding zones: */
-       if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-               gfp |= GFP_DMA;
-       if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-               gfp |= GFP_DMA32;
-
-again:
-       /* CMA can be used only in the context which permits sleeping */
-       if (gfpflags_allow_blocking(gfp)) {
-               page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
-               if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                       dma_release_from_contiguous(dev, page, count);
-                       page = NULL;
-               }
-       }
-       if (!page)
-               page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
-
-       if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-               __free_pages(page, page_order);
-               page = NULL;
-
-               if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-                   dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
-                   !(gfp & (GFP_DMA32 | GFP_DMA))) {
-                       gfp |= GFP_DMA32;
-                       goto again;
-               }
-
-               if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-                   dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
-                   !(gfp & GFP_DMA)) {
-                       gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
-                       goto again;
-               }
-       }
-
-       if (!page)
-               return NULL;
-       ret = page_address(page);
-       if (force_dma_unencrypted()) {
-               set_memory_decrypted((unsigned long)ret, 1 << page_order);
-               *dma_handle = __phys_to_dma(dev, page_to_phys(page));
-       } else {
-               *dma_handle = phys_to_dma(dev, page_to_phys(page));
-       }
-       memset(ret, 0, size);
-       return ret;
-}
-
-/*
- * NOTE: this function must never look at the dma_addr argument, because we want
- * to be able to use it as a helper for iommu implementations as well.
- */
-void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
-               dma_addr_t dma_addr, unsigned long attrs)
-{
-       unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       unsigned int page_order = get_order(size);
-
-       if (force_dma_unencrypted())
-               set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
-       if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-               free_pages((unsigned long)cpu_addr, page_order);
-}
-
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
-
-       if (!check_addr(dev, dma_addr, size, __func__))
-               return DIRECT_MAPPING_ERROR;
-       return dma_addr;
-}
-
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-               enum dma_data_direction dir, unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sgl, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-
-               sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
-               if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
-                       return 0;
-               sg_dma_len(sg) = sg->length;
-       }
-
-       return nents;
-}
-
-int dma_direct_supported(struct device *dev, u64 mask)
-{
-#ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-               return 0;
-#else
-       /*
-        * Because 32-bit DMA masks are so common we expect every architecture
-        * to be able to satisfy them - either by not supporting more physical
-        * memory, or by providing a ZONE_DMA32.  If neither is the case, the
-        * architecture needs to use an IOMMU instead of the direct mapping.
-        */
-       if (mask < DMA_BIT_MASK(32))
-               return 0;
-#endif
-       /*
-        * Various PCI/PCIe bridges have broken support for > 32bit DMA even
-        * if the device itself might support it.
-        */
-       if (dev->dma_32bit_limit && mask > DMA_BIT_MASK(32))
-               return 0;
-       return 1;
-}
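dma_direct_supported() is what ultimately answers a driver's dma_set_mask() request when the direct ops are in use. The conventional probe-time negotiation it backs is a wide mask with a 32-bit fallback (hypothetical fragment):

            if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
                dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    return -EIO;    /* neither 64-bit nor 32-bit DMA is usable */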
-
-int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-       return dma_addr == DIRECT_MAPPING_ERROR;
-}
-
-const struct dma_map_ops dma_direct_ops = {
-       .alloc                  = dma_direct_alloc,
-       .free                   = dma_direct_free,
-       .map_page               = dma_direct_map_page,
-       .map_sg                 = dma_direct_map_sg,
-       .dma_supported          = dma_direct_supported,
-       .mapping_error          = dma_direct_mapping_error,
-};
-EXPORT_SYMBOL(dma_direct_ops);
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
deleted file mode 100644 (file)
index 79e9a75..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2018 Christoph Hellwig.
- *
- * DMA operations that map physical memory directly without providing cache
- * coherence.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/scatterlist.h>
-
-static void dma_noncoherent_sync_single_for_device(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-       arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_device(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
-}
-
-static dma_addr_t dma_noncoherent_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t size, enum dma_data_direction dir,
-               unsigned long attrs)
-{
-       dma_addr_t addr;
-
-       addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
-       if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
-                               size, dir);
-       return addr;
-}
-
-static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       nents = dma_direct_map_sg(dev, sgl, nents, dir, attrs);
-       if (nents > 0 && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_sg_for_device(dev, sgl, nents, dir);
-       return nents;
-}
-
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
-               dma_addr_t addr, size_t size, enum dma_data_direction dir)
-{
-       arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
-}
-
-static void dma_noncoherent_sync_sg_for_cpu(struct device *dev,
-               struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i)
-               arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-}
-
-static void dma_noncoherent_unmap_page(struct device *dev, dma_addr_t addr,
-               size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_noncoherent_unmap_sg(struct device *dev, struct scatterlist *sgl,
-               int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-               dma_noncoherent_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-#endif
-
-const struct dma_map_ops dma_noncoherent_ops = {
-       .alloc                  = arch_dma_alloc,
-       .free                   = arch_dma_free,
-       .mmap                   = arch_dma_mmap,
-       .sync_single_for_device = dma_noncoherent_sync_single_for_device,
-       .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
-       .map_page               = dma_noncoherent_map_page,
-       .map_sg                 = dma_noncoherent_map_sg,
-#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-       .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
-       .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
-       .unmap_page             = dma_noncoherent_unmap_page,
-       .unmap_sg               = dma_noncoherent_unmap_sg,
-#endif
-       .dma_supported          = dma_direct_supported,
-       .mapping_error          = dma_direct_mapping_error,
-       .cache_sync             = arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_noncoherent_ops);
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
deleted file mode 100644 (file)
index 8e61a02..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- *     lib/dma-virt.c
- *
- * DMA operations that map to virtual addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-
-static void *dma_virt_alloc(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t gfp,
-                           unsigned long attrs)
-{
-       void *ret;
-
-       ret = (void *)__get_free_pages(gfp, get_order(size));
-       if (ret)
-               *dma_handle = (uintptr_t)ret;
-       return ret;
-}
-
-static void dma_virt_free(struct device *dev, size_t size,
-                         void *cpu_addr, dma_addr_t dma_addr,
-                         unsigned long attrs)
-{
-       free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
-                                   unsigned long offset, size_t size,
-                                   enum dma_data_direction dir,
-                                   unsigned long attrs)
-{
-       return (uintptr_t)(page_address(page) + offset);
-}
-
-static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
-                          int nents, enum dma_data_direction dir,
-                          unsigned long attrs)
-{
-       int i;
-       struct scatterlist *sg;
-
-       for_each_sg(sgl, sg, nents, i) {
-               BUG_ON(!sg_page(sg));
-               sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
-               sg_dma_len(sg) = sg->length;
-       }
-
-       return nents;
-}
-
-const struct dma_map_ops dma_virt_ops = {
-       .alloc                  = dma_virt_alloc,
-       .free                   = dma_virt_free,
-       .map_page               = dma_virt_map_page,
-       .map_sg                 = dma_virt_map_sg,
-};
-EXPORT_SYMBOL(dma_virt_ops);
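dma_virt_ops serves software-only "devices" (the soft RDMA drivers are its in-tree users) whose "DMA addresses" are simply kernel virtual addresses, so map_page/map_sg need no translation and alloc/free reduce to plain page allocation. Wiring it up is a single assignment; a sketch, where dev is the software device's struct device:

            dev->dma_ops = &dma_virt_ops;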
index 9bbd9c5d375a2c8bf9a6d950ba42ab556c12063b..beb14839b41ae3c04fd698ec33a34727a2bc92d5 100644 (file)
@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
        spin_lock_irqsave(&tags->lock, flags);
 
        /* Fastpath */
-       if (likely(tags->nr_free >= 0)) {
+       if (likely(tags->nr_free)) {
                tag = tags->freelist[--tags->nr_free];
                spin_unlock_irqrestore(&tags->lock, flags);
                return tag;
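The percpu_ida fix above closes an underflow: nr_free is an unsigned counter, so nr_free >= 0 is always true and the fastpath would pop from an empty freelist anyway, wrapping nr_free around. A userspace demonstration of the wrap, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nr_free = 0;       /* empty freelist */

            if (nr_free >= 0)               /* always true for an unsigned type */
                    nr_free--;              /* wraps to UINT_MAX */
            printf("%u\n", nr_free);        /* prints 4294967295 */
            return 0;
    }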
index 0eb48353abe30164d4ae564aa21bee901fad72c3..d3b81cefce91a83698c3127d562f7e81cac0b55f 100644 (file)
@@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ *                                 interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ *         otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+                                  unsigned long *flags)
+{
+       if (refcount_dec_not_one(r))
+               return false;
+
+       spin_lock_irqsave(lock, *flags);
+       if (!refcount_dec_and_test(r)) {
+               spin_unlock_irqrestore(lock, *flags);
+               return false;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
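A typical caller of the new helper is an object put path where the object lives on a list whose lock may be taken from IRQ context, so plain refcount_dec_and_lock() would be unsafe. A minimal sketch under those assumptions (all names hypothetical):

    #include <linux/list.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_obj {
            refcount_t ref;
            struct list_head node;
    };

    static void my_obj_put(struct my_obj *obj, spinlock_t *list_lock)
    {
            unsigned long flags;

            /* Only the final put takes the lock and tears the object down. */
            if (refcount_dec_and_lock_irqsave(&obj->ref, list_lock, &flags)) {
                    list_del(&obj->node);
                    spin_unlock_irqrestore(list_lock, flags);
                    kfree(obj);
            }
    }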
index 9427b5766134cb139ef385b27f92f6027fecceca..e5c8586cf7174cfe0526dc8fb3314676601c5e57 100644 (file)
@@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
                                skip++;
                                if (list == iter->list) {
                                        iter->p = p;
-                                       skip = skip;
+                                       iter->skip = skip;
                                        goto found;
                                }
                        }
@@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
 
 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
 {
-       return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
-                  (unsigned long)params->min_size);
+       size_t retsize;
+
+       if (params->nelem_hint)
+               retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+                             (unsigned long)params->min_size);
+       else
+               retsize = max(HASH_DEFAULT_SIZE,
+                             (unsigned long)params->min_size);
+
+       return retsize;
 }
 
 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
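The effect of the rounded_hashtable_size() change is that a table created without an nelem_hint now starts at max(HASH_DEFAULT_SIZE, min_size) instead of ignoring min_size entirely. For example, with a hypothetical entry type and parameters:

    #include <linux/rhashtable.h>

    struct my_entry {
            u32 key;
            struct rhash_head node;
    };

    /* No .nelem_hint: the initial table is now sized to honour .min_size. */
    static const struct rhashtable_params my_params = {
            .key_len     = sizeof(u32),
            .key_offset  = offsetof(struct my_entry, key),
            .head_offset = offsetof(struct my_entry, node),
            .min_size    = 1024,
    };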
@@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht,
        struct bucket_table *tbl;
        size_t size;
 
-       size = HASH_DEFAULT_SIZE;
-
        if ((!params->key_len && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;
@@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht,
 
        ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
 
-       if (params->nelem_hint)
-               size = rounded_hashtable_size(&ht->p);
+       size = rounded_hashtable_size(&ht->p);
 
        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg)
 {
-       struct bucket_table *tbl;
+       struct bucket_table *tbl, *next_tbl;
        unsigned int i;
 
        cancel_work_sync(&ht->run_work);
 
        mutex_lock(&ht->mutex);
        tbl = rht_dereference(ht->tbl, ht);
+restart:
        if (free_fn) {
                for (i = 0; i < tbl->size; i++) {
                        struct rhash_head *pos, *next;
@@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
                }
        }
 
+       next_tbl = rht_dereference(tbl->future_tbl, ht);
        bucket_table_free(tbl);
+       if (next_tbl) {
+               tbl = next_tbl;
+               goto restart;
+       }
        mutex_unlock(&ht->mutex);
 }
 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
index 1642fd507a960f5deb2b6d7366db83800a3e547b..7c6096a7170486449736d82a37fbd50326ac169e 100644 (file)
@@ -24,9 +24,6 @@
  **/
 struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
        for_each_sg(sgl, sg, nents, i)
                ret = sg;
 
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sgl[0].sg_magic != SG_MAGIC);
        BUG_ON(!sg_is_last(ret));
-#endif
        return ret;
 }
 EXPORT_SYMBOL(sg_last);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
deleted file mode 100644 (file)
index 04b68d9..0000000
+++ /dev/null
@@ -1,1087 +0,0 @@
-/*
- * Dynamic DMA mapping support.
- *
- * This implementation is a fallback for platforms that do not support
- * I/O TLBs (aka DMA address translation hardware).
- * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
- * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
- * Copyright (C) 2000, 2003 Hewlett-Packard Co
- *     David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 03/05/07 davidm     Switch from PCI-DMA to generic device DMA API.
- * 00/12/13 davidm     Rename to swiotlb.c and add mark_clean() to avoid
- *                     unnecessary i-cache flushing.
- * 04/07/.. ak         Better overflow handling. Assorted fixes.
- * 05/09/10 linville   Add support for syncing ranges, support syncing for
- *                     DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
- * 08/12/11 beckyb     Add highmem support
- */
-
-#include <linux/cache.h>
-#include <linux/dma-direct.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/swiotlb.h>
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/ctype.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/scatterlist.h>
-#include <linux/mem_encrypt.h>
-#include <linux/set_memory.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/iommu-helper.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/swiotlb.h>
-
-#define OFFSET(val,align) ((unsigned long)     \
-                          ( (val) & ( (align) - 1)))
-
-#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
-
-/*
- * Minimum IO TLB size to bother booting with.  Systems with mainly
- * 64bit capable cards will only lightly use the swiotlb.  If we can't
- * allocate a contiguous 1MB, we're probably in trouble anyway.
- */
-#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
-
-enum swiotlb_force swiotlb_force;
-
-/*
- * Used to do a quick range check in swiotlb_tbl_unmap_single and
- * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
- * API.
- */
-static phys_addr_t io_tlb_start, io_tlb_end;
-
-/*
- * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
- * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
- */
-static unsigned long io_tlb_nslabs;
-
-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
-/*
- * This is a free list describing the number of free entries available from
- * each index
- */
-static unsigned int *io_tlb_list;
-static unsigned int io_tlb_index;
-
-/*
- * Max segment that we can provide which (if pages are contiguous) will
- * not be bounced (unless SWIOTLB_FORCE is set).
- */
-unsigned int max_segment;
-
-/*
- * We need to save away the original address corresponding to a mapped entry
- * for the sync operations.
- */
-#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
-static phys_addr_t *io_tlb_orig_addr;
-
-/*
- * Protect the above data structures in the map and unmap calls
- */
-static DEFINE_SPINLOCK(io_tlb_lock);
-
-static int late_alloc;
-
-static int __init
-setup_io_tlb_npages(char *str)
-{
-       if (isdigit(*str)) {
-               io_tlb_nslabs = simple_strtoul(str, &str, 0);
-               /* avoid tail segment of size < IO_TLB_SEGSIZE */
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
-       if (*str == ',')
-               ++str;
-       if (!strcmp(str, "force")) {
-               swiotlb_force = SWIOTLB_FORCE;
-       } else if (!strcmp(str, "noforce")) {
-               swiotlb_force = SWIOTLB_NO_FORCE;
-               io_tlb_nslabs = 1;
-       }
-
-       return 0;
-}
-early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
-
-unsigned long swiotlb_nr_tbl(void)
-{
-       return io_tlb_nslabs;
-}
-EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
-
-unsigned int swiotlb_max_segment(void)
-{
-       return max_segment;
-}
-EXPORT_SYMBOL_GPL(swiotlb_max_segment);
-
-void swiotlb_set_max_segment(unsigned int val)
-{
-       if (swiotlb_force == SWIOTLB_FORCE)
-               max_segment = 1;
-       else
-               max_segment = rounddown(val, PAGE_SIZE);
-}
-
-/* default to 64MB */
-#define IO_TLB_DEFAULT_SIZE (64UL<<20)
-unsigned long swiotlb_size_or_default(void)
-{
-       unsigned long size;
-
-       size = io_tlb_nslabs << IO_TLB_SHIFT;
-
-       return size ? size : (IO_TLB_DEFAULT_SIZE);
-}
-
-static bool no_iotlb_memory;
-
-void swiotlb_print_info(void)
-{
-       unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-       unsigned char *vstart, *vend;
-
-       if (no_iotlb_memory) {
-               pr_warn("software IO TLB: No low mem\n");
-               return;
-       }
-
-       vstart = phys_to_virt(io_tlb_start);
-       vend = phys_to_virt(io_tlb_end);
-
-       printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
-              (unsigned long long)io_tlb_start,
-              (unsigned long long)io_tlb_end,
-              bytes >> 20, vstart, vend - 1);
-}
-
-/*
- * Early SWIOTLB allocation may be too early to allow an architecture to
- * perform the desired operations.  This function allows the architecture to
- * call SWIOTLB when the operations are possible.  It needs to be called
- * before the SWIOTLB memory is used.
- */
-void __init swiotlb_update_mem_attributes(void)
-{
-       void *vaddr;
-       unsigned long bytes;
-
-       if (no_iotlb_memory || late_alloc)
-               return;
-
-       vaddr = phys_to_virt(io_tlb_start);
-       bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
-       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-       memset(vaddr, 0, bytes);
-
-       vaddr = phys_to_virt(io_tlb_overflow_buffer);
-       bytes = PAGE_ALIGN(io_tlb_overflow);
-       set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-       memset(vaddr, 0, bytes);
-}
-
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
-{
-       void *v_overflow_buffer;
-       unsigned long i, bytes;
-
-       bytes = nslabs << IO_TLB_SHIFT;
-
-       io_tlb_nslabs = nslabs;
-       io_tlb_start = __pa(tlb);
-       io_tlb_end = io_tlb_start + bytes;
-
-       /*
-        * Get the overflow emergency buffer
-        */
-       v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-                                               PAGE_ALIGN(io_tlb_overflow),
-                                               PAGE_SIZE);
-       if (!v_overflow_buffer)
-               return -ENOMEM;
-
-       io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
-       /*
-        * Allocate and initialize the free list array.  This array is used
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
-        */
-       io_tlb_list = memblock_virt_alloc(
-                               PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
-                               PAGE_SIZE);
-       io_tlb_orig_addr = memblock_virt_alloc(
-                               PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
-                               PAGE_SIZE);
-       for (i = 0; i < io_tlb_nslabs; i++) {
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-       }
-       io_tlb_index = 0;
-
-       if (verbose)
-               swiotlb_print_info();
-
-       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
-       return 0;
-}
-
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void  __init
-swiotlb_init(int verbose)
-{
-       size_t default_size = IO_TLB_DEFAULT_SIZE;
-       unsigned char *vstart;
-       unsigned long bytes;
-
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
-
-       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-       /* Get IO TLB memory from the low pages */
-       vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
-       if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
-               return;
-
-       if (io_tlb_start)
-               memblock_free_early(io_tlb_start,
-                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
-       pr_warn("Cannot allocate SWIOTLB buffer");
-       no_iotlb_memory = true;
-}
-
-/*
- * Systems with larger DMA zones (those that don't support ISA) can
- * initialize the swiotlb later using the slab allocator if needed.
- * This should be just like above, but with some error catching.
- */
-int
-swiotlb_late_init_with_default_size(size_t default_size)
-{
-       unsigned long bytes, req_nslabs = io_tlb_nslabs;
-       unsigned char *vstart = NULL;
-       unsigned int order;
-       int rc = 0;
-
-       if (!io_tlb_nslabs) {
-               io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-               io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-       }
-
-       /*
-        * Get IO TLB memory from the low pages
-        */
-       order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
-       io_tlb_nslabs = SLABS_PER_PAGE << order;
-       bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-       while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
-               vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
-                                                 order);
-               if (vstart)
-                       break;
-               order--;
-       }
-
-       if (!vstart) {
-               io_tlb_nslabs = req_nslabs;
-               return -ENOMEM;
-       }
-       if (order != get_order(bytes)) {
-               printk(KERN_WARNING "Warning: only able to allocate %ld MB "
-                      "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
-               io_tlb_nslabs = SLABS_PER_PAGE << order;
-       }
-       rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
-       if (rc)
-               free_pages((unsigned long)vstart, order);
-
-       return rc;
-}
-
-int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
-{
-       unsigned long i, bytes;
-       unsigned char *v_overflow_buffer;
-
-       bytes = nslabs << IO_TLB_SHIFT;
-
-       io_tlb_nslabs = nslabs;
-       io_tlb_start = virt_to_phys(tlb);
-       io_tlb_end = io_tlb_start + bytes;
-
-       set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
-       memset(tlb, 0, bytes);
-
-       /*
-        * Get the overflow emergency buffer
-        */
-       v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-                                                    get_order(io_tlb_overflow));
-       if (!v_overflow_buffer)
-               goto cleanup2;
-
-       set_memory_decrypted((unsigned long)v_overflow_buffer,
-                       io_tlb_overflow >> PAGE_SHIFT);
-       memset(v_overflow_buffer, 0, io_tlb_overflow);
-       io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
-       /*
-        * Allocate and initialize the free list array.  This array is used
-        * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
-        * between io_tlb_start and io_tlb_end.
-        */
-       io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
-                                     get_order(io_tlb_nslabs * sizeof(int)));
-       if (!io_tlb_list)
-               goto cleanup3;
-
-       io_tlb_orig_addr = (phys_addr_t *)
-               __get_free_pages(GFP_KERNEL,
-                                get_order(io_tlb_nslabs *
-                                          sizeof(phys_addr_t)));
-       if (!io_tlb_orig_addr)
-               goto cleanup4;
-
-       for (i = 0; i < io_tlb_nslabs; i++) {
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-       }
-       io_tlb_index = 0;
-
-       swiotlb_print_info();
-
-       late_alloc = 1;
-
-       swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
-
-       return 0;
-
-cleanup4:
-       free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
-                                                        sizeof(int)));
-       io_tlb_list = NULL;
-cleanup3:
-       free_pages((unsigned long)v_overflow_buffer,
-                  get_order(io_tlb_overflow));
-       io_tlb_overflow_buffer = 0;
-cleanup2:
-       io_tlb_end = 0;
-       io_tlb_start = 0;
-       io_tlb_nslabs = 0;
-       max_segment = 0;
-       return -ENOMEM;
-}
-
-void __init swiotlb_exit(void)
-{
-       if (!io_tlb_orig_addr)
-               return;
-
-       if (late_alloc) {
-               free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-                          get_order(io_tlb_overflow));
-               free_pages((unsigned long)io_tlb_orig_addr,
-                          get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
-               free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
-                                                                sizeof(int)));
-               free_pages((unsigned long)phys_to_virt(io_tlb_start),
-                          get_order(io_tlb_nslabs << IO_TLB_SHIFT));
-       } else {
-               memblock_free_late(io_tlb_overflow_buffer,
-                                  PAGE_ALIGN(io_tlb_overflow));
-               memblock_free_late(__pa(io_tlb_orig_addr),
-                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
-               memblock_free_late(__pa(io_tlb_list),
-                                  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
-               memblock_free_late(io_tlb_start,
-                                  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
-       }
-       io_tlb_nslabs = 0;
-       max_segment = 0;
-}
-
-int is_swiotlb_buffer(phys_addr_t paddr)
-{
-       return paddr >= io_tlb_start && paddr < io_tlb_end;
-}
-
-/*
- * Bounce: copy data between the swiotlb buffer and the original dma location
- */
-static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
-                          size_t size, enum dma_data_direction dir)
-{
-       unsigned long pfn = PFN_DOWN(orig_addr);
-       unsigned char *vaddr = phys_to_virt(tlb_addr);
-
-       if (PageHighMem(pfn_to_page(pfn))) {
-               /* The buffer does not have a mapping.  Map it in and copy */
-               unsigned int offset = orig_addr & ~PAGE_MASK;
-               char *buffer;
-               unsigned int sz = 0;
-               unsigned long flags;
-
-               while (size) {
-                       sz = min_t(size_t, PAGE_SIZE - offset, size);
-
-                       local_irq_save(flags);
-                       buffer = kmap_atomic(pfn_to_page(pfn));
-                       if (dir == DMA_TO_DEVICE)
-                               memcpy(vaddr, buffer + offset, sz);
-                       else
-                               memcpy(buffer + offset, vaddr, sz);
-                       kunmap_atomic(buffer);
-                       local_irq_restore(flags);
-
-                       size -= sz;
-                       pfn++;
-                       vaddr += sz;
-                       offset = 0;
-               }
-       } else if (dir == DMA_TO_DEVICE) {
-               memcpy(vaddr, phys_to_virt(orig_addr), size);
-       } else {
-               memcpy(phys_to_virt(orig_addr), vaddr, size);
-       }
-}
-
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-                                  dma_addr_t tbl_dma_addr,
-                                  phys_addr_t orig_addr, size_t size,
-                                  enum dma_data_direction dir,
-                                  unsigned long attrs)
-{
-       unsigned long flags;
-       phys_addr_t tlb_addr;
-       unsigned int nslots, stride, index, wrap;
-       int i;
-       unsigned long mask;
-       unsigned long offset_slots;
-       unsigned long max_slots;
-
-       if (no_iotlb_memory)
-               panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
-
-       if (mem_encrypt_active())
-               pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-                            sme_active() ? "SME" : "SEV");
-
-       mask = dma_get_seg_boundary(hwdev);
-
-       tbl_dma_addr &= mask;
-
-       offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-
-       /*
-        * Carefully handle integer overflow which can occur when mask == ~0UL.
-        */
-       max_slots = mask + 1
-                   ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
-                   : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
-
-       /*
-        * For mappings greater than or equal to a page, we limit the stride
-        * (and hence alignment) to a page size.
-        */
-       nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       if (size >= PAGE_SIZE)
-               stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
-       else
-               stride = 1;
-
-       BUG_ON(!nslots);
-
-       /*
-        * Find a suitable number of IO TLB entries that will fit this
-        * request and allocate a buffer from the IO TLB pool.
-        */
-       spin_lock_irqsave(&io_tlb_lock, flags);
-       index = ALIGN(io_tlb_index, stride);
-       if (index >= io_tlb_nslabs)
-               index = 0;
-       wrap = index;
-
-       do {
-               while (iommu_is_span_boundary(index, nslots, offset_slots,
-                                             max_slots)) {
-                       index += stride;
-                       if (index >= io_tlb_nslabs)
-                               index = 0;
-                       if (index == wrap)
-                               goto not_found;
-               }
-
-               /*
-                * If we find a slot that indicates we have 'nslots' contiguous
-                * buffers, we allocate the buffers from that slot and mark the
-                * entries as '0' to indicate they are unavailable.
-                */
-               if (io_tlb_list[index] >= nslots) {
-                       int count = 0;
-
-                       for (i = index; i < (int) (index + nslots); i++)
-                               io_tlb_list[i] = 0;
-                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
-                               io_tlb_list[i] = ++count;
-                       tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-                       /*
-                        * Update the indices to avoid searching in the next
-                        * round.
-                        */
-                       io_tlb_index = ((index + nslots) < io_tlb_nslabs
-                                       ? (index + nslots) : 0);
-
-                       goto found;
-               }
-               index += stride;
-               if (index >= io_tlb_nslabs)
-                       index = 0;
-       } while (index != wrap);
-
-not_found:
-       spin_unlock_irqrestore(&io_tlb_lock, flags);
-       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
-               dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
-       return SWIOTLB_MAP_ERROR;
-found:
-       spin_unlock_irqrestore(&io_tlb_lock, flags);
-
-       /*
-        * Save away the mapping from the original address to the DMA address.
-        * This is needed when we sync the memory.  Then we sync the buffer if
-        * needed.
-        */
-       for (i = 0; i < nslots; i++)
-               io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
-       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
-
-       return tlb_addr;
-}
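
The slot arithmetic at the top of swiotlb_tbl_map_single() is easy to sanity-check in isolation. The following standalone sketch (hypothetical user-space code, assuming the kernel's IO_TLB_SHIFT of 11, i.e. 2 KiB slabs, and 4 KiB pages) reproduces the nslots and stride computation:

        #include <stdio.h>

        #define IO_TLB_SHIFT 11                 /* 2 KiB per IO TLB slab */
        #define PAGE_SHIFT   12                 /* 4 KiB pages assumed */

        int main(void)
        {
                size_t size = 5000;             /* bytes the driver wants mapped */

                /* nslots: round the size up to whole slabs */
                unsigned long nslots =
                        (size + (1UL << IO_TLB_SHIFT) - 1) >> IO_TLB_SHIFT;

                /* stride: page-align the search for mappings of a page or more */
                unsigned long stride = (size >= (1UL << PAGE_SHIFT))
                        ? (1UL << (PAGE_SHIFT - IO_TLB_SHIFT)) : 1;

                /* prints: 5000 bytes -> 3 slots, search stride 2 */
                printf("%zu bytes -> %lu slots, search stride %lu\n",
                       size, nslots, stride);
                return 0;
        }
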
-
-/*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-          enum dma_data_direction dir, unsigned long attrs)
-{
-       dma_addr_t start_dma_addr;
-
-       if (swiotlb_force == SWIOTLB_NO_FORCE) {
-               dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
-                                    &phys);
-               return SWIOTLB_MAP_ERROR;
-       }
-
-       start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
-       return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
-                                     dir, attrs);
-}
-
-/*
- * tlb_addr is the physical address of the bounce buffer to unmap.
- */
-void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
-                             size_t size, enum dma_data_direction dir,
-                             unsigned long attrs)
-{
-       unsigned long flags;
-       int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-       /*
-        * First, sync the memory before unmapping the entry
-        */
-       if (orig_addr != INVALID_PHYS_ADDR &&
-           !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
-               swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
-
-       /*
-        * Return the buffer to the free list by setting the corresponding
-        * entries to indicate the number of contiguous entries available.
-        * While returning the entries to the free list, we merge them with
-        * free slots below and above the range being returned.
-        */
-       spin_lock_irqsave(&io_tlb_lock, flags);
-       {
-               count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
-                        io_tlb_list[index + nslots] : 0);
-               /*
-                * Step 1: return the slots to the free list, merging the
-                * slots with succeeding slots.
-                */
-               for (i = index + nslots - 1; i >= index; i--) {
-                       io_tlb_list[i] = ++count;
-                       io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
-               }
-               /*
-                * Step 2: merge the returned slots with the preceding slots,
-                * if available (non-zero).
-                */
-               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
-                       io_tlb_list[i] = ++count;
-       }
-       spin_unlock_irqrestore(&io_tlb_lock, flags);
-}
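
The two merge loops above are subtle; this toy model (hypothetical user-space code that ignores the IO_TLB_SEGSIZE boundary checks) shows how freeing two slots next to already-free neighbours rebuilds the contiguous-count encoding:

        #include <stdio.h>

        int main(void)
        {
                /* io_tlb_list encoding: 0 = in use, n = n contiguous free
                 * slots starting at this index. Slots 3 and 6 are free;
                 * slots 4 and 5 are about to be returned. */
                int list[8] = { 0, 0, 0, 1, 0, 0, 1, 0 };
                int index = 4, nslots = 2, count, i;

                count = list[index + nslots];   /* free run just above: 1 */

                /* Step 1: free the returned slots, merging with the run above */
                for (i = index + nslots - 1; i >= index; i--)
                        list[i] = ++count;

                /* Step 2: extend the counts of free slots below */
                for (i = index - 1; i >= 0 && list[i]; i--)
                        list[i] = ++count;

                for (i = 0; i < 8; i++)
                        printf("%d ", list[i]); /* prints: 0 0 0 4 3 2 1 0 */
                printf("\n");
                return 0;
        }
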
-
-void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
-                            size_t size, enum dma_data_direction dir,
-                            enum dma_sync_target target)
-{
-       int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
-       phys_addr_t orig_addr = io_tlb_orig_addr[index];
-
-       if (orig_addr == INVALID_PHYS_ADDR)
-               return;
-       orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
-
-       switch (target) {
-       case SYNC_FOR_CPU:
-               if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(orig_addr, tlb_addr,
-                                      size, DMA_FROM_DEVICE);
-               else
-                       BUG_ON(dir != DMA_TO_DEVICE);
-               break;
-       case SYNC_FOR_DEVICE:
-               if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
-                       swiotlb_bounce(orig_addr, tlb_addr,
-                                      size, DMA_TO_DEVICE);
-               else
-                       BUG_ON(dir != DMA_FROM_DEVICE);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
-               size_t size)
-{
-       u64 mask = DMA_BIT_MASK(32);
-
-       if (dev && dev->coherent_dma_mask)
-               mask = dev->coherent_dma_mask;
-       return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               unsigned long attrs)
-{
-       phys_addr_t phys_addr;
-
-       if (swiotlb_force == SWIOTLB_NO_FORCE)
-               goto out_warn;
-
-       phys_addr = swiotlb_tbl_map_single(dev,
-                       __phys_to_dma(dev, io_tlb_start),
-                       0, size, DMA_FROM_DEVICE, attrs);
-       if (phys_addr == SWIOTLB_MAP_ERROR)
-               goto out_warn;
-
-       *dma_handle = __phys_to_dma(dev, phys_addr);
-       if (!dma_coherent_ok(dev, *dma_handle, size))
-               goto out_unmap;
-
-       memset(phys_to_virt(phys_addr), 0, size);
-       return phys_to_virt(phys_addr);
-
-out_unmap:
-       dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-               (unsigned long long)dev->coherent_dma_mask,
-               (unsigned long long)*dma_handle);
-
-       /*
-        * DMA_TO_DEVICE to avoid memcpy in unmap_single.
-        * DMA_ATTR_SKIP_CPU_SYNC is optional.
-        */
-       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                       DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
-       if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
-               dev_warn(dev,
-                       "swiotlb: coherent allocation failed, size=%zu\n",
-                       size);
-               dump_stack();
-       }
-       return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
-               dma_addr_t dma_addr)
-{
-       phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
-
-       WARN_ON_ONCE(irqs_disabled());
-
-       if (!is_swiotlb_buffer(phys_addr))
-               return false;
-
-       /*
-        * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
-        * DMA_ATTR_SKIP_CPU_SYNC is optional.
-        */
-       swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
-                                DMA_ATTR_SKIP_CPU_SYNC);
-       return true;
-}
-
-static void
-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
-            int do_panic)
-{
-       if (swiotlb_force == SWIOTLB_NO_FORCE)
-               return;
-
-       /*
-        * Ran out of IOMMU space for this operation. This is very bad.
-        * Unfortunately the drivers cannot handle this operation properly
-        * unless they check for dma_mapping_error() (most don't).
-        * When the mapping is small enough, return a static buffer to limit
-        * the damage, or panic when the transfer is too big.
-        */
-       dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
-                           size);
-
-       if (size <= io_tlb_overflow || !do_panic)
-               return;
-
-       if (dir == DMA_BIDIRECTIONAL)
-               panic("DMA: Random memory could be DMA accessed\n");
-       if (dir == DMA_FROM_DEVICE)
-               panic("DMA: Random memory could be DMA written\n");
-       if (dir == DMA_TO_DEVICE)
-               panic("DMA: Random memory could be DMA read\n");
-}
-
-/*
- * Map a single buffer of the indicated size for DMA in streaming mode.  The
- * DMA address to use is returned.
- *
- * Once the device is given the dma address, the device owns this memory until
- * either swiotlb_unmap_page() or swiotlb_sync_single_for_cpu() is performed.
- */
-dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
-                           unsigned long offset, size_t size,
-                           enum dma_data_direction dir,
-                           unsigned long attrs)
-{
-       phys_addr_t map, phys = page_to_phys(page) + offset;
-       dma_addr_t dev_addr = phys_to_dma(dev, phys);
-
-       BUG_ON(dir == DMA_NONE);
-       /*
-        * If the address happens to be in the device's DMA window,
-        * we can safely return the device addr and not worry about bounce
-        * buffering it.
-        */
-       if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
-               return dev_addr;
-
-       trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
-       /* Oh well, have to allocate and map a bounce buffer. */
-       map = map_single(dev, phys, size, dir, attrs);
-       if (map == SWIOTLB_MAP_ERROR) {
-               swiotlb_full(dev, size, dir, 1);
-               return __phys_to_dma(dev, io_tlb_overflow_buffer);
-       }
-
-       dev_addr = __phys_to_dma(dev, map);
-
-       /* Ensure that the address returned is DMA'ble */
-       if (dma_capable(dev, dev_addr, size))
-               return dev_addr;
-
-       attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-       swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
-       return __phys_to_dma(dev, io_tlb_overflow_buffer);
-}
-
-/*
- * Unmap a single streaming mode DMA translation.  The dma_addr and size must
- * match what was provided in a previous swiotlb_map_page() call.  All
- * other usages are undefined.
- *
- * After this call, reads by the cpu to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                        size_t size, enum dma_data_direction dir,
-                        unsigned long attrs)
-{
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-
-       if (is_swiotlb_buffer(paddr)) {
-               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
-               return;
-       }
-
-       if (dir != DMA_FROM_DEVICE)
-               return;
-
-       /*
-        * phys_to_virt() doesn't work with a highmem page, but we could
-        * call dma_mark_clean() with a highmem page here. However, we
-        * are fine since dma_mark_clean() is a no-op on POWERPC. We can
-        * make dma_mark_clean() take a physical address if necessary.
-        */
-       dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
-                       size_t size, enum dma_data_direction dir,
-                       unsigned long attrs)
-{
-       unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
-/*
- * Make physical memory consistent for a single streaming mode DMA translation
- * after a transfer.
- *
- * If you perform a swiotlb_map_page() but wish to interrogate the buffer
- * using the CPU, yet do not wish to tear down the DMA mapping, you must
- * call this function before doing so.  At the next point you give the DMA
- * address back to the card, you must first perform a
- * swiotlb_sync_single_for_device(), and then the device again owns the buffer.
- */
-static void
-swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                   size_t size, enum dma_data_direction dir,
-                   enum dma_sync_target target)
-{
-       phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
-       BUG_ON(dir == DMA_NONE);
-
-       if (is_swiotlb_buffer(paddr)) {
-               swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
-               return;
-       }
-
-       if (dir != DMA_FROM_DEVICE)
-               return;
-
-       dma_mark_clean(phys_to_virt(paddr), size);
-}
-
-void
-swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
-                           size_t size, enum dma_data_direction dir)
-{
-       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
-                              size_t size, enum dma_data_direction dir)
-{
-       swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
-}
-
-/*
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * This is the scatter-gather version of the above swiotlb_map_page
- * interface.  Here the scatter-gather list elements are each tagged with the
- * appropriate dma address and length.  They are obtained via
- * sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- *       DMA address/length pairs than there are SG table elements
- *       (for example via virtual mapping capabilities).
- *       The routine returns the number of addr/length pairs actually
- *       used, at most nents.
- *
- * Device ownership issues as mentioned above for swiotlb_map_page are the
- * same here.
- */
-int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                    enum dma_data_direction dir, unsigned long attrs)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(dir == DMA_NONE);
-
-       for_each_sg(sgl, sg, nelems, i) {
-               phys_addr_t paddr = sg_phys(sg);
-               dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
-
-               if (swiotlb_force == SWIOTLB_FORCE ||
-                   !dma_capable(hwdev, dev_addr, sg->length)) {
-                       phys_addr_t map = map_single(hwdev, sg_phys(sg),
-                                                    sg->length, dir, attrs);
-                       if (map == SWIOTLB_MAP_ERROR) {
-                               /* Don't panic here, we expect map_sg users
-                                  to do proper error handling. */
-                               swiotlb_full(hwdev, sg->length, dir, 0);
-                               attrs |= DMA_ATTR_SKIP_CPU_SYNC;
-                               swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
-                                                      attrs);
-                               sg_dma_len(sgl) = 0;
-                               return 0;
-                       }
-                       sg->dma_address = __phys_to_dma(hwdev, map);
-               } else
-                       sg->dma_address = dev_addr;
-               sg_dma_len(sg) = sg->length;
-       }
-       return nelems;
-}
-
-/*
- * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
- * concerning calls here are the same as for swiotlb_unmap_page() above.
- */
-void
-swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
-                      int nelems, enum dma_data_direction dir,
-                      unsigned long attrs)
-{
-       struct scatterlist *sg;
-       int i;
-
-       BUG_ON(dir == DMA_NONE);
-
-       for_each_sg(sgl, sg, nelems, i)
-               unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
-                            attrs);
-}
-
-/*
- * Make physical memory consistent for a set of streaming mode DMA translations
- * after a transfer.
- *
- * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
- * and usage.
- */
-static void
-swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-               int nelems, enum dma_data_direction dir,
-               enum dma_sync_target target)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nelems, i)
-               swiotlb_sync_single(hwdev, sg->dma_address,
-                                   sg_dma_len(sg), dir, target);
-}
-
-void
-swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
-                       int nelems, enum dma_data_direction dir)
-{
-       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
-}
-
-void
-swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-                          int nelems, enum dma_data_direction dir)
-{
-       swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
-}
-
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-       return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask to
- * this function.
- */
-int
-swiotlb_dma_supported(struct device *hwdev, u64 mask)
-{
-       return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
-}
-
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, unsigned long attrs)
-{
-       void *vaddr;
-
-       /* temporary workaround: */
-       if (gfp & __GFP_NOWARN)
-               attrs |= DMA_ATTR_NO_WARN;
-
-       /*
-        * Don't print a warning when the first allocation attempt fails.
-        * swiotlb_alloc_buffer() will print a warning when the DMA memory
-        * allocation ultimately fails.
-        */
-       gfp |= __GFP_NOWARN;
-
-       vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
-       if (!vaddr)
-               vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
-       return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
-               dma_addr_t dma_addr, unsigned long attrs)
-{
-       if (!swiotlb_free_buffer(dev, size, dma_addr))
-               dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
-const struct dma_map_ops swiotlb_dma_ops = {
-       .mapping_error          = swiotlb_dma_mapping_error,
-       .alloc                  = swiotlb_alloc,
-       .free                   = swiotlb_free,
-       .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
-       .sync_single_for_device = swiotlb_sync_single_for_device,
-       .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
-       .sync_sg_for_device     = swiotlb_sync_sg_for_device,
-       .map_sg                 = swiotlb_map_sg_attrs,
-       .unmap_sg               = swiotlb_unmap_sg_attrs,
-       .map_page               = swiotlb_map_page,
-       .unmap_page             = swiotlb_unmap_page,
-       .dma_supported          = dma_direct_supported,
-};
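
For context, drivers never call these ops directly; they go through the generic DMA API, which dispatches via a dma_map_ops table like the one above. A minimal sketch of the consumer side (illustrative; dev, buf and len are assumed to come from the driver) shows why the mapping_error hook matters, since map_page can hand back the overflow buffer on pool exhaustion:

        dma_addr_t addr;

        addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;         /* bounce-buffer pool exhausted */

        /* ... program the device with 'addr' and wait for completion ... */

        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
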
index 60aedc87936106460e436fe66429d45a59f36060..08d3d59dca17343c1a91def02d0a7da931c1c0f0 100644 (file)
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Ctx heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                {
                        {  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
                        { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
                },
                .fill_helper = bpf_fill_maxinsns6,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Call heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC | FLAG_NO_DATA,
+#endif
                { },
                { { 1, 0 }, { 10, 0 } },
                .fill_helper = bpf_fill_maxinsns7,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
        {
                "BPF_MAXINSNS: exec all MSH",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { 0xfa, 0xfb, 0xfc, 0xfd, },
                { { 4, 0xababab83 } },
                .fill_helper = bpf_fill_maxinsns13,
+               .expected_errcode = -ENOTSUPP,
        },
        {
                "BPF_MAXINSNS: ld_abs+get_processor_id",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
+               .expected_errcode = -ENOTSUPP,
        },
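
With CONFIG_BPF_JIT_ALWAYS_ON on s390 there is no interpreter to fall back to when the JIT rejects an oversized classic program, so these tests now carry FLAG_EXPECTED_FAIL plus an expected_errcode. A sketch of how a runner can honour that pair (field names mirror the entries above; the surrounding harness logic is assumed, not quoted from lib/test_bpf.c):

        if (err) {
                /* Creation failed: only a pass if the test said so and
                 * the error matches what it predicted (-ENOTSUPP here). */
                if ((test->aux & FLAG_EXPECTED_FAIL) &&
                    err == test->expected_errcode)
                        return 0;
                return err;
        }
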
        /*
         * LD_IND / LD_ABS on fragmented SKBs
index b2aa8f5148449de1557e3ee48feebb8f1cab3083..cea592f402ed029d6d5dd63addd2d2bc8a8391f1 100644 (file)
@@ -260,13 +260,6 @@ plain(void)
 {
        int err;
 
-       /*
-        * Make sure crng is ready. Otherwise we get "(ptrval)" instead
-        * of a hashed address when printing '%p' in plain_hash() and
-        * plain_format().
-        */
-       wait_for_random_bytes();
-
        err = plain_hash();
        if (err) {
                pr_warn("plain 'p' does not appear to be hashed\n");
index 347cc834c04a8cbc388af1b7594e4f09bdc68b41..2e5d3df0853d928021cba0e30c70ff682afeaf68 100644 (file)
@@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
        spin_lock_bh(&wb->work_lock);
        if (!test_and_clear_bit(WB_registered, &wb->state)) {
                spin_unlock_bh(&wb->work_lock);
-               /*
-                * Wait for wb shutdown to finish if someone else is just
-                * running wb_shutdown(). Otherwise we could proceed to wb /
-                * bdi destruction before wb_shutdown() is finished.
-                */
-               wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
                return;
        }
-       set_bit(WB_shutting_down, &wb->state);
        spin_unlock_bh(&wb->work_lock);
 
        cgwb_remove_from_bdi_list(wb);
@@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
        mod_delayed_work(bdi_wq, &wb->dwork, 0);
        flush_delayed_work(&wb->dwork);
        WARN_ON(!list_empty(&wb->work_list));
-       /*
-        * Make sure bit gets cleared after shutdown is finished. Matches with
-        * the barrier provided by test_and_clear_bit() above.
-        */
-       smp_wmb();
-       clear_and_wake_up_bit(WB_shutting_down, &wb->state);
 }
 
 static void wb_exit(struct bdi_writeback *wb)
@@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work)
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
 
+       mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
 
        css_put(wb->memcg_css);
        css_put(wb->blkcg_css);
+       mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
@@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 
        INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
        bdi->cgwb_congested_tree = RB_ROOT;
+       mutex_init(&bdi->cgwb_release_mutex);
 
        ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
        if (!ret) {
@@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
        spin_lock_irq(&cgwb_lock);
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
+       spin_unlock_irq(&cgwb_lock);
 
+       mutex_lock(&bdi->cgwb_release_mutex);
+       spin_lock_irq(&cgwb_lock);
        while (!list_empty(&bdi->wb_list)) {
                wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
                                      bdi_node);
@@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
                spin_lock_irq(&cgwb_lock);
        }
        spin_unlock_irq(&cgwb_lock);
+       mutex_unlock(&bdi->cgwb_release_mutex);
 }
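
The new cgwb_release_mutex replaces the WB_shutting_down bit-waiting deleted above: instead of two paths racing to run wb_shutdown() while one spins on a bit, the release worker and cgwb_bdi_unregister() now serialize on a per-bdi mutex. A sketch of the resulting ordering (simplified; refcount drops and list handling omitted):

        /* Release worker:                  BDI teardown:
         *
         * mutex_lock(release_mutex);       cgwb_kill() each wb under cgwb_lock
         * wb_shutdown(wb);                 mutex_lock(release_mutex);
         * css_put(...);                    wb_shutdown() every wb still listed
         * mutex_unlock(release_mutex);     mutex_unlock(release_mutex);
         *
         * Whichever side takes the mutex first finishes its shutdown before
         * the other can touch the same wb, so wb_shutdown() never runs twice
         * concurrently on one writeback structure.
         */
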
 
 /**
index 56e2d9125ea55a57632feb22c64f2c1a76f5ec6e..38c926520c9718b8929a72829931080a3a53502d 100644 (file)
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       bool page_poisoned = PagePoisoned(page);
+       int mapcount;
+
+       /*
+        * If the struct page is poisoned, don't access Page*() functions, as
+        * that leads to a recursive loop. Page*() functions check for poisoned
+        * pages and call dump_page() when one is detected.
+        */
+       if (page_poisoned) {
+               pr_emerg("page:%px is uninitialized and poisoned", page);
+               goto hex_only;
+       }
+
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode their own info.
         */
-       int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+       mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
        pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
                  page, page_ref_count(page), mapcount,
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason)
 
        pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 
+hex_only:
        print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_alert("page dumped because: %s\n", reason);
 
 #ifdef CONFIG_MEMCG
-       if (page->mem_cgroup)
+       if (!page_poisoned && page->mem_cgroup)
                pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
index b70d7ba7cc13522c5bab5594b1211679b21b01e7..fc5f98069f4ea5b2906cf45e8997327c99a5b7ce 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        int locked = 0;
        long ret = 0;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
        for (nstart = start; nstart < end; nstart = nend) {
index 3612fbb32e9d5412e8494e4c220fad84e3a4e779..039ddbc574e926800f9104ede90782753605f46b 100644 (file)
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void)
                 */
                if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
+               cond_resched();
        }
 }
 
index f185455b34065d27efa2b6a90c9dd2c1dfe92ae9..c3bd5209da380d9a51a0fa4515f4fcdeefcad409 100644 (file)
@@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
 int kasan_module_alloc(void *addr, size_t size)
 {
        void *ret;
+       size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;
 
        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-       shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
-                       PAGE_SIZE);
+       scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+       shadow_size = round_up(scaled_size, PAGE_SIZE);
 
        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;
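
The fix matters when the module size is not a multiple of the shadow granularity: shifting first can round the needed shadow down across a page boundary. A worked example, assuming the usual KASAN_SHADOW_SCALE_SHIFT of 3 (one shadow byte per 8 bytes of memory, so KASAN_SHADOW_MASK is 7) and 4 KiB pages:

        /* size = 32769 bytes of module memory (8 * 4096 + 1):
         *   shadow actually needed = ceil(32769 / 8) = 4097 bytes
         *
         * old: round_up(32769 >> 3, PAGE_SIZE)
         *    = round_up(4096, 4096) = 4096   -> last byte uncovered
         *
         * new: round_up((32769 + 7) >> 3, PAGE_SIZE)
         *    = round_up(4097, 4096) = 8192   -> fully covered
         */
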
index cc16d70b8333890730d16c08b858631947e38d70..11e46f83e1adea6e12af696a357c79864e65575a 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/kmemleak.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
-#include <linux/bootmem.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
@@ -228,7 +227,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                 * so we use WARN_ONCE() here to see the stack trace if
                 * a failure happens.
                 */
-               WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
+               WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
+                         "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
        }
 
        return __memblock_find_range_top_down(start, end, size, align, nid,
index d1eb87ef4b1afa101fde9da06d2991644ca49dda..5801b5f0a634b5561db5e70c347a93fd3a1ad59f 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -186,8 +186,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        return next;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
-
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
+               struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long retval;
@@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                goto out;
 
        /* Ok, looks good - let it rip. */
-       if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
+       if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
                goto out;
 
 set_brk:
@@ -2929,21 +2929,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
-       unsigned long len;
        struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
 
-       len = PAGE_ALIGN(request);
-       if (len < request)
-               return -ENOMEM;
-       if (!len)
-               return 0;
-
        /* Until we need other flags, refuse anything except VM_EXEC. */
        if ((flags & (~VM_EXEC)) != 0)
                return -EINVAL;
@@ -3015,18 +3008,20 @@ out:
        return 0;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
-{
-       return do_brk_flags(addr, len, 0, uf);
-}
-
-int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
+       unsigned long len;
        int ret;
        bool populate;
        LIST_HEAD(uf);
 
+       len = PAGE_ALIGN(request);
+       if (len < request)
+               return -ENOMEM;
+       if (!len)
+               return 0;
+
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
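
The alignment-plus-overflow check that moved here from do_brk_flags() guards against a request so large that rounding it up to a page boundary wraps the unsigned arithmetic. A standalone illustration (hypothetical user-space code, assuming 4 KiB pages on a 64-bit machine):

        #include <stdio.h>

        #define PAGE_SIZE  4096UL
        #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

        int main(void)
        {
                unsigned long request = ~0UL - 100;      /* absurd brk request */
                unsigned long len = PAGE_ALIGN(request); /* wraps to 0 */

                /* prints: wrapped=1 -> would return -ENOMEM */
                printf("wrapped=%d -> would return -ENOMEM\n", len < request);
                return 0;
        }
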
 
index 1521100f1e63b729bba37e21723a312950d688d8..a790ef4be74e3bb35e5cefc13398645bd607979a 100644 (file)
@@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
        free_area_init_core(pgdat);
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void)
        if (pgcnt)
                pr_info("Reserved but unavailable: %lld pages", pgcnt);
 }
-#endif /* CONFIG_HAVE_MEMBLOCK */
+#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 
@@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
+       zero_resv_unavail();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, NULL,
@@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                        node_set_state(nid, N_MEMORY);
                check_for_memory(pgdat, nid);
        }
-       zero_resv_unavail();
 }
 
 static int __init cmdline_parse_core(char *p, unsigned long *core,
@@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 
 void __init free_area_init(unsigned long *zones_size)
 {
+       zero_resv_unavail();
        free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
-       zero_resv_unavail();
 }
 
 static int page_alloc_cpu_dead(unsigned int cpu)
index 6db729dc4c5013784e65cc9e6438bef39d22b9a8..eb477809a5c0a534e2977f6fd6c1df74a05bc170 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -64,6 +64,7 @@
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlbflush.h>
 
@@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                set_pte_at(mm, address, pvmw.pte, pteval);
                        }
 
-               } else if (pte_unused(pteval)) {
+               } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
                        /*
                         * The guest indicated that the page content is of no
                         * interest anymore. Simply discard the pte, vmscan
                         * will take care of the rest.
+                        * A future reference will then fault in a new zero
+                        * page. When userfaultfd is active, we must not drop
+                        * this page though, as its main user (postcopy
+                        * migration) will not expect userfaults on already
+                        * copied pages.
                         */
                        dec_mm_counter(mm, mm_counter(page));
                        /* We have to invalidate as we cleared the pte */
index 890b1f04a03a3d46f80fe1b2cfccae1f14b79836..2296caf87bfbd28a626663af04f2054a0cc3c45c 100644 (file)
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s)
        list_del(&s->list);
 
        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
+#endif
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
 #ifdef SLAB_SUPPORTS_SYSFS
+               sysfs_slab_unlink(s);
                sysfs_slab_release(s);
 #else
                slab_kmem_cache_release(s);
index a3b8467c14af642138deaf35fd3ed3f7f87aed93..51258eff417836f6c5a72433a65c016c8391beb2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
        kset_unregister(s->memcg_kset);
 #endif
        kobject_uevent(&s->kobj, KOBJ_REMOVE);
-       kobject_del(&s->kobj);
 out:
        kobject_put(&s->kobj);
 }
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
        schedule_work(&s->kobj_remove_work);
 }
 
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+       if (slab_state >= FULL)
+               kobject_del(&s->kobj);
+}
+
 void sysfs_slab_release(struct kmem_cache *s)
 {
        if (slab_state >= FULL)
index 75eda9c2b2602fe24b4c431f797c5e0fc563ebda..8ba0870ecddd0fd592d16ee674b060db512b5b37 100644 (file)
@@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w)
                 * to occur in the future. Keep on running the
                 * update worker thread.
                 */
-               preempt_disable();
                queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
                                this_cpu_ptr(&vmstat_work),
                                round_jiffies_relative(sysctl_stat_interval));
-               preempt_enable();
        }
 }
 
index 73a65789271ba9346902dd721b0accd8ce747adc..8ccee3d01822f78184357141ced7a07c3109dc2c 100644 (file)
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 18c5271910dc2c1e1715efcd2b448b7cee6a844b..5c1343195292c8f474ff683c73c8f73aa51843a0 100644 (file)
@@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
        }
 
 free_and_return:
-       v9fs_put_trans(clnt->trans_mod);
+       if (ret)
+               v9fs_put_trans(clnt->trans_mod);
        kfree(tmp_options);
        return ret;
 }
index 13ec0d5415c74486c68f8290689d16d78513e6e9..bdaf53925acd5606fdb953800620bd05cf0f259e 100644 (file)
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS)            += tls/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
 obj-$(CONFIG_NET)              += ipv6/
-ifneq ($(CC_CAN_LINK),y)
-$(warning CC cannot link executables. Skipping bpfilter.)
-else
 obj-$(CONFIG_BPFILTER)         += bpfilter/
-endif
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index 55fdba05d7d9daa805d358118852aabb07746e81..9b6bc5abe94680c0a982b9193932f245080f2f85 100644 (file)
@@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = atalk_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = atalk_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = atalk_compat_ioctl,
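
This hunk, like the vcc_poll(), bt_sock_poll() and datagram_poll ones below, is part of the 4.18 revert from the short-lived ->poll_mask experiment back to the classic ->poll method, which receives the file and a poll_table and must register for wakeups itself. A skeleton of the restored contract (illustrative; real implementations add protocol-specific readiness checks):

        static __poll_t example_poll(struct file *file, struct socket *sock,
                                     poll_table *wait)
        {
                struct sock *sk = sock->sk;
                __poll_t mask = 0;

                /* Register on the wait queue before sampling state, so a
                 * wakeup between the check and the sleep is not lost. */
                poll_wait(file, sk_sleep(sk), wait);

                if (!skb_queue_empty(&sk->sk_receive_queue))
                        mask |= EPOLLIN | EPOLLRDNORM;
                if (sock_writeable(sk))
                        mask |= EPOLLOUT | EPOLLWRNORM;
                return mask;
        }
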
index 36b3adacc0ddc1bd9a6c5b8dd55cedba4e9bf47b..10462de734eafc00efb9490ddd58cd0bbc83b7c8 100644 (file)
@@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
 
        ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
-       refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = atmvcc->atm_options;
+       atm_account_tx(atmvcc, skb);
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
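
This and the following net/atm hunks fold the open-coded wmem accounting and atm_options copy into a single atm_account_tx() helper. Its definition is outside this excerpt, but the atm_pop_raw() hunk further down, which subtracts ATM_SKB(skb)->acct_truesize instead of skb->truesize, suggests a shape along these lines (a sketch, not the verbatim header change):

        static inline void atm_account_tx(struct atm_vcc *vcc,
                                          struct sk_buff *skb)
        {
                /* Snapshot truesize at accounting time so the pop side can
                 * subtract exactly what was added, even if skb->truesize is
                 * inflated later on the transmit path. */
                ATM_SKB(skb)->acct_truesize = skb->truesize;
                ATM_SKB(skb)->atm_options = vcc->atm_options;
                refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        }
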
 
index 66caa48a27c2307c1b2b43f4a4381f3b34e78485..d795b9c5aea4a4e35021d9db2e10254036df55fe 100644 (file)
@@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
                memcpy(here, llc_oui, sizeof(llc_oui));
                ((__be16 *) here)[3] = skb->protocol;
        }
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
        entry->vccs->last_use = jiffies;
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
        old = xchg(&entry->vccs->xoff, 1);      /* assume XOFF ... */
index 1f2af59935db356c003cfa8dd7d1bce388fada53..a7a68e5096288df11af1037297189962dc2fa548 100644 (file)
@@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
                goto out;
        }
        pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
-       refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+       atm_account_tx(vcc, skb);
 
        skb->dev = NULL; /* for paths shared with net_device interfaces */
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
        if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
                kfree_skb(skb);
                error = -EFAULT;
@@ -648,11 +647,16 @@ out:
        return error;
 }
 
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       struct atm_vcc *vcc = ATM_SD(sock);
-       __poll_t mask = 0;
+       struct atm_vcc *vcc;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
+
+       vcc = ATM_SD(sock);
 
        /* exceptional events */
        if (sk->sk_err)
index 526796ad230fc6a2dbdca37f0d4f66f4edf47f17..5850649068bb29b3d688b4c8e29373b4a7f7592d 100644 (file)
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
 int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                int flags);
 int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_setsockopt(struct socket *sock, int level, int optname,
index 5a95fcf6f9b6cc62ced5480910dac7e41f2e7f06..d7f5cf5b7594d0ea4e766e06fbc07e6fce590e3b 100644 (file)
@@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
        struct net_device *dev = skb->dev;
 
        ATM_SKB(skb)->vcc = vcc;
-       ATM_SKB(skb)->atm_options = vcc->atm_options;
+       atm_account_tx(vcc, skb);
 
-       refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
        if (vcc->send(vcc, skb) < 0) {
                dev->stats.tx_dropped++;
                return;
index 75620c2f261723a915b74df013ac214479ba70c4..24b53c4c39c6a6b5323a1aa79318b2ab2907a332 100644 (file)
@@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
                                        sizeof(struct llc_snap_hdr));
        }
 
-       refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
+       atm_account_tx(entry->shortcut, skb);
        entry->shortcut->send(entry->shortcut, skb);
        entry->packets_fwded++;
        mpc->in_ops->put(entry);
index 21d9d341a6199255a017437954e4b688f1ba5bfd..af8c4b38b7463e03bf4b060735ce852b515d526c 100644 (file)
@@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
                return 1;
        }
 
-       refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
-       ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
+       atm_account_tx(vcc, skb);
        pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
                 skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
        ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
index 9f75092fe7785c080b2a32f9c2c8b147056bd488..2cb10af16afcf8eeb925bfe1aab33e839821109a 100644 (file)
@@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      pvc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        vcc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = vcc_compat_ioctl,
index ee10e8d46185173067f459aa5efdf5a77f8f9f06..b3ba44aab0ee6c9425fd278ebf8e2df1590a6d7a 100644 (file)
@@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
        struct sock *sk = sk_atm(vcc);
 
        pr_debug("(%d) %d -= %d\n",
-                vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
-       WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+                vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+       WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
        dev_kfree_skb_any(skb);
        sk->sk_write_space(sk);
 }
index 53f4ad7087b169bccbd8d0b86c7463fd77204a8d..2f91b766ac423c97a0b9c1fd340222e31b17eefa 100644 (file)
@@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       svc_accept,
        .getname =      svc_getname,
-       .poll_mask =    vcc_poll_mask,
+       .poll =         vcc_poll,
        .ioctl =        svc_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = svc_compat_ioctl,
index d1d2442ce573280cbc5b12beba96225bb7445a47..c603d33d54108b9f93f1745534da28d25f12c0ea 100644 (file)
@@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = ax25_accept,
        .getname        = ax25_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = ax25_ioctl,
        .listen         = ax25_listen,
        .shutdown       = ax25_shutdown,
index be09a98838252f4f0c23cec0625930cf896cd0ff..73bf6a93a3cf1141a34657bf1284893199e04db9 100644 (file)
@@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
-       struct batadv_gw_node *curr_gw;
+       struct batadv_gw_node *curr_gw = NULL;
        int ret = 0;
        void *hdr;
 
@@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        ret = 0;
 
 out:
+       if (curr_gw)
+               batadv_gw_node_put(curr_gw);
        if (router_ifinfo)
                batadv_neigh_ifinfo_put(router_ifinfo);
        if (router)
index ec93337ee2597738e46b87dd72724d5becf3f48e..6baec4e68898c6e992e7522d2ee8c78ce62a1b08 100644 (file)
@@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
 {
        struct batadv_neigh_ifinfo *router_ifinfo = NULL;
        struct batadv_neigh_node *router;
-       struct batadv_gw_node *curr_gw;
+       struct batadv_gw_node *curr_gw = NULL;
        int ret = 0;
        void *hdr;
 
@@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
        ret = 0;
 
 out:
+       if (curr_gw)
+               batadv_gw_node_put(curr_gw);
        if (router_ifinfo)
                batadv_neigh_ifinfo_put(router_ifinfo);
        if (router)
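
Both gateway-dump hunks fix the same leak with the same pattern: initialize the pointer so the shared exit path is safe to run early, and drop the reference exactly once there. The general shape (sketched; the lookup helper name is assumed from the batman-adv naming convention):

        struct batadv_gw_node *curr_gw = NULL;

        curr_gw = batadv_gw_node_get(bat_priv, gw_node);  /* takes a ref */
        if (!curr_gw)
                goto out;
        /* ... emit netlink attributes for curr_gw ... */
out:
        if (curr_gw)                    /* NULL-safe: ref dropped once */
                batadv_gw_node_put(curr_gw);
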
index 4229b01ac7b54008e023df0ed6546a6d541498ba..87479c60670ebfbe2ad3df17130f1289d657df7b 100644 (file)
@@ -19,6 +19,7 @@
 #include "debugfs.h"
 #include "main.h"
 
+#include <linux/dcache.h>
 #include <linux/debugfs.h>
 #include <linux/err.h>
 #include <linux/errno.h>
@@ -343,6 +344,25 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
+ * @hard_iface: hard interface which was renamed
+ */
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+       const char *name = hard_iface->net_dev->name;
+       struct dentry *dir;
+       struct dentry *d;
+
+       dir = hard_iface->debug_dir;
+       if (!dir)
+               return;
+
+       d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+       if (!d)
+               pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
 /**
  * batadv_debugfs_del_hardif() - delete the base directory for a hard interface
  *  in debugfs.
@@ -413,6 +433,26 @@ out:
        return -ENOMEM;
 }
 
+/**
+ * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
+ * @dev: net_device which was renamed
+ */
+void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+       struct batadv_priv *bat_priv = netdev_priv(dev);
+       const char *name = dev->name;
+       struct dentry *dir;
+       struct dentry *d;
+
+       dir = bat_priv->debug_dir;
+       if (!dir)
+               return;
+
+       d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+       if (!d)
+               pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
 /**
  * batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
  * @dev: netdev struct of the soft interface
index 37b069698b04b369e68e4e8a31c3ac01575b0178..08a592ffbee5203ac4994fc49bf9c187c2e66f8e 100644 (file)
@@ -30,8 +30,10 @@ struct net_device;
 void batadv_debugfs_init(void);
 void batadv_debugfs_destroy(void);
 int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_rename_meshif(struct net_device *dev);
 void batadv_debugfs_del_meshif(struct net_device *dev);
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
 
 #else
@@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
        return 0;
 }
 
+static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+}
+
 static inline void batadv_debugfs_del_meshif(struct net_device *dev)
 {
 }
@@ -59,6 +65,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
        return 0;
 }
 
+static inline
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
 static inline
 void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
 {
index c405d15befd60bdabf9f50813c3bee446238d539..2f0d42f2f913e74cf10c0c6ce89320434994cac5 100644 (file)
@@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void)
        rtnl_unlock();
 }
 
+/**
+ * batadv_hard_if_event_softif() - Handle events for soft interfaces
+ * @event: NETDEV_* event to handle
+ * @net_dev: net_device which generated an event
+ *
+ * Return: NOTIFY_* result
+ */
+static int batadv_hard_if_event_softif(unsigned long event,
+                                      struct net_device *net_dev)
+{
+       struct batadv_priv *bat_priv;
+
+       switch (event) {
+       case NETDEV_REGISTER:
+               batadv_sysfs_add_meshif(net_dev);
+               bat_priv = netdev_priv(net_dev);
+               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+               break;
+       case NETDEV_CHANGENAME:
+               batadv_debugfs_rename_meshif(net_dev);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
 static int batadv_hard_if_event(struct notifier_block *this,
                                unsigned long event, void *ptr)
 {
@@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
        struct batadv_hard_iface *primary_if = NULL;
        struct batadv_priv *bat_priv;
 
-       if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
-               batadv_sysfs_add_meshif(net_dev);
-               bat_priv = netdev_priv(net_dev);
-               batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
-               return NOTIFY_DONE;
-       }
+       if (batadv_softif_is_valid(net_dev))
+               return batadv_hard_if_event_softif(event, net_dev);
 
        hard_iface = batadv_hardif_get_by_netdev(net_dev);
        if (!hard_iface && (event == NETDEV_REGISTER ||
@@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
                if (batadv_is_wifi_hardif(hard_iface))
                        hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
                break;
+       case NETDEV_CHANGENAME:
+               batadv_debugfs_rename_hardif(hard_iface);
+               break;
        default:
                break;
        }
index 3986551397caa5ffb6ba7338eeb4769c8b8f99fb..12a2b7d21376721d15c6a31f3e794e4270d74b5c 100644 (file)
@@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                ether_addr_copy(common->addr, tt_addr);
                common->vid = vid;
 
-               common->flags = flags;
+               if (!is_multicast_ether_addr(common->addr))
+                       common->flags = flags & (~BATADV_TT_SYNC_MASK);
+
                tt_global_entry->roam_at = 0;
                /* node must store current time in case of roaming. This is
                 * needed to purge this entry out on timeout (if nobody claims
@@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                 * TT_CLIENT_TEMP, therefore they have to be copied in the
                 * client entry
                 */
-               common->flags |= flags & (~BATADV_TT_SYNC_MASK);
+               if (!is_multicast_ether_addr(common->addr))
+                       common->flags |= flags & (~BATADV_TT_SYNC_MASK);
 
                /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
                 * one originator left in the list and we previously received a
index 510ab4f55df56bc1c356d5130d2dbea4be4744ff..3264e1873219bd40b8c1ccfc2ce6c40d96ca0030 100644 (file)
@@ -437,13 +437,16 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
        return 0;
 }
 
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
+                         poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        BT_DBG("sock %p, sk %p", sock, sk);
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
@@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(bt_sock_poll_mask);
+EXPORT_SYMBOL(bt_sock_poll);
 
 int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 {
index d6c0998615388d078c0910bee08784b4fac2f0c0..1506e1632394acf06e9f5873d045bd394e5b3059 100644 (file)
@@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = {
        .sendmsg        = hci_sock_sendmsg,
        .recvmsg        = hci_sock_recvmsg,
        .ioctl          = hci_sock_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = hci_sock_setsockopt,
index 742a190034e6378a4be886ed730d55936c82ee27..686bdc6b35b03d1fd0965dc0fd76c5edde78c1eb 100644 (file)
@@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = {
        .getname        = l2cap_sock_getname,
        .sendmsg        = l2cap_sock_sendmsg,
        .recvmsg        = l2cap_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 1cf57622473aa70d626e1df5ad867800ab4cfe6e..d606e9212291608ea2e266238c0f65ce18d0c311 100644
@@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = {
        .setsockopt     = rfcomm_sock_setsockopt,
        .getsockopt     = rfcomm_sock_getsockopt,
        .ioctl          = rfcomm_sock_ioctl,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .socketpair     = sock_no_socketpair,
        .mmap           = sock_no_mmap
 };
index d60dbc61d170864b1393aabb0d7f7965a1e6ad17..413b8ee49feca325dea79e328c11b8ba00afbce3 100644
@@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = {
        .getname        = sco_sock_getname,
        .sendmsg        = sco_sock_sendmsg,
        .recvmsg        = sco_sock_recvmsg,
-       .poll_mask      = bt_sock_poll_mask,
+       .poll           = bt_sock_poll,
        .ioctl          = bt_sock_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
index 68c3578343b4b4d026e9df40fda98a7850757877..22a78eedf4b1447a8f42cc442615191d66ff1b99 100644
@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        u32 retval, duration;
+       int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        void *data;
        int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
        skb_reset_network_header(skb);
 
        if (is_l2)
-               __skb_push(skb, ETH_HLEN);
+               __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        retval = bpf_test_run(prog, skb, repeat, &duration);
-       if (!is_l2)
-               __skb_push(skb, ETH_HLEN);
+       if (!is_l2) {
+               if (skb_headroom(skb) < hh_len) {
+                       int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
+
+                       if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
+                               kfree_skb(skb);
+                               return -ENOMEM;
+                       }
+               }
+               memset(__skb_push(skb, hh_len), 0, hh_len);
+       }
+
        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
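The logic above, isolated: a program run without an L2 header may consume the skb's headroom, so before the dummy MAC header is pushed back the headroom is re-checked and expanded if necessary. A self-contained sketch of the same steps (helper name illustrative; on failure the caller still owns and frees the skb):

    static int example_push_l2(struct sk_buff *skb, unsigned int hh_len)
    {
            if (skb_headroom(skb) < hh_len) {
                    int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                    if (pskb_expand_head(skb, nhead, 0, GFP_USER))
                            return -ENOMEM;
            }
            /* zeroed dummy MAC header in front of the network header */
            memset(__skb_push(skb, hh_len), 0, hh_len);
            return 0;
    }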
diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore
new file mode 100644
index 0000000..e97084e
--- /dev/null
+++ b/net/bpfilter/.gitignore
@@ -0,0 +1 @@
+bpfilter_umh
index a948b072c28f36451a587a88bcb6f86c32023693..76deb661588322d9cf8ac6bdd73ba63f5d1416fc 100644
@@ -1,6 +1,5 @@
 menuconfig BPFILTER
        bool "BPF based packet filtering framework (BPFILTER)"
-       default n
        depends on NET && BPF && INET
        help
          This builds experimental bpfilter framework that is aiming to
@@ -9,6 +8,7 @@ menuconfig BPFILTER
 if BPFILTER
 config BPFILTER_UMH
        tristate "bpfilter kernel module with user mode helper"
+       depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
        default m
        help
          This builds bpfilter kernel module with embedded user mode helper
index e0bbe7583e58dcca5e17136d1b091ff03465b4d2..39c6980b5d9952eed1046f656d8c0a85b4a0d2d6 100644
@@ -15,18 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y)
 HOSTLDFLAGS += -static
 endif
 
-# a bit of elf magic to convert bpfilter_umh binary into a binary blob
-# inside bpfilter_umh.o elf file referenced by
-# _binary_net_bpfilter_bpfilter_umh_start symbol
-# which bpfilter_kern.c passes further into umh blob loader at run-time
-quiet_cmd_copy_umh = GEN $@
-      cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
-      $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
-      -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
-      --rename-section .data=.init.rodata $< $@
-
-$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
-       $(call cmd,copy_umh)
+$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
 
 obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o
-bpfilter-objs += bpfilter_kern.o bpfilter_umh.o
+bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o
index 09522573f611b01ba5fb4d52125e8264d9147f20..f0fc182d3db77eb311d91f7faef4e8a6f85886b3 100644
 #include <linux/file.h>
 #include "msgfmt.h"
 
-#define UMH_start _binary_net_bpfilter_bpfilter_umh_start
-#define UMH_end _binary_net_bpfilter_bpfilter_umh_end
-
-extern char UMH_start;
-extern char UMH_end;
+extern char bpfilter_umh_start;
+extern char bpfilter_umh_end;
 
 static struct umh_info info;
 /* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -93,7 +90,9 @@ static int __init load_umh(void)
        int err;
 
        /* fork usermode process */
-       err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info);
+       err = fork_usermode_blob(&bpfilter_umh_start,
+                                &bpfilter_umh_end - &bpfilter_umh_start,
+                                &info);
        if (err)
                return err;
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
new file mode 100644
index 0000000..40311d1
--- /dev/null
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+       .section .init.rodata, "a"
+       .global bpfilter_umh_start
+bpfilter_umh_start:
+       .incbin "net/bpfilter/bpfilter_umh"
+       .global bpfilter_umh_end
+bpfilter_umh_end:
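.incbin embeds the compiled bpfilter_umh binary verbatim into .init.rodata, bracketed by two global labels, replacing the objcopy incantation deleted from the Makefile above. On the C side the labels behave like ordinary symbols, so the blob's size is a plain pointer difference; a sketch (helper name illustrative):

    extern char bpfilter_umh_start;
    extern char bpfilter_umh_end;

    static size_t bpfilter_umh_size(void)
    {
            /* bytes of the embedded usermode helper */
            return &bpfilter_umh_end - &bpfilter_umh_start;
    }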
index c7991867d62273f48bb55e88774b573e81f40536..a6fb1b3bcad9b2f3c1c24b2a3496ad21b07c69d9 100644
@@ -934,11 +934,15 @@ static int caif_release(struct socket *sock)
 }
 
 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static __poll_t caif_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t caif_poll(struct file *file,
+                             struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       __poll_t mask;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-       __poll_t mask = 0;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
@@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
-       .poll_mask = caif_poll_mask,
+       .poll = caif_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
index 9393f25df08d3fce299aaa463efd79244e6527e9..0af8f0db892a3311fb5a1a898ab0bff5696adf00 100644
@@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = sock_no_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index fd7e2f49ea6a20b79c43bf50c72d2b1e8b48d260..1051eee8258184f33d15a6142ee8b387839c9adc 100644
@@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = {
        .socketpair    = sock_no_socketpair,
        .accept        = sock_no_accept,
        .getname       = raw_getname,
-       .poll_mask     = datagram_poll_mask,
+       .poll          = datagram_poll,
        .ioctl         = can_ioctl,     /* use can_ioctl() from af_can.c */
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
index f19bf3dc2bd6ea02cb828a95d0b91322ac8b0004..9938952c5c78f1e72ef13f44517ef054a60205b2 100644
@@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
 
 /**
  *     datagram_poll - generic datagram poll
+ *     @file: file struct
  *     @sock: socket
- *     @events to wait for
+ *     @wait: poll table
  *
  *     Datagram poll: Again totally generic. This also handles
  *     sequenced packet sockets providing the socket receive queue
@@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
  *     and you use a different write policy from sock_writeable()
  *     then please supply your own write_space callback.
  */
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+                          poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(datagram_poll_mask);
+EXPORT_SYMBOL(datagram_poll);
index 57b7bab5f70bb7c50a8be565cc90a40bc1c2d5d6..a5aa1c7444e688e66263fc112a4211409840a749 100644
@@ -8643,7 +8643,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               if (dev_get_valid_name(net, dev, pat) < 0)
+               err = dev_get_valid_name(net, dev, pat);
+               if (err < 0)
                        goto out;
        }
 
@@ -8655,7 +8656,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        dev_close(dev);
 
        /* And unlink it from device chain */
-       err = -ENODEV;
        unlist_netdevice(dev);
 
        synchronize_net();
index a04e1e88bf3ab49340d788589c365aaf45d9d3e2..50537ff961a722e18731b7b9671deb739bfce847 100644
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
                if (dev->tx_queue_len ^ ifr->ifr_qlen) {
-                       unsigned int orig_len = dev->tx_queue_len;
-
-                       dev->tx_queue_len = ifr->ifr_qlen;
-                       err = call_netdevice_notifiers(
-                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-                       err = notifier_to_errno(err);
-                       if (err) {
-                               dev->tx_queue_len = orig_len;
+                       err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
+                       if (err)
                                return err;
-                       }
                }
                return 0;
 
index 126ffc5bc630cb412e4bcf1a48869ec6711fda54..f64aa13811eaeedf8f0040bc9f993ad9e1661eca 100644
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->mark && r->mark != rule->mark)
                        continue;
 
+               if (rule->suppress_ifgroup != -1 &&
+                   r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (rule->suppress_prefixlen != -1 &&
+                   r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
                if (rule->mark_mask && r->mark_mask != rule->mark_mask)
                        continue;
 
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->ip_proto && r->ip_proto != rule->ip_proto)
                        continue;
 
+               if (rule->proto && r->proto != rule->proto)
+                       continue;
+
                if (fib_rule_port_range_set(&rule->sport_range) &&
                    !fib_rule_port_range_compare(&r->sport_range,
                                                 &rule->sport_range))
@@ -645,6 +656,73 @@ errout:
        return err;
 }
 
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+                      struct nlattr **tb, struct fib_rule *rule)
+{
+       struct fib_rule *r;
+
+       list_for_each_entry(r, &ops->rules_list, list) {
+               if (r->action != rule->action)
+                       continue;
+
+               if (r->table != rule->table)
+                       continue;
+
+               if (r->pref != rule->pref)
+                       continue;
+
+               if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+                       continue;
+
+               if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+                       continue;
+
+               if (r->mark != rule->mark)
+                       continue;
+
+               if (r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
+               if (r->mark_mask != rule->mark_mask)
+                       continue;
+
+               if (r->tun_id != rule->tun_id)
+                       continue;
+
+               if (r->fr_net != rule->fr_net)
+                       continue;
+
+               if (r->l3mdev != rule->l3mdev)
+                       continue;
+
+               if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+                   !uid_eq(r->uid_range.end, rule->uid_range.end))
+                       continue;
+
+               if (r->ip_proto != rule->ip_proto)
+                       continue;
+
+               if (r->proto != rule->proto)
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->sport_range,
+                                                &rule->sport_range))
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->dport_range,
+                                                &rule->dport_range))
+                       continue;
+
+               if (!ops->compare(r, frh, tb))
+                       continue;
+               return 1;
+       }
+       return 0;
+}
+
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
 {
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout;
 
        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-           rule_find(ops, frh, tb, rule, user_priority)) {
+           rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }
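rule_find() treats an unset field in the request as a wildcard, which suits lookup and delete but not the NLM_F_EXCL duplicate check, where every field must match exactly; hence the dedicated rule_exists() above. The difference between the two matching styles, reduced to a single field in a standalone demo:

    #include <stdbool.h>
    #include <stdio.h>

    struct req { unsigned int mark; };      /* one field stands in for all */

    /* rule_find() style: a zero request field matches anything */
    static bool find_style(const struct req *want, const struct req *have)
    {
            return !want->mark || want->mark == have->mark;
    }

    /* rule_exists() style: fields must be identical, set or not */
    static bool exists_style(const struct req *want, const struct req *have)
    {
            return want->mark == have->mark;
    }

    int main(void)
    {
            struct req want = { .mark = 0 }, have = { .mark = 7 };

            /* prints "find=1 exists=0": the wildcard lookup matches, the
             * duplicate check correctly does not
             */
            printf("find=%d exists=%d\n", find_style(&want, &have),
                   exists_style(&want, &have));
            return 0;
    }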
index 3d9ba7e5965adc4658b379a0cf55ff2f22f4b94d..06da770f543fdc2742f8503a5436a5893a566914 100644
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
+               bool ldx_off_ok = offset <= S16_MAX;
+
                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
-               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
-               *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
-                                     offset);
+               *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+                                     size, 2 + endian + (!ldx_off_ok * 2));
+               if (ldx_off_ok) {
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_D, offset);
+               } else {
+                       *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+                       *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+                       *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+                                             BPF_REG_TMP, 0);
+               }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
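The ldx_off_ok split exists because an eBPF instruction encodes its memory offset in a signed 16-bit field: an offset above S16_MAX cannot be expressed in BPF_LDX_MEM directly, so it is first added into the scratch register and the load is issued with offset 0. That fallback emits two extra instructions, which is why the conditional jump length grows by `!ldx_off_ok * 2`. The instruction layout imposing the limit, mirroring struct bpf_insn from include/uapi/linux/bpf.h:

    #include <linux/types.h>

    struct ebpf_insn_layout {               /* illustrative copy */
            __u8    code;           /* opcode */
            __u8    dst_reg:4;      /* destination register */
            __u8    src_reg:4;      /* source register */
            __s16   off;            /* signed 16-bit offset: the limit */
            __s32   imm;            /* signed 32-bit immediate */
    };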
@@ -1762,6 +1772,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+static inline int sk_skb_try_make_writable(struct sk_buff *skb,
+                                          unsigned int write_len)
+{
+       int err = __bpf_try_make_writable(skb, write_len);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return err;
+}
+
+BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
+{
+       /* Idea is the following: should the needed direct read/write
+        * test fail during runtime, we can pull in more data and redo
+        * again, since implicitly, we invalidate previous checks here.
+        *
+        * Or, since we know how much we need to make read/writeable,
+        * this can be done once at the program beginning for direct
+        * access case. By this we overcome limitations of only current
+        * headroom being accessible.
+        */
+       return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
+}
+
+static const struct bpf_func_proto sk_skb_pull_data_proto = {
+       .func           = sk_skb_pull_data,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
 {
@@ -2779,7 +2820,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
 
 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
 {
-       return skb->dev->mtu + skb->dev->hard_header_len;
+       return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+                         SKB_MAX_ALLOC;
 }
 
 static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
@@ -2863,8 +2905,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
        return __skb_trim_rcsum(skb, new_len);
 }
 
-BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
-          u64, flags)
+static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
+                                       u64 flags)
 {
        u32 max_len = __bpf_skb_max_len(skb);
        u32 min_len = __bpf_skb_min_len(skb);
@@ -2900,6 +2942,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
                if (!ret && skb_is_gso(skb))
                        skb_gso_reset(skb);
        }
+       return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_tail(skb, new_len, flags);
 
        bpf_compute_data_pointers(skb);
        return ret;
@@ -2914,8 +2963,26 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
+{
+       int ret = __bpf_skb_change_tail(skb, new_len, flags);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_tail_proto = {
+       .func           = sk_skb_change_tail,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
+
+static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+                                       u64 flags)
 {
        u32 max_len = __bpf_skb_max_len(skb);
        u32 new_len = skb->len + head_room;
@@ -2941,8 +3008,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
                skb_reset_mac_header(skb);
        }
 
+       return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_head(skb, head_room, flags);
+
        bpf_compute_data_pointers(skb);
-       return 0;
+       return ret;
 }
 
 static const struct bpf_func_proto bpf_skb_change_head_proto = {
@@ -2954,6 +3029,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
+          u64, flags)
+{
+       int ret = __bpf_skb_change_head(skb, head_room, flags);
+
+       bpf_compute_data_end_sk_skb(skb);
+       return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_head_proto = {
+       .func           = sk_skb_change_head,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+};
 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
 {
        return xdp_data_meta_unsupported(xdp) ? 0 :
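All of the sk_skb_* helpers introduced above follow one pattern: reuse the core skb operation shared with the tc-side helper, then recompute the data pointers with bpf_compute_data_end_sk_skb() instead of the generic bpf_compute_data_pointers(), matching how SK_SKB programs view the packet. The pattern in miniature, as if inside net/core/filter.c (names illustrative):

    static int core_op(struct sk_buff *skb, u32 arg)
    {
            /* shared packet-mangling logic */
            return 0;
    }

    static int tc_op(struct sk_buff *skb, u32 arg)
    {
            int ret = core_op(skb, arg);

            bpf_compute_data_pointers(skb);         /* tc view */
            return ret;
    }

    static int sk_skb_op(struct sk_buff *skb, u32 arg)
    {
            int ret = core_op(skb, arg);

            bpf_compute_data_end_sk_skb(skb);       /* SK_SKB view */
            return ret;
    }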
@@ -3046,12 +3138,16 @@ static int __bpf_tx_xdp(struct net_device *dev,
                        u32 index)
 {
        struct xdp_frame *xdpf;
-       int sent;
+       int err, sent;
 
        if (!dev->netdev_ops->ndo_xdp_xmit) {
                return -EOPNOTSUPP;
        }
 
+       err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+       if (unlikely(err))
+               return err;
+
        xdpf = convert_to_xdp_frame(xdp);
        if (unlikely(!xdpf))
                return -EOVERFLOW;
@@ -3214,20 +3310,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
-       unsigned int len;
-
-       if (unlikely(!(fwd->flags & IFF_UP)))
-               return -ENETDOWN;
-
-       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
-       if (skb->len > len)
-               return -EMSGSIZE;
-
-       return 0;
-}
-
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
@@ -3256,10 +3338,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
        }
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
-               if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+               struct bpf_dtab_netdev *dst = fwd;
+
+               err = dev_map_generic_redirect(dst, skb, xdp_prog);
+               if (unlikely(err))
                        goto err;
-               skb->dev = fwd;
-               generic_xdp_tx(skb, xdp_prog);
        } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                struct xdp_sock *xs = fwd;
 
@@ -3298,7 +3381,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                goto err;
        }
 
-       if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+       err = xdp_ok_fwd_dev(fwd, skb->len);
+       if (unlikely(err))
                goto err;
 
        skb->dev = fwd;
@@ -4086,8 +4170,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
        memcpy(params->smac, dev->dev_addr, ETH_ALEN);
        params->h_vlan_TCI = 0;
        params->h_vlan_proto = 0;
+       params->ifindex = dev->ifindex;
 
-       return dev->ifindex;
+       return 0;
 }
 #endif
 
@@ -4111,7 +4196,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        /* verify forwarding is enabled on this interface */
        in_dev = __in_dev_get_rcu(dev);
        if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl4.flowi4_iif = 1;
@@ -4136,7 +4221,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = fib_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
        } else {
@@ -4148,8 +4233,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
        }
 
-       if (err || res.type != RTN_UNICAST)
-               return 0;
+       if (err) {
+               /* map fib lookup errors to RTN_ type */
+               if (err == -EINVAL)
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               if (err == -EHOSTUNREACH)
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               if (err == -EACCES)
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+       }
+
+       if (res.type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (res.fi->fib_nhs > 1)
                fib_select_path(net, &res, &fl4, NULL);
@@ -4157,19 +4254,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        nh = &res.fi->fib_nh[res.nh_sel];
 
        /* do not handle lwt encaps right now */
        if (nh->nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        dev = nh->nh_dev;
-       if (unlikely(!dev))
-               return 0;
-
        if (nh->nh_gw)
                params->ipv4_dst = nh->nh_gw;
 
@@ -4179,10 +4273,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         * rcu_read_lock_bh is not needed here
         */
        neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4203,7 +4297,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        /* link local addresses are never forwarded */
        if (rt6_need_strict(dst) || rt6_need_strict(src))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        dev = dev_get_by_index_rcu(net, params->ifindex);
        if (unlikely(!dev))
@@ -4211,7 +4305,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        idev = __in6_dev_get_safely(dev);
        if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl6.flowi6_iif = 1;
@@ -4238,7 +4332,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = ipv6_stub->fib6_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
        } else {
@@ -4251,11 +4345,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        }
 
        if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+
+       if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
+               switch (f6i->fib6_type) {
+               case RTN_BLACKHOLE:
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               case RTN_UNREACHABLE:
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               case RTN_PROHIBIT:
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+               default:
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
+               }
+       }
 
-       if (unlikely(f6i->fib6_flags & RTF_REJECT ||
-           f6i->fib6_type != RTN_UNICAST))
-               return 0;
+       if (f6i->fib6_type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
                f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
@@ -4265,11 +4371,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        if (f6i->fib6_nh.nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        if (f6i->fib6_flags & RTF_GATEWAY)
                *dst = f6i->fib6_nh.nh_gw;
@@ -4283,10 +4389,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         */
        neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
                                      ndisc_hashfn, dst, dev);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4328,7 +4434,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
           struct bpf_fib_lookup *, params, int, plen, u32, flags)
 {
        struct net *net = dev_net(skb->dev);
-       int index = -EAFNOSUPPORT;
+       int rc = -EAFNOSUPPORT;
 
        if (plen < sizeof(*params))
                return -EINVAL;
@@ -4339,25 +4445,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
        switch (params->family) {
 #if IS_ENABLED(CONFIG_INET)
        case AF_INET:
-               index = bpf_ipv4_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv4_fib_lookup(net, params, flags, false);
                break;
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               index = bpf_ipv6_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv6_fib_lookup(net, params, flags, false);
                break;
 #endif
        }
 
-       if (index > 0) {
+       if (!rc) {
                struct net_device *dev;
 
-               dev = dev_get_by_index_rcu(net, index);
+               dev = dev_get_by_index_rcu(net, params->ifindex);
                if (!is_skb_forwardable(dev, skb))
-                       index = 0;
+                       rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       return index;
+       return rc;
 }
 
 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
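With this series bpf_fib_lookup() no longer returns an egress ifindex, 0, or an errno; it always returns one of the BPF_FIB_LKUP_RET_* codes above, and on success the egress device is reported through params->ifindex. A rough sketch of the program-side contract; the helper header and section conventions are assumptions here, not taken from this patch:

    #include <linux/bpf.h>
    #include "bpf_helpers.h"   /* assumed: provides SEC() and helper decls */

    SEC("xdp")
    int xdp_fwd_sketch(struct xdp_md *ctx)
    {
            struct bpf_fib_lookup params = {};
            int rc;

            params.family  = 2;                     /* AF_INET */
            params.ifindex = ctx->ingress_ifindex;
            /* fill saddr/daddr/tot_len from the parsed packet (omitted) */

            rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
            if (rc == BPF_FIB_LKUP_RET_SUCCESS)
                    /* params.ifindex now names the egress device */
                    return bpf_redirect(params.ifindex, 0);

            /* any other BPF_FIB_LKUP_RET_* code: punt to the stack */
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";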
@@ -4430,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
        .arg4_type      = ARG_CONST_SIZE
 };
 
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
           const void *, from, u32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        void *srh_tlvs, *srh_end, *ptr;
@@ -4459,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
 
        memcpy(skb->data + offset, from, len);
        return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
@@ -4477,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
           u32, action, void *, param, u32, param_len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        struct ipv6_sr_hdr *srh;
@@ -4525,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
        default:
                return -EINVAL;
        }
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
@@ -4543,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
           s32, len)
 {
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        struct seg6_bpf_srh_state *srh_state =
                this_cpu_ptr(&seg6_bpf_srh_states);
        void *srh_end, *srh_tlvs, *ptr;
@@ -4587,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
        srh_state->hdrlen += len;
        srh_state->valid = 0;
        return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
-       return -EOPNOTSUPP;
-#endif
 }
 
 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
@@ -4600,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
 };
+#endif /* CONFIG_IPV6_SEG6_BPF */
 
 bool bpf_helper_changes_pkt_data(void *func)
 {
@@ -4608,9 +4704,12 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_skb_store_bytes ||
            func == bpf_skb_change_proto ||
            func == bpf_skb_change_head ||
+           func == sk_skb_change_head ||
            func == bpf_skb_change_tail ||
+           func == sk_skb_change_tail ||
            func == bpf_skb_adjust_room ||
            func == bpf_skb_pull_data ||
+           func == sk_skb_pull_data ||
            func == bpf_clone_redirect ||
            func == bpf_l3_csum_replace ||
            func == bpf_l4_csum_replace ||
@@ -4618,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_xdp_adjust_meta ||
            func == bpf_msg_pull_data ||
            func == bpf_xdp_adjust_tail ||
-           func == bpf_lwt_push_encap ||
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
            func == bpf_lwt_seg6_store_bytes ||
            func == bpf_lwt_seg6_adjust_srh ||
-           func == bpf_lwt_seg6_action
-           )
+           func == bpf_lwt_seg6_action ||
+#endif
+           func == bpf_lwt_push_encap)
                return true;
 
        return false;
@@ -4862,11 +4962,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
        case BPF_FUNC_skb_load_bytes:
                return &bpf_skb_load_bytes_proto;
        case BPF_FUNC_skb_pull_data:
-               return &bpf_skb_pull_data_proto;
+               return &sk_skb_pull_data_proto;
        case BPF_FUNC_skb_change_tail:
-               return &bpf_skb_change_tail_proto;
+               return &sk_skb_change_tail_proto;
        case BPF_FUNC_skb_change_head:
-               return &bpf_skb_change_head_proto;
+               return &sk_skb_change_head_proto;
        case BPF_FUNC_get_socket_cookie:
                return &bpf_get_socket_cookie_proto;
        case BPF_FUNC_get_socket_uid:
@@ -4957,12 +5057,14 @@ static const struct bpf_func_proto *
 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
        switch (func_id) {
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
        case BPF_FUNC_lwt_seg6_store_bytes:
                return &bpf_lwt_seg6_store_bytes_proto;
        case BPF_FUNC_lwt_seg6_action:
                return &bpf_lwt_seg6_action_proto;
        case BPF_FUNC_lwt_seg6_adjust_srh:
                return &bpf_lwt_seg6_adjust_srh_proto;
+#endif
        default:
                return lwt_out_func_proto(func_id, prog);
        }
index b2b2323bdc84c44afc33304d9d2f6a22738f6523..188d693cb251a05d6483b81bbd8d815e28b77164 100644
@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
                d->lock = lock;
                spin_lock_bh(lock);
        }
-       if (d->tail)
-               return gnet_stats_copy(d, type, NULL, 0, padattr);
+       if (d->tail) {
+               int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
+
+               /* The initial attribute added in gnet_stats_copy() may be
+                * preceded by a padding attribute, in which case d->tail will
+                * end up pointing at the padding instead of the real attribute.
+                * Fix this so gnet_stats_finish_copy() adjusts the length of
+                * the right attribute.
+                */
+               if (ret == 0 && d->tail->nla_type == padattr)
+                       d->tail = (struct nlattr *)((char *)d->tail +
+                                                   NLA_ALIGN(d->tail->nla_len));
+               return ret;
+       }
 
        return 0;
 }
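Netlink attributes are packed back to back at NLA_ALIGNTO boundaries, so when a padding attribute precedes the one gnet_stats_copy() just added, the real attribute lives one aligned length past d->tail. The stepping expression used above, as a standalone helper (name illustrative):

    #include <linux/netlink.h>

    static struct nlattr *example_next_attr(struct nlattr *nla)
    {
            return (struct nlattr *)((char *)nla +
                                     NLA_ALIGN(nla->nla_len));
    }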
index c642304f178ce0a4e1358d59e45032a39f76fb3f..8e51f8555e11b95bc48ab334f50571048f705101 100644
@@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
        n->cloned = 1;
        n->nohdr = 0;
        n->peeked = 0;
+       C(pfmemalloc);
        n->destructor = NULL;
        C(tail);
        C(end);
@@ -5276,8 +5277,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                        if (npages >= 1 << order) {
                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
                                                   __GFP_COMP |
-                                                  __GFP_NOWARN |
-                                                  __GFP_NORETRY,
+                                                  __GFP_NOWARN,
                                                   order);
                                if (page)
                                        goto fill_page;
index bcc41829a16d50714bdd3c25c976c0b7296fab84..9e8f65585b81152fd9e6c72f7a285a52ef23a686 100644
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot)
 
        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
-                                          prot->slab_flags, NULL);
+                                          SLAB_ACCOUNT | prot->slab_flags,
+                                          NULL);
 
        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab)
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
-                                       SLAB_HWCACHE_ALIGN | prot->slab_flags,
+                                       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
+                                       prot->slab_flags,
                                        prot->useroffset, prot->usersize,
                                        NULL);
 
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
+                                                 SLAB_ACCOUNT |
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
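SLAB_ACCOUNT makes every allocation from the cache chargeable to the allocating task's memory cgroup, so with these three hunks request socks, timewait socks and full socks all become kmemcg-accounted. An illustrative cache creation with the flag (names and size hypothetical):

    #include <linux/slab.h>

    static struct kmem_cache *example_cache;

    static int __init example_cache_init(void)
    {
            example_cache = kmem_cache_create("example_objs", 128, 0,
                                              SLAB_HWCACHE_ALIGN |
                                              SLAB_ACCOUNT, NULL);
            return example_cache ? 0 : -ENOMEM;
    }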
index 8b5ba6dffac7ebc88fd21075793dc3db43a74a43..12877a1514e7b8e873cd26529e58f7ebaae99c1a 100644
@@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
        struct dccp_sock *dp = dccp_sk(sk);
-       ktime_t now = ktime_get_real();
+       ktime_t now = ktime_get();
        s64 delta = 0;
 
        switch (fbtype) {
@@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
        case CCID3_FBACK_PERIODIC:
                delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
                if (delta <= 0)
-                       DCCP_BUG("delta (%ld) <= 0", (long)delta);
-               else
-                       hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+                       delta = 1;
+               hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
                break;
        default:
                return;
        }
 
-       ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+       ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
                       hc->rx_x_recv, hc->rx_pinv);
 
        hc->rx_tstamp_last_feedback = now;
@@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
 static u32 ccid3_first_li(struct sock *sk)
 {
        struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
-       u32 x_recv, p, delta;
+       u32 x_recv, p;
+       s64 delta;
        u64 fval;
 
        if (hc->rx_rtt == 0) {
@@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
                hc->rx_rtt = DCCP_FALLBACK_RTT;
        }
 
-       delta  = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+       delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+       if (delta <= 0)
+               delta = 1;
        x_recv = scaled_div32(hc->rx_bytes_recv, delta);
        if (x_recv == 0) {              /* would also trigger divide-by-zero */
                DCCP_WARN("X_recv==0\n");
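Both CCID-3 hunks switch interval measurement from ktime_get_real() to ktime_get(): wall-clock time can jump backwards under settimeofday or NTP, which produced negative feedback intervals and the DCCP_BUG splat being deleted here. With a monotonic clock a non-positive delta can only mean the events were nearly simultaneous, so it is clamped to one microsecond before feeding scaled_div32(). The resulting idiom in isolation (function name illustrative):

    static s64 feedback_interval_us(ktime_t last_feedback)
    {
            s64 delta = ktime_us_delta(ktime_get(), last_feedback);

            return delta > 0 ? delta : 1;   /* never zero or negative */
    }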
index 0ea2ee56ac1bee6948ee4ed37c8172b300a7f9de..f91e3816806baae37e0e0793dcef72e8b291777e 100644
@@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                 int flags, int *addr_len);
 void dccp_shutdown(struct sock *sk, int how);
 int inet_dccp_listen(struct socket *sock, int backlog);
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait);
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
 void dccp_req_err(struct sock *sk, u64 seq);
 
index a9e478cd3787c90f3d81e3bc2f71a14f7b11e280..b08feb219b44b67eadf408a33649d8c7ec9db2d0 100644
@@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = {
        .accept            = inet_accept,
        .getname           = inet_getname,
        /* FIXME: work on tcp_poll to rename it to inet_csk_poll */
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet_ioctl,
        /* FIXME: work on inet_listen to rename it to sock_common_listen */
        .listen            = inet_dccp_listen,
index 17fc4e0166ba89ed435dc65bbdd5951d9018c093..6344f1b18a6a1b30cd2f3c559987a2c9e9546f81 100644
@@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet6_getname,
-       .poll_mask         = dccp_poll_mask,
+       .poll              = dccp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = inet_dccp_listen,
        .shutdown          = inet_shutdown,
index ca21c1c76da013575d5bd0c8b3a4ac42eb2b229b..0d56e36a6db7b77dcdeb9697dd81bf62895e6e4c 100644
@@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags)
 
 EXPORT_SYMBOL_GPL(dccp_disconnect);
 
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
+/*
+ *     Wait for a DCCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
+ */
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+                      poll_table *wait)
 {
        __poll_t mask;
        struct sock *sk = sock->sk;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
        if (sk->sk_state == DCCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
@@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-EXPORT_SYMBOL_GPL(dccp_poll_mask);
+EXPORT_SYMBOL_GPL(dccp_poll);
 
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
index 9a686d890bfad179c09a182245a96bba5dba21ea..7d6ff983ba2cbbf7915a61ffad57e52f66f3a193 100644
@@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
 }
 
 
-static __poll_t dn_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wait)
 {
        struct sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        if (!skb_queue_empty(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
@@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       dn_accept,
        .getname =      dn_getname,
-       .poll_mask =    dn_poll_mask,
+       .poll =         dn_poll,
        .ioctl =        dn_ioctl,
        .listen =       dn_listen,
        .shutdown =     dn_shutdown,
index 40c851693f77e35a1f573fdbf0bcd86adb94cf13..0c9478b91fa5b6c8f6b586ed8ead66c8db538ea7 100644
@@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
                opt++;
                kdebug("options: '%s'", opt);
                do {
+                       int opt_len, opt_nlen;
                        const char *eq;
-                       int opt_len, opt_nlen, opt_vlen, tmp;
+                       char optval[128];
 
                        next_opt = memchr(opt, '#', end - opt) ?: end;
                        opt_len = next_opt - opt;
-                       if (opt_len <= 0 || opt_len > 128) {
+                       if (opt_len <= 0 || opt_len > sizeof(optval)) {
                                pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
                                                    opt_len);
                                return -EINVAL;
                        }
 
-                       eq = memchr(opt, '=', opt_len) ?: end;
-                       opt_nlen = eq - opt;
-                       eq++;
-                       opt_vlen = next_opt - eq; /* will be -1 if no value */
+                       eq = memchr(opt, '=', opt_len);
+                       if (eq) {
+                               opt_nlen = eq - opt;
+                               eq++;
+                               memcpy(optval, eq, next_opt - eq);
+                               optval[next_opt - eq] = '\0';
+                       } else {
+                               opt_nlen = opt_len;
+                               optval[0] = '\0';
+                       }
 
-                       tmp = opt_vlen >= 0 ? opt_vlen : 0;
-                       kdebug("option '%*.*s' val '%*.*s'",
-                              opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+                       kdebug("option '%*.*s' val '%s'",
+                              opt_nlen, opt_nlen, opt, optval);
 
                        /* see if it's an error number representing a DNS error
                         * that's to be recorded as the result in this key */
                        if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
                            memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
                                kdebug("dns error number option");
-                               if (opt_vlen <= 0)
-                                       goto bad_option_value;
 
-                               ret = kstrtoul(eq, 10, &derrno);
+                               ret = kstrtoul(optval, 10, &derrno);
                                if (ret < 0)
                                        goto bad_option_value;
 
index 275449b0d633586a4befec517ab3a36c5e3ba5a5..3297e7fa99458b13c40609588f187d366cf37411 100644
@@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
        return 0;
 }
 
+static int lowpan_get_iflink(const struct net_device *dev)
+{
+       return lowpan_802154_dev(dev)->wdev->ifindex;
+}
+
 static const struct net_device_ops lowpan_netdev_ops = {
        .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_open               = lowpan_open,
        .ndo_stop               = lowpan_stop,
        .ndo_neigh_construct    = lowpan_neigh_construct,
+       .ndo_get_iflink         = lowpan_get_iflink,
 };
 
 static void lowpan_setup(struct net_device *ldev)
index a0768d2759b8ecb8954dd544561b68f26d0c6510..a60658c85a9ad09b405f2d928e70acf64a9ebc4d 100644
@@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
@@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = sock_no_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = ieee802154_sock_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = sock_no_shutdown,
index 15e125558c76e5fa2fe466ab0d64be1d3183ebed..b403499fdabea7367f65c588d957a30f5a6572b5 100644
@@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,
-       .poll_mask         = tcp_poll_mask,
+       .poll              = tcp_poll,
        .ioctl             = inet_ioctl,
        .listen            = inet_listen,
        .shutdown          = inet_shutdown,
@@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = udp_poll_mask,
+       .poll              = udp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
@@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops);
 
 /*
  * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
- * udp_poll_mask
+ * udp_poll
  */
 static const struct proto_ops inet_sockraw_ops = {
        .family            = PF_INET,
@@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = inet_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index b21833651394233bbdb143d765e4408333b13b72..e46cdd310e5f86ef6985993e4226db614a2a8732 100644
@@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
        if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
                struct flowi4 fl4 = {
                        .flowi4_iif = LOOPBACK_IFINDEX,
+                       .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
                        .daddr = ip_hdr(skb)->saddr,
                        .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
                        .flowi4_scope = scope,
index 1540db65241a6fd4d96b00546f13a3e3d3cd1815..c9ec1603666bffcfb24597b933a05f53b6d83440 100644
@@ -448,9 +448,7 @@ next_proto:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
index 1859c473b21a862b383edebbcf2c1656f9c58b3b..6a7d980105f60514c8180e6333f0a4a53912c3d5 100644
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 85b617b655bc2d602563b1bd174f436554c9d046..b3c899a630a0fe696207f8749f5ceb510daa445e 100644
@@ -1200,13 +1200,14 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
        spin_lock_bh(&im->lock);
        if (pmc) {
                im->interface = pmc->interface;
-               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                im->sfmode = pmc->sfmode;
                if (pmc->sfmode == MCAST_INCLUDE) {
                        im->tomb = pmc->tomb;
                        im->sources = pmc->sources;
                        for (psf = im->sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = im->crcount;
+                               psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+               } else {
+                       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
                }
                in_dev_put(pmc->interface);
                kfree(pmc);
@@ -1288,7 +1289,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
 #endif
 }
 
-static void igmp_group_added(struct ip_mc_list *im)
+static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
 {
        struct in_device *in_dev = im->interface;
 #ifdef CONFIG_IP_MULTICAST
@@ -1316,7 +1317,13 @@ static void igmp_group_added(struct ip_mc_list *im)
        }
        /* else, v3 */
 
-       im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+       /* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
+        * not send filter-mode change record as the mode should be from
+        * IN() to IN(A).
+        */
+       if (mode == MCAST_EXCLUDE)
+               im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+
        igmp_ifc_event(in_dev);
 #endif
 }
@@ -1381,8 +1388,7 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
 /*
  *     A socket has joined a multicast group on device dev.
  */
-
-void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode)
 {
        struct ip_mc_list *im;
 #ifdef CONFIG_IP_MULTICAST
@@ -1394,7 +1400,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        for_each_pmc_rtnl(in_dev, im) {
                if (im->multiaddr == addr) {
                        im->users++;
-                       ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
+                       ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
                        goto out;
                }
        }
@@ -1408,8 +1414,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        in_dev_hold(in_dev);
        im->multiaddr = addr;
        /* initial mode is (EX, empty) */
-       im->sfmode = MCAST_EXCLUDE;
-       im->sfcount[MCAST_EXCLUDE] = 1;
+       im->sfmode = mode;
+       im->sfcount[mode] = 1;
        refcount_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
@@ -1426,12 +1432,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
 #ifdef CONFIG_IP_MULTICAST
        igmpv3_del_delrec(in_dev, im);
 #endif
-       igmp_group_added(im);
+       igmp_group_added(im, mode);
        if (!in_dev->dead)
                ip_rt_multicast_event(in_dev);
 out:
        return;
 }
+
+void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+{
+       __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ip_mc_inc_group);
 
 static int ip_mc_check_iphdr(struct sk_buff *skb)
@@ -1688,7 +1699,7 @@ void ip_mc_remap(struct in_device *in_dev)
 #ifdef CONFIG_IP_MULTICAST
                igmpv3_del_delrec(in_dev, pmc);
 #endif
-               igmp_group_added(pmc);
+               igmp_group_added(pmc, pmc->sfmode);
        }
 }
 
@@ -1751,7 +1762,7 @@ void ip_mc_up(struct in_device *in_dev)
 #ifdef CONFIG_IP_MULTICAST
                igmpv3_del_delrec(in_dev, pmc);
 #endif
-               igmp_group_added(pmc);
+               igmp_group_added(pmc, pmc->sfmode);
        }
 }
 
@@ -2130,8 +2141,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
 
 /* Join a multicast group
  */
-
-int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
+                             unsigned int mode)
 {
        __be32 addr = imr->imr_multiaddr.s_addr;
        struct ip_mc_socklist *iml, *i;
@@ -2172,15 +2183,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
        memcpy(&iml->multi, imr, sizeof(*imr));
        iml->next_rcu = inet->mc_list;
        iml->sflist = NULL;
-       iml->sfmode = MCAST_EXCLUDE;
+       iml->sfmode = mode;
        rcu_assign_pointer(inet->mc_list, iml);
-       ip_mc_inc_group(in_dev, addr);
+       __ip_mc_inc_group(in_dev, addr, mode);
        err = 0;
 done:
        return err;
 }
+
+/* Join ASM (Any-Source Multicast) group
+ */
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+{
+       return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ip_mc_join_group);
 
+/* Join SSM (Source-Specific Multicast) group
+ */
+int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+                        unsigned int mode)
+{
+       return __ip_mc_join_group(sk, imr, mode);
+}
+
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
                           struct in_device *in_dev)
 {
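
The userspace path into the new SSM join helper, for context: the
do_ip_setsockopt() hunks below route IP_ADD_SOURCE_MEMBERSHIP and
MCAST_JOIN_SOURCE_GROUP through ip_mc_join_group_ssm() with
MCAST_INCLUDE, so such a group now starts in IN(A) instead of EX().
A minimal standalone sketch of that caller side; the addresses are
illustrative placeholders, not taken from the patch:

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct ip_mreq_source mreqs;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&mreqs, 0, sizeof(mreqs));
            inet_pton(AF_INET, "232.1.1.1", &mreqs.imr_multiaddr);  /* SSM range */
            inet_pton(AF_INET, "192.0.2.1", &mreqs.imr_sourceaddr); /* allowed source */
            mreqs.imr_interface.s_addr = htonl(INADDR_ANY);
            /* lands in ip_mc_join_group_ssm(..., MCAST_INCLUDE) */
            if (setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
                           &mreqs, sizeof(mreqs)) < 0)
                    perror("IP_ADD_SOURCE_MEMBERSHIP");
            close(fd);
            return 0;
    }
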
index c9e35b81d0931df8429a33e8d03e719b87da0747..1e4cf3ab560fac154fefb7acd3539eb6e91ed84e 100644 (file)
@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
 
 void inet_frags_exit_net(struct netns_frags *nf)
 {
-       nf->low_thresh = 0; /* prevent creation of new frags */
+       nf->high_thresh = 0; /* prevent creation of new frags */
 
        rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
 }
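
For context, not shown in this hunk: new fragment queues are admission
checked against high_thresh at allocation time (low_thresh only sets
where eviction pressure begins), which is why zeroing high_thresh is
what actually stops new queues while the rhashtable is torn down. A
sketch of the allocation-time check this relies on, assuming the
4.18-era inet_frag_alloc() shape:

    /* sketch: admission check at fragment-queue allocation */
    if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
            return NULL;    /* high_thresh == 0 => refuse every new queue */
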
index 31ff46daae974645dfe73c97e6e507a0ad62dd4b..3647167c8fa313f9eb7a5c5ad34cb0cb7a7aea5e 100644 (file)
@@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score += 4;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index af5a830ff6ad320ae68066ab86476962db978f79..b3308e9d97626838c6a7e4f2dd58db75695dfcaa 100644 (file)
@@ -1145,7 +1145,8 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
        cork->fragsize = ip_sk_use_pmtu(sk) ?
                         dst_mtu(&rt->dst) : rt->dst.dev->mtu;
 
-       cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
+       cork->gso_size = sk->sk_type == SOCK_DGRAM &&
+                        sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->ttl = ipc->ttl;
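
The extra protocol test matters because SOCK_DGRAM alone also matches,
for example, ping sockets (IPPROTO_ICMP), which reach ip_append_data()
but implement no segmentation; only UDP honors a nonzero gso_size via
UDP_SEGMENT. A hedged userspace sketch of the feature being gated, with
the 4.18 uapi value supplied in case libc headers lack it:

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef UDP_SEGMENT
    #define UDP_SEGMENT 103         /* from linux/udp.h (4.18+) */
    #endif

    /* ask the kernel to split each sendmsg() payload into mss-byte
     * segments; only meaningful on SOCK_DGRAM/IPPROTO_UDP sockets
     */
    int enable_udp_gso(int fd, int mss)
    {
            return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                              &mss, sizeof(mss));
    }
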
index fc32fdbeefa61c18da5b9330d4da73ca6db992bd..64c76dcf73863a81addc1c7effcd3a687a97483d 100644 (file)
@@ -984,7 +984,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
                        mreq.imr_address.s_addr = mreqs.imr_interface;
                        mreq.imr_ifindex = 0;
-                       err = ip_mc_join_group(sk, &mreq);
+                       err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                        if (err && err != -EADDRINUSE)
                                break;
                        omode = MCAST_INCLUDE;
@@ -1061,7 +1061,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        mreq.imr_multiaddr = psin->sin_addr;
                        mreq.imr_address.s_addr = 0;
                        mreq.imr_ifindex = greqs.gsr_interface;
-                       err = ip_mc_join_group(sk, &mreq);
+                       err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                        if (err && err != -EADDRINUSE)
                                break;
                        greqs.gsr_interface = mreq.imr_ifindex;
index ca0dad90803a92bdcbb1e199554985ad4626fada..e77872c93c206693f4bcfdde98a044c6e7cfb780 100644 (file)
@@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
                .checkentry = icmp_checkentry,
                .proto      = IPPROTO_ICMP,
                .family     = NFPROTO_IPV4,
+               .me         = THIS_MODULE,
        },
 };
 
index 805e83ec3ad9347abc6ce778f296319746772f1c..16471410496592f52ac7927d218a44341f139339 100644 (file)
@@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
                 * to a listener socket if there's one */
                struct sock *sk2;
 
-               sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+               sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                            iph->saddr, laddr ? laddr : iph->daddr,
                                            hp->source, lport ? lport : hp->dest,
                                            skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
 EXPORT_SYMBOL_GPL(nf_tproxy_laddr4);
 
 struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
                      const u8 protocol,
                      const __be32 saddr, const __be32 daddr,
                      const __be16 sport, const __be16 dport,
@@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                      const enum nf_tproxy_lookup_t lookup_type)
 {
        struct sock *sk;
-       struct tcphdr *tcph;
 
        switch (protocol) {
-       case IPPROTO_TCP:
+       case IPPROTO_TCP: {
+               struct tcphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, ip_hdrlen(skb),
+                                       sizeof(struct tcphdr), &_hdr);
+               if (hp == NULL)
+                       return NULL;
+
                switch (lookup_type) {
                case NF_TPROXY_LOOKUP_LISTENER:
-                       tcph = hp;
                        sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
                                                    ip_hdrlen(skb) +
-                                                     __tcp_hdrlen(tcph),
+                                                     __tcp_hdrlen(hp),
                                                    saddr, sport,
                                                    daddr, dport,
                                                    in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
                        BUG();
                }
                break;
+               }
        case IPPROTO_UDP:
                sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
                                     in->ifindex);
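
Two points worth spelling out about this conversion: skb_header_pointer()
copies the requested span into the caller's stack buffer when the skb is
non-linear and returns NULL when the packet is too short, so a truncated
TCP header can no longer be read past; and fetching only
sizeof(struct tcphdr) is sufficient because the full header length is
derived from the fixed doff field. For reference, the helper used above
(as defined in include/linux/tcp.h):

    static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
    {
            return th->doff * 4;    /* doff counts 32-bit words */
    }
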
index d06247ba08b2667b1049329e8921af9388545c54..5fa335fd385254def583b9a5100fbe7b9ce94cd6 100644 (file)
@@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        if (write && ret == 0) {
                low = make_kgid(user_ns, urange[0]);
                high = make_kgid(user_ns, urange[1]);
-               if (!gid_valid(low) || !gid_valid(high) ||
-                   (urange[1] < urange[0]) || gid_lt(high, low)) {
+               if (!gid_valid(low) || !gid_valid(high))
+                       return -EINVAL;
+               if (urange[1] < urange[0] || gid_lt(high, low)) {
                        low = make_kgid(&init_user_ns, 1);
                        high = make_kgid(&init_user_ns, 0);
                }
@@ -265,8 +266,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
            ipv4.sysctl_tcp_fastopen);
        struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
-       int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+       __le32 key[4];
+       int ret, i;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
        if (!tbl.data)
@@ -275,11 +277,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt)
-               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+               memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
        else
-               memset(user_key, 0, sizeof(user_key));
+               memset(key, 0, sizeof(key));
        rcu_read_unlock();
 
+       for (i = 0; i < ARRAY_SIZE(key); i++)
+               user_key[i] = le32_to_cpu(key[i]);
+
        snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
                user_key[0], user_key[1], user_key[2], user_key[3]);
        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +295,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
-               tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+               for (i = 0; i < ARRAY_SIZE(user_key); i++)
+                       key[i] = cpu_to_le32(user_key[i]);
+
+               tcp_fastopen_reset_cipher(net, NULL, key,
                                          TCP_FASTOPEN_KEY_LENGTH);
        }
 
 bad_key:
        pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-              user_key[0], user_key[1], user_key[2], user_key[3],
+               user_key[0], user_key[1], user_key[2], user_key[3],
               (char *)tbl.data, ret);
        kfree(tbl.data);
        return ret;
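
The conversion above pins the proc representation of the Fast Open key
to little-endian 32-bit words, so the same key bytes print and parse
identically on big- and little-endian hosts. A standalone userspace
model of the display side (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* read 4 bytes as a little-endian 32-bit word, host-independent */
    static uint32_t le32_to_host(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
            uint8_t key[16] = { 0xde, 0xad, 0xbe, 0xef };   /* rest zero */
            uint32_t user_key[4];
            int i;

            for (i = 0; i < 4; i++)
                    user_key[i] = le32_to_host(key + 4 * i);
            /* prints efbeadde-00000000-00000000-00000000 on any host */
            printf("%08x-%08x-%08x-%08x\n",
                   user_key[0], user_key[1], user_key[2], user_key[3]);
            return 0;
    }
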
index 141acd92e58aeddeb9a0ba1eaacf3bd520a836a3..4491faf83f4f93cf4384f7b192ffe3022567cc0a 100644 (file)
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 }
 
 /*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ *     Wait for a TCP event.
+ *
+ *     Note that we don't need to lock the socket, as the upper poll layers
+ *     take care of normal races (between the test and the event) and we don't
+ *     go look at any of the socket buffers directly.
  */
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
+       __poll_t mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
-       __poll_t mask = 0;
        int state;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);
 
+       /* The socket is not locked. We are protected from async events
+        * by the poll logic, and correct handling of state changes
+        * made by other threads is impossible in any case.
+        */
+
+       mask = 0;
+
        /*
         * EPOLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
 
        return mask;
 }
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
 
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
@@ -1987,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                         * shouldn't happen.
                         */
                        if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-                                "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+                                "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
                                 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
                                 flags))
                                break;
@@ -2002,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                                goto found_fin_ok;
                        WARN(!(flags & MSG_PEEK),
-                            "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+                            "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
                             *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
                }
 
@@ -2551,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
+       tp->copied_seq = tp->rcv_nxt;
+       tp->urg_data = 0;
        tcp_write_queue_purge(sk);
        tcp_fastopen_active_disable_ofo_check(sk);
        skb_rbtree_purge(&tp->out_of_order_queue);
@@ -2810,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_REPAIR:
                if (!tcp_can_repair_sock(sk))
                        err = -EPERM;
-               else if (val == 1) {
+               else if (val == TCP_REPAIR_ON) {
                        tp->repair = 1;
                        sk->sk_reuse = SK_FORCE_REUSE;
                        tp->repair_queue = TCP_NO_QUEUE;
-               } else if (val == 0) {
+               } else if (val == TCP_REPAIR_OFF) {
                        tp->repair = 0;
                        sk->sk_reuse = SK_NO_REUSE;
                        tcp_send_window_probe(sk);
+               } else if (val == TCP_REPAIR_OFF_NO_WP) {
+                       tp->repair = 0;
+                       sk->sk_reuse = SK_NO_REUSE;
                } else
                        err = -EINVAL;
 
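
A hedged userspace sketch of the new knob: TCP_REPAIR_OFF_NO_WP leaves
repair mode like TCP_REPAIR_OFF but without the window probe, which a
checkpoint/restore tool wants when the peer is not yet ready to answer
mid-migration. The fallback values below mirror the 4.18 uapi header;
older libc headers may not define them:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_REPAIR
    #define TCP_REPAIR 19                   /* from linux/tcp.h */
    #endif
    #ifndef TCP_REPAIR_OFF_NO_WP
    #define TCP_REPAIR_OFF_NO_WP -1         /* from linux/tcp.h (4.18) */
    #endif

    /* leave repair mode without triggering tcp_send_window_probe() */
    int leave_repair_quietly(int fd)
    {
            int val = TCP_REPAIR_OFF_NO_WP;

            return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR,
                              &val, sizeof(val));
    }
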
@@ -3709,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err)
                        struct request_sock *req = inet_reqsk(sk);
 
                        local_bh_disable();
-                       inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
-                                                         req);
+                       inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                        local_bh_enable();
                        return 0;
                }
index 5f5e5936760e65739859d0d8d9717b3204482a43..5869f89ca6564ec46a3c04a2b344a8630b0ba459 100644 (file)
@@ -55,7 +55,6 @@ struct dctcp {
        u32 dctcp_alpha;
        u32 next_seq;
        u32 ce_state;
-       u32 delayed_ack_reserved;
        u32 loss_cwnd;
 };
 
@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
 
                ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
 
-               ca->delayed_ack_reserved = 0;
                ca->loss_cwnd = 0;
                ca->ce_state = 0;
 
@@ -134,7 +132,8 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
        /* State has changed from CE=0 to CE=1 and the delayed
         * ACK has not been sent yet.
         */
-       if (!ca->ce_state && ca->delayed_ack_reserved) {
+       if (!ca->ce_state &&
+           inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
                u32 tmp_rcv_nxt;
 
                /* Save current rcv_nxt. */
@@ -164,7 +163,8 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
        /* State has changed from CE=1 to CE=0 and the delayed
         * ACK has not been sent yet.
         */
-       if (ca->ce_state && ca->delayed_ack_reserved) {
+       if (ca->ce_state &&
+           inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
                u32 tmp_rcv_nxt;
 
                /* Save current rcv_nxt. */
@@ -248,25 +248,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
        }
 }
 
-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
-{
-       struct dctcp *ca = inet_csk_ca(sk);
-
-       switch (ev) {
-       case CA_EVENT_DELAYED_ACK:
-               if (!ca->delayed_ack_reserved)
-                       ca->delayed_ack_reserved = 1;
-               break;
-       case CA_EVENT_NON_DELAYED_ACK:
-               if (ca->delayed_ack_reserved)
-                       ca->delayed_ack_reserved = 0;
-               break;
-       default:
-               /* Don't care for the rest. */
-               break;
-       }
-}
-
 static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
 {
        switch (ev) {
@@ -276,10 +257,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        case CA_EVENT_ECN_NO_CE:
                dctcp_ce_state_1_to_0(sk);
                break;
-       case CA_EVENT_DELAYED_ACK:
-       case CA_EVENT_NON_DELAYED_ACK:
-               dctcp_update_ack_reserved(sk, ev);
-               break;
        default:
                /* Don't care for the rest. */
                break;
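
The net effect of this dctcp change: instead of mirroring delayed-ACK
state in a private flag kept in sync by the (now removed)
CA_EVENT_DELAYED_ACK/CA_EVENT_NON_DELAYED_ACK hooks, DCTCP asks the
inet_connection_sock ACK machinery directly; ICSK_ACK_TIMER is set
while a delayed-ACK timer is armed. The predicate both CE hooks now
use, as a sketch with 4.18 field names:

    /* sketch: is a delayed ACK currently outstanding on this socket? */
    static inline bool dctcp_delayed_ack_pending(const struct sock *sk)
    {
            return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER;
    }
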
index 355d3dffd021ccad0f30891994289d916f7d276c..8e5522c6833ad5b01c32590f62e2b20131869327 100644 (file)
@@ -265,7 +265,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN)
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                break;
        case INET_ECN_CE:
                if (tcp_ca_needs_ecn(sk))
@@ -273,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 
                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -3181,6 +3181,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
+
+                       /* If any of the cumulatively ACKed segments was
+                        * retransmitted, the non-SACK case cannot confirm
+                        * that progress was due to the original transmission,
+                        * because the TCPCB_SACKED_ACKED bits are missing,
+                        * even if some of the packets were never retransmitted.
+                        */
+                       if (flag & FLAG_RETRANS_DATA_ACKED)
+                               flag &= ~FLAG_ORIG_SACK_ACKED;
                } else {
                        int delta;
 
index bea17f1e8302585d70c1e0108ae1c33d149230d8..3b2711e33e4c7c06ed8caec20cf0241f36068f54 100644 (file)
@@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
-               tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
-               if (tp->write_seq == 0)
-                       tp->write_seq = 1;
-               tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
-               tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               /* When repair re-uses a TIME-WAIT socket we still want
+                * the safety check above, but we must honor the sequence
+                * numbers and timestamps set as part of the repair
+                * process.
+                *
+                * Without this check, re-using a TIME-WAIT socket with
+                * TCP repair would decrement the repair-assigned
+                * sequence number by one on every reuse: the first time
+                * it is reused the sequence is -1, the second time -2,
+                * etc. This fixes that issue without appearing to
+                * create any others.
+                */
+               if (likely(!tp->repair)) {
+                       tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+                       if (tp->write_seq == 0)
+                               tp->write_seq = 1;
+                       tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
+                       tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+               }
                sock_hold(sktw);
                return 1;
        }
index 8e08b409c71e1f8e69422f1756d48b5bc55411c3..00e5a300ddb934957463c4abdfbccb50807256e1 100644 (file)
@@ -3523,8 +3523,6 @@ void tcp_send_delayed_ack(struct sock *sk)
        int ato = icsk->icsk_ack.ato;
        unsigned long timeout;
 
-       tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
        if (ato > TCP_DELACK_MIN) {
                const struct tcp_sock *tp = tcp_sk(sk);
                int max_ato = HZ / 2;
@@ -3581,8 +3579,6 @@ void tcp_send_ack(struct sock *sk)
        if (sk->sk_state == TCP_CLOSE)
                return;
 
-       tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
        /* We are not putting this on the write queue, so
         * tcp_transmit_skb() will set the ownership to this
         * sock.
index 9bb27df4dac5ec5f133b15e972f384bdc1d165b1..24e116ddae79ce0696e3f63290385ae15e28ac18 100644 (file)
@@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     udp_poll - wait for a UDP event.
  *     @file - file struct
  *     @sock - socket
- *     @events - events to wait for
+ *     @wait - poll table
  *
 *     This is the same as datagram poll, except for the special case of
 *     blocking sockets. If an application is using a blocking fd
@@ -2600,23 +2600,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
  *     but then block when reading it. Add special case code
  *     to work around these arguably broken applications.
  */
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
        if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
-       if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
+       if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
                mask &= ~(EPOLLIN | EPOLLRDNORM);
 
        return mask;
 
 }
-EXPORT_SYMBOL(udp_poll_mask);
+EXPORT_SYMBOL(udp_poll);
 
 int udp_abort(struct sock *sk, int err)
 {
index 92dc9e5a7ff3d0a7509bfa2a66e9189c8341a5fa..69c54540d5b4f2664b78b56468b09e3c1f6ac888 100644 (file)
@@ -394,7 +394,7 @@ unflush:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
        return pp;
 }
 EXPORT_SYMBOL(udp_gro_receive);
index 0eff75525da101e4fce2798626a317366f94623f..b3885ca22d6fb7aa6165c2773ae02d9885099d8f 100644 (file)
@@ -108,6 +108,7 @@ config IPV6_MIP6
 config IPV6_ILA
        tristate "IPv6: Identifier Locator Addressing (ILA)"
        depends on NETFILTER
+       select DST_CACHE
        select LWTUNNEL
        ---help---
          Support for IPv6 Identifier Locator Addressing (ILA).
index c134286d6a4179516709570ad534d1ae26fd0bce..91580c62bb86bc3fb67661566e9b9e82a2e14e8c 100644 (file)
@@ -4528,6 +4528,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                               unsigned long expires, u32 flags)
 {
        struct fib6_info *f6i;
+       u32 prio;
 
        f6i = addrconf_get_prefix_route(&ifp->addr,
                                        ifp->prefix_len,
@@ -4536,13 +4537,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
        if (!f6i)
                return -ENOENT;
 
-       if (f6i->fib6_metric != ifp->rt_priority) {
+       prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
+       if (f6i->fib6_metric != prio) {
+               /* delete old one */
+               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
+
                /* add new one */
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
-               /* delete old one */
-               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
        } else {
                if (!expires)
                        fib6_clean_expires(f6i);
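
Two details in this hunk: "a ?: b" is GCC's omitted-middle-operand
conditional, i.e. "a ? a : b" with a evaluated once, so a zero
rt_priority falls back to the metric that addrconf_prefix_route()
itself installs; and the old route is now deleted before the new one is
added, so the replacement with the same metric is not rejected as a
duplicate. The fallback in plain C, assuming IP6_RT_PRIO_ADDRCONF (256)
as in include/net/ip6_route.h:

    u32 prio = ifp->rt_priority;
    if (!prio)
            prio = IP6_RT_PRIO_ADDRCONF;    /* 256, the addrconf default */
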
index 74f2a261e8df4dc78a3baddb31609cdc70ba6035..9ed0eae91758f8506b4f6ca0fe3a9c2dc3fe1323 100644 (file)
@@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = inet_accept,               /* ok           */
        .getname           = inet6_getname,
-       .poll_mask         = tcp_poll_mask,             /* ok           */
+       .poll              = tcp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = inet_listen,               /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
@@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = udp_poll_mask,             /* ok           */
+       .poll              = udp_poll,                  /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 1323b9679cf718d0023bf5880dcd60fb8602d9db..1c0bb9fb76e61fa7d12317190ebac38847530858 100644 (file)
@@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
 {
        struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
 
-       txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS,
-                                        hop, hop ? ipv6_optlen(hop) : 0);
+       txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
        txopt_put(old);
        if (IS_ERR(txopts))
                return PTR_ERR(txopts);
@@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req,
        if (IS_ERR(new))
                return PTR_ERR(new);
 
-       txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-                                        new, new ? ipv6_optlen(new) : 0);
+       txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
        kfree(new);
 
@@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req)
        if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
                return; /* Nothing to do */
 
-       txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
-                                        new, new ? ipv6_optlen(new) : 0);
+       txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
 
        if (!IS_ERR(txopts)) {
                txopts = xchg(&req_inet->ipv6_opt, txopts);
index 5bc2bf3733abd387de8d21932c95ef32eea30d80..20291c2036fcdcd23ccdc2f5b5ae2a1734b2833d 100644 (file)
@@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
 }
 EXPORT_SYMBOL_GPL(ipv6_dup_options);
 
-static int ipv6_renew_option(void *ohdr,
-                            struct ipv6_opt_hdr __user *newopt, int newoptlen,
-                            int inherit,
-                            struct ipv6_opt_hdr **hdr,
-                            char **p)
+static void ipv6_renew_option(int renewtype,
+                             struct ipv6_opt_hdr **dest,
+                             struct ipv6_opt_hdr *old,
+                             struct ipv6_opt_hdr *new,
+                             int newtype, char **p)
 {
-       if (inherit) {
-               if (ohdr) {
-                       memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
-                       *hdr = (struct ipv6_opt_hdr *)*p;
-                       *p += CMSG_ALIGN(ipv6_optlen(*hdr));
-               }
-       } else {
-               if (newopt) {
-                       if (copy_from_user(*p, newopt, newoptlen))
-                               return -EFAULT;
-                       *hdr = (struct ipv6_opt_hdr *)*p;
-                       if (ipv6_optlen(*hdr) > newoptlen)
-                               return -EINVAL;
-                       *p += CMSG_ALIGN(newoptlen);
-               }
-       }
-       return 0;
+       struct ipv6_opt_hdr *src;
+
+       src = (renewtype == newtype ? new : old);
+       if (!src)
+               return;
+
+       memcpy(*p, src, ipv6_optlen(src));
+       *dest = (struct ipv6_opt_hdr *)*p;
+       *p += CMSG_ALIGN(ipv6_optlen(*dest));
 }
 
 /**
@@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr,
  */
 struct ipv6_txoptions *
 ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
-                  int newtype,
-                  struct ipv6_opt_hdr __user *newopt, int newoptlen)
+                  int newtype, struct ipv6_opt_hdr *newopt)
 {
        int tot_len = 0;
        char *p;
        struct ipv6_txoptions *opt2;
-       int err;
 
        if (opt) {
                if (newtype != IPV6_HOPOPTS && opt->hopopt)
@@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
                        tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
        }
 
-       if (newopt && newoptlen)
-               tot_len += CMSG_ALIGN(newoptlen);
+       if (newopt)
+               tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
 
        if (!tot_len)
                return NULL;
@@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
        opt2->tot_len = tot_len;
        p = (char *)(opt2 + 1);
 
-       err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
-                               newtype != IPV6_HOPOPTS,
-                               &opt2->hopopt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
-                               newtype != IPV6_RTHDRDSTOPTS,
-                               &opt2->dst0opt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
-                               newtype != IPV6_RTHDR,
-                               (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
-       if (err)
-               goto out;
-
-       err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
-                               newtype != IPV6_DSTOPTS,
-                               &opt2->dst1opt, &p);
-       if (err)
-               goto out;
+       ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
+                         (opt ? opt->hopopt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
+                         (opt ? opt->dst0opt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_RTHDR,
+                         (struct ipv6_opt_hdr **)&opt2->srcrt,
+                         (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
+                         newopt, newtype, &p);
+       ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
+                         (opt ? opt->dst1opt : NULL),
+                         newopt, newtype, &p);
 
        opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
                          (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
@@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
        opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
 
        return opt2;
-out:
-       sock_kfree_s(sk, opt2, opt2->tot_len);
-       return ERR_PTR(err);
-}
-
-/**
- * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
- *
- * @sk: sock from which to allocate memory
- * @opt: original options
- * @newtype: option type to replace in @opt
- * @newopt: new option of type @newtype to replace (kernel-mem)
- * @newoptlen: length of @newopt
- *
- * See ipv6_renew_options().  The difference is that @newopt is
- * kernel memory, rather than user memory.
- */
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
-                       int newtype, struct ipv6_opt_hdr *newopt,
-                       int newoptlen)
-{
-       struct ipv6_txoptions *ret_val;
-       const mm_segment_t old_fs = get_fs();
-
-       set_fs(KERNEL_DS);
-       ret_val = ipv6_renew_options(sk, opt, newtype,
-                                    (struct ipv6_opt_hdr __user *)newopt,
-                                    newoptlen);
-       set_fs(old_fs);
-       return ret_val;
 }
 
 struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
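
After this rework, ipv6_renew_option() is a pure per-slot selector:
each of the four sticky-option slots (HOPOPTS, RTHDRDSTOPTS, RTHDR,
DSTOPTS) takes the caller's new header when its type matches newtype
and keeps the old header otherwise, so passing newopt == NULL for the
matching type simply drops that slot. A toy standalone model of the
selection rule (types and strings are made up for illustration):

    #include <stdio.h>

    /* one slot: the matching type takes the new value, others keep the old */
    static const char *renew(int slot_type, const char *old,
                             const char *new, int newtype)
    {
            return slot_type == newtype ? new : old;
    }

    int main(void)
    {
            printf("%s\n", renew(0, "old-hop", "new-hop", 0)); /* new-hop */
            printf("%s\n", renew(1, "old-dst", "new-hop", 0)); /* old-dst */
            return 0;
    }
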
index 2febe26de6a150155e269da0c38e5cb1122aca8d..595ad408dba09184eb814eee1870e04c17b79f77 100644 (file)
@@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        bool dev_match = (sk->sk_bound_dev_if == dif ||
                                          sk->sk_bound_dev_if == sdif);
 
-                       if (exact_dif && !dev_match)
+                       if (!dev_match)
                                return -1;
-                       if (sk->sk_bound_dev_if && dev_match)
+                       if (sk->sk_bound_dev_if)
                                score++;
                }
                if (sk->sk_incoming_cpu == raw_smp_processor_id())
index 39d1d487eca25faceacbc3619fc6c4c38088d62a..d212738e9d100d4e3270f9188466da6b8a3d186c 100644 (file)
@@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
        return f6i;
 }
 
-void fib6_info_destroy(struct fib6_info *f6i)
+void fib6_info_destroy_rcu(struct rcu_head *head)
 {
+       struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
        struct rt6_exception_bucket *bucket;
        struct dst_metrics *m;
 
@@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
 
        kfree(f6i);
 }
-EXPORT_SYMBOL_GPL(fib6_info_destroy);
+EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
 
 static struct fib6_node *node_alloc(struct net *net)
 {
@@ -934,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 {
        struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
                                    lockdep_is_held(&rt->fib6_table->tb6_lock));
-       enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
-       struct fib6_info *iter = NULL, *match = NULL;
+       struct fib6_info *iter = NULL;
        struct fib6_info __rcu **ins;
+       struct fib6_info __rcu **fallback_ins = NULL;
        int replace = (info->nlh &&
                       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
-       int append = (info->nlh &&
-                      (info->nlh->nlmsg_flags & NLM_F_APPEND));
        int add = (!info->nlh ||
                   (info->nlh->nlmsg_flags & NLM_F_CREATE));
        int found = 0;
+       bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
        u16 nlflags = NLM_F_EXCL;
        int err;
 
-       if (append)
+       if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
                nlflags |= NLM_F_APPEND;
 
        ins = &fn->leaf;
@@ -969,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 
                        nlflags &= ~NLM_F_EXCL;
                        if (replace) {
-                               found++;
-                               break;
+                               if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                                       found++;
+                                       break;
+                               }
+                               if (rt_can_ecmp)
+                                       fallback_ins = fallback_ins ?: ins;
+                               goto next_iter;
                        }
 
                        if (rt6_duplicate_nexthop(iter, rt)) {
@@ -985,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
                                return -EEXIST;
                        }
-
-                       /* first route that matches */
-                       if (!match)
-                               match = iter;
+                       /* If we have the same destination and the same metric,
+                        * but not the same gateway, then the route we try to
+                        * add is a sibling of this route: increment our
+                        * counter of siblings, and later add our route to
+                        * the list.
+                        * Only static routes (which don't have the
+                        * RTF_EXPIRES flag) are used for ECMPv6.
+                        *
+                        * To avoid a long list, we only add siblings if the
+                        * route has a gateway.
+                        */
+                       if (rt_can_ecmp &&
+                           rt6_qualify_for_ecmp(iter))
+                               rt->fib6_nsiblings++;
                }
 
                if (iter->fib6_metric > rt->fib6_metric)
                        break;
 
+next_iter:
                ins = &iter->fib6_next;
        }
 
+       if (fallback_ins && !found) {
+               /* No ECMP-able route found, replace first non-ECMP one */
+               ins = fallback_ins;
+               iter = rcu_dereference_protected(*ins,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
+               found++;
+       }
+
        /* Reset round-robin state, if necessary */
        if (ins == &fn->leaf)
                fn->rr_ptr = NULL;
 
        /* Link this route to others same route. */
-       if (append && match) {
+       if (rt->fib6_nsiblings) {
+               unsigned int fib6_nsiblings;
                struct fib6_info *sibling, *temp_sibling;
 
-               if (rt->fib6_flags & RTF_REJECT) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Can not append a REJECT route");
-                       return -EINVAL;
-               } else if (match->fib6_flags & RTF_REJECT) {
-                       NL_SET_ERR_MSG(extack,
-                                      "Can not append to a REJECT route");
-                       return -EINVAL;
+               /* Find the first route that has the same metric */
+               sibling = leaf;
+               while (sibling) {
+                       if (sibling->fib6_metric == rt->fib6_metric &&
+                           rt6_qualify_for_ecmp(sibling)) {
+                               list_add_tail(&rt->fib6_siblings,
+                                             &sibling->fib6_siblings);
+                               break;
+                       }
+                       sibling = rcu_dereference_protected(sibling->fib6_next,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
                }
-               event = FIB_EVENT_ENTRY_APPEND;
-               rt->fib6_nsiblings = match->fib6_nsiblings;
-               list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
-               match->fib6_nsiblings++;
-
                /* For each sibling in the list, increment the counter of
                 * siblings. BUG() if the counters do not match; the list
                 * of siblings is broken!
                 */
+               fib6_nsiblings = 0;
                list_for_each_entry_safe(sibling, temp_sibling,
-                                        &match->fib6_siblings, fib6_siblings) {
+                                        &rt->fib6_siblings, fib6_siblings) {
                        sibling->fib6_nsiblings++;
-                       BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings);
+                       BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
+                       fib6_nsiblings++;
                }
-
-               rt6_multipath_rebalance(match);
+               BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
+               rt6_multipath_rebalance(temp_sibling);
        }
 
        /*
@@ -1042,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
 add:
                nlflags |= NLM_F_CREATE;
 
-               err = call_fib6_entry_notifiers(info->nl_net, event, rt,
-                                               extack);
+               err = call_fib6_entry_notifiers(info->nl_net,
+                                               FIB_EVENT_ENTRY_ADD,
+                                               rt, extack);
                if (err)
                        return err;
 
@@ -1061,7 +1087,7 @@ add:
                }
 
        } else {
-               struct fib6_info *tmp;
+               int nsiblings;
 
                if (!found) {
                        if (add)
@@ -1076,57 +1102,48 @@ add:
                if (err)
                        return err;
 
-               /* if route being replaced has siblings, set tmp to
-                * last one, otherwise tmp is current route. this is
-                * used to set fib6_next for new route
-                */
-               if (iter->fib6_nsiblings)
-                       tmp = list_last_entry(&iter->fib6_siblings,
-                                             struct fib6_info,
-                                             fib6_siblings);
-               else
-                       tmp = iter;
-
-               /* insert new route */
                atomic_inc(&rt->fib6_ref);
                rcu_assign_pointer(rt->fib6_node, fn);
-               rt->fib6_next = tmp->fib6_next;
+               rt->fib6_next = iter->fib6_next;
                rcu_assign_pointer(*ins, rt);
-
                if (!info->skip_notify)
                        inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               nsiblings = iter->fib6_nsiblings;
+               iter->fib6_node = NULL;
+               fib6_purge_rt(iter, fn, info->nl_net);
+               if (rcu_access_pointer(fn->rr_ptr) == iter)
+                       fn->rr_ptr = NULL;
+               fib6_info_release(iter);
 
-               /* delete old route */
-               rt = iter;
-
-               if (rt->fib6_nsiblings) {
-                       struct fib6_info *tmp;
-
+               if (nsiblings) {
                        /* Replacing an ECMP route, remove all siblings */
-                       list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings,
-                                                fib6_siblings) {
-                               iter->fib6_node = NULL;
-                               fib6_purge_rt(iter, fn, info->nl_net);
-                               if (rcu_access_pointer(fn->rr_ptr) == iter)
-                                       fn->rr_ptr = NULL;
-                               fib6_info_release(iter);
-
-                               rt->fib6_nsiblings--;
-                               info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+                       ins = &rt->fib6_next;
+                       iter = rcu_dereference_protected(*ins,
+                                   lockdep_is_held(&rt->fib6_table->tb6_lock));
+                       while (iter) {
+                               if (iter->fib6_metric > rt->fib6_metric)
+                                       break;
+                               if (rt6_qualify_for_ecmp(iter)) {
+                                       *ins = iter->fib6_next;
+                                       iter->fib6_node = NULL;
+                                       fib6_purge_rt(iter, fn, info->nl_net);
+                                       if (rcu_access_pointer(fn->rr_ptr) == iter)
+                                               fn->rr_ptr = NULL;
+                                       fib6_info_release(iter);
+                                       nsiblings--;
+                                       info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+                               } else {
+                                       ins = &iter->fib6_next;
+                               }
+                               iter = rcu_dereference_protected(*ins,
+                                       lockdep_is_held(&rt->fib6_table->tb6_lock));
                        }
+                       WARN_ON(nsiblings != 0);
                }
-
-               WARN_ON(rt->fib6_nsiblings != 0);
-
-               rt->fib6_node = NULL;
-               fib6_purge_rt(rt, fn, info->nl_net);
-               if (rcu_access_pointer(fn->rr_ptr) == rt)
-                       fn->rr_ptr = NULL;
-               fib6_info_release(rt);
        }
 
        return 0;
index c8cf2fdbb13b88cc1bf6b494a75407cdc16977eb..cd2cfb04e5d82010a5eb1800a53fc8007479c6f9 100644 (file)
@@ -927,7 +927,6 @@ tx_err:
 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                                         struct net_device *dev)
 {
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct ip6_tnl *t = netdev_priv(dev);
        struct dst_entry *dst = skb_dst(skb);
        struct net_device_stats *stats;
@@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                        goto tx_err;
                }
        } else {
+               struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
                switch (skb->protocol) {
                case htons(ETH_P_IP):
                        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
index 021e5aef6ba31b7a9face6eb363a6409761385a7..a14fb4fcdf1858352da2b62742f853bf4a2f9f71 100644 (file)
@@ -1219,7 +1219,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
        if (mtu < IPV6_MIN_MTU)
                return -EINVAL;
        cork->base.fragsize = mtu;
-       cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
+       cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
+                             sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
 
        if (dst_allfrag(xfrm_dst_path(&rt->dst)))
                cork->base.flags |= IPCORK_ALLFRAG;
index 4d780c7f013060732dda2db760d7ba0474c812e3..568ca4187cd101e745988ee262f79431ef8d28cc 100644 (file)
@@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        case IPV6_DSTOPTS:
        {
                struct ipv6_txoptions *opt;
+               struct ipv6_opt_hdr *new = NULL;
+
+               /* hop-by-hop / destination options are privileged options */
+               retv = -EPERM;
+               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+                       break;
 
                /* remove any sticky options header with a zero option
                 * length, per RFC3542.
@@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                else if (optlen < sizeof(struct ipv6_opt_hdr) ||
                         optlen & 0x7 || optlen > 8 * 255)
                        goto e_inval;
-
-               /* hop-by-hop / destination options are privileged option */
-               retv = -EPERM;
-               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
-                       break;
+               else {
+                       new = memdup_user(optval, optlen);
+                       if (IS_ERR(new)) {
+                               retv = PTR_ERR(new);
+                               break;
+                       }
+                       if (unlikely(ipv6_optlen(new) > optlen)) {
+                               kfree(new);
+                               goto e_inval;
+                       }
+               }
 
                opt = rcu_dereference_protected(np->opt,
                                                lockdep_sock_is_held(sk));
-               opt = ipv6_renew_options(sk, opt, optname,
-                                        (struct ipv6_opt_hdr __user *)optval,
-                                        optlen);
+               opt = ipv6_renew_options(sk, opt, optname, new);
+               kfree(new);
                if (IS_ERR(opt)) {
                        retv = PTR_ERR(opt);
                        break;
@@ -718,8 +729,9 @@ done:
                        struct sockaddr_in6 *psin6;
 
                        psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
-                       retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
-                                                &psin6->sin6_addr);
+                       retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
+                                                    &psin6->sin6_addr,
+                                                    MCAST_INCLUDE);
                        /* prior join w/ different source is ok */
                        if (retv && retv != -EADDRINUSE)
                                break;
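
The shape of this ipv6_sockglue change: the CAP_NET_RAW check is
hoisted above any parsing, and the user buffer is pulled into kernel
memory once with memdup_user(), after which the length claimed inside
the header is checked against what was actually copied. That single
kernel-side copy is what lets the calipso callers above drop the
set_fs(KERNEL_DS) variant. The copy-then-validate pattern, restated as
a sketch using the hunk's names:

    struct ipv6_opt_hdr *new;

    new = memdup_user(optval, optlen);      /* kmalloc + copy_from_user */
    if (IS_ERR(new))
            return PTR_ERR(new);
    if (ipv6_optlen(new) > optlen) {        /* embedded length lies */
            kfree(new);
            return -EINVAL;
    }
    /* 'new' is now safe to parse from kernel context */
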
index 975021df7c1cf2eae6897e3dd57ea20998f4ea90..2699be7202be205a1709104a3969a83684743e57 100644 (file)
@@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
                          int delta);
 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
                            struct inet6_dev *idev);
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+                            const struct in6_addr *addr, unsigned int mode);
 
 #define MLD_QRV_DEFAULT                2
 /* RFC3810, 9.2. Query Interval */
@@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
        return iv > 0 ? iv : 1;
 }
 
-int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
+                              const struct in6_addr *addr, unsigned int mode)
 {
        struct net_device *dev = NULL;
        struct ipv6_mc_socklist *mc_lst;
@@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
        }
 
        mc_lst->ifindex = dev->ifindex;
-       mc_lst->sfmode = MCAST_EXCLUDE;
+       mc_lst->sfmode = mode;
        rwlock_init(&mc_lst->sflock);
        mc_lst->sflist = NULL;
 
@@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
         *      now add/increase the group membership on the device
         */
 
-       err = ipv6_dev_mc_inc(dev, addr);
+       err = __ipv6_dev_mc_inc(dev, addr, mode);
 
        if (err) {
                sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
        return 0;
 }
+
+int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+{
+       return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
+}
 EXPORT_SYMBOL(ipv6_sock_mc_join);
 
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+                         const struct in6_addr *addr, unsigned int mode)
+{
+       return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
+}
+
 /*
  *     socket leave on multicast group
  */
@@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
        return rv;
 }
 
-static void igmp6_group_added(struct ifmcaddr6 *mc)
+static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
 {
        struct net_device *dev = mc->idev->dev;
        char buf[MAX_ADDR_LEN];
@@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
        }
        /* else v2 */
 
-       mc->mca_crcount = mc->idev->mc_qrv;
+       /* Based on RFC3810 6.1, for a newly added INCLUDE-mode (SSM)
+        * membership we should not send a filter-mode change record,
+        * as the mode change is from IN() to IN(A).
+        */
+       if (mode == MCAST_EXCLUDE)
+               mc->mca_crcount = mc->idev->mc_qrv;
+
        mld_ifc_event(mc->idev);
 }
 
@@ -770,13 +790,14 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        spin_lock_bh(&im->mca_lock);
        if (pmc) {
                im->idev = pmc->idev;
-               im->mca_crcount = idev->mc_qrv;
                im->mca_sfmode = pmc->mca_sfmode;
                if (pmc->mca_sfmode == MCAST_INCLUDE) {
                        im->mca_tomb = pmc->mca_tomb;
                        im->mca_sources = pmc->mca_sources;
                        for (psf = im->mca_sources; psf; psf = psf->sf_next)
-                               psf->sf_crcount = im->mca_crcount;
+                               psf->sf_crcount = idev->mc_qrv;
+               } else {
+                       im->mca_crcount = idev->mc_qrv;
                }
                in6_dev_put(pmc->idev);
                kfree(pmc);
@@ -831,7 +852,8 @@ static void ma_put(struct ifmcaddr6 *mc)
 }
 
 static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
-                                  const struct in6_addr *addr)
+                                  const struct in6_addr *addr,
+                                  unsigned int mode)
 {
        struct ifmcaddr6 *mc;
 
@@ -849,9 +871,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
        refcount_set(&mc->mca_refcnt, 1);
        spin_lock_init(&mc->mca_lock);
 
-       /* initial mode is (EX, empty) */
-       mc->mca_sfmode = MCAST_EXCLUDE;
-       mc->mca_sfcount[MCAST_EXCLUDE] = 1;
+       mc->mca_sfmode = mode;
+       mc->mca_sfcount[mode] = 1;
 
        if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
            IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
@@ -863,7 +884,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
 /*
  *     device multicast group inc (add if not found)
  */
-int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+                            const struct in6_addr *addr, unsigned int mode)
 {
        struct ifmcaddr6 *mc;
        struct inet6_dev *idev;
@@ -887,14 +909,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
                if (ipv6_addr_equal(&mc->mca_addr, addr)) {
                        mc->mca_users++;
                        write_unlock_bh(&idev->lock);
-                       ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
-                               NULL, 0);
+                       ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
                        in6_dev_put(idev);
                        return 0;
                }
        }
 
-       mc = mca_alloc(idev, addr);
+       mc = mca_alloc(idev, addr, mode);
        if (!mc) {
                write_unlock_bh(&idev->lock);
                in6_dev_put(idev);
@@ -911,11 +932,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        write_unlock_bh(&idev->lock);
 
        mld_del_delrec(idev, mc);
-       igmp6_group_added(mc);
+       igmp6_group_added(mc, mode);
        ma_put(mc);
        return 0;
 }
 
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+{
+       return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
+}
+
 /*
  *     device multicast group del
  */
@@ -1751,7 +1777,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
 
                psf_next = psf->sf_next;
 
-               if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
+               if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
                        psf_prev = psf;
                        continue;
                }
@@ -2066,7 +2092,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev)
                if (pmc->mca_sfcount[MCAST_EXCLUDE])
                        type = MLD2_CHANGE_TO_EXCLUDE;
                else
-                       type = MLD2_CHANGE_TO_INCLUDE;
+                       type = MLD2_ALLOW_NEW_SOURCES;
                skb = add_grec(skb, pmc, type, 0, 0, 1);
                spin_unlock_bh(&pmc->mca_lock);
        }
@@ -2082,7 +2108,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
                mld_send_initial_cr(idev);
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
 }
 
@@ -2094,7 +2121,8 @@ static void mld_dad_timer_expire(struct timer_list *t)
        if (idev->mc_dad_count) {
                idev->mc_dad_count--;
                if (idev->mc_dad_count)
-                       mld_dad_start_timer(idev, idev->mc_maxdelay);
+                       mld_dad_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2452,7 +2480,8 @@ static void mld_ifc_timer_expire(struct timer_list *t)
        if (idev->mc_ifc_count) {
                idev->mc_ifc_count--;
                if (idev->mc_ifc_count)
-                       mld_ifc_start_timer(idev, idev->mc_maxdelay);
+                       mld_ifc_start_timer(idev,
+                                           unsolicited_report_interval(idev));
        }
        in6_dev_put(idev);
 }
@@ -2543,7 +2572,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
        ipv6_mc_reset(idev);
        for (i = idev->mc_list; i; i = i->next) {
                mld_del_delrec(idev, i);
-               igmp6_group_added(i);
+               igmp6_group_added(i, i->mca_sfmode);
        }
        read_unlock_bh(&idev->lock);
 }
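
The mcast.c hunks above let the initial MLD filter mode follow the join type: a source-specific (INCLUDE-mode) join now starts out as (IN, {src}) and is reported with ALLOW_NEW_SOURCES instead of first announcing an EXCLUDE-mode membership. A minimal userspace sketch of a join that exercises this path via the RFC 3678 MCAST_JOIN_SOURCE_GROUP socket option; the interface name, group, and source addresses are placeholders:

/* Illustrative userspace join that reaches __ipv6_dev_mc_inc() with
 * MCAST_INCLUDE after this series; interface, group, and source
 * addresses are placeholders.
 */
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET6, SOCK_DGRAM, 0);
        struct group_source_req gsr;
        struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
        struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;

        memset(&gsr, 0, sizeof(gsr));
        gsr.gsr_interface = if_nametoindex("eth0");             /* placeholder */
        grp->sin6_family = AF_INET6;
        inet_pton(AF_INET6, "ff3e::1234", &grp->sin6_addr);     /* placeholder */
        src->sin6_family = AF_INET6;
        inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);    /* placeholder */

        if (setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
                       &gsr, sizeof(gsr)) < 0)
                perror("MCAST_JOIN_SOURCE_GROUP");
        close(fd);
        return 0;
}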
index e640d2f3c55cf00568ba195a5f667a6da616ca47..0ec273997d1dc6eff71f62c66bbe214e369ab8f9 100644 (file)
@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
                        return;
                }
        }
-       if (ndopts.nd_opts_nonce)
+       if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
                memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
 
        inc = ipv6_addr_is_multicast(daddr);
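
The ndisc hunk above only trusts the nonce option when its advertised length is exactly one unit, because ND option lengths are counted in 8-octet units (RFC 4861): len == 1 covers the 2-byte type/length header plus the 6-byte nonce that the following memcpy() reads. A small standalone sketch of that arithmetic; the struct mirrors the kernel's nd_opt_hdr layout, and type 14 is the nonce option from RFC 3971:

/* ND option lengths count 8-octet units (RFC 4861), so nd_opt_len == 1
 * is exactly 2 header bytes plus the 6-byte nonce. The struct mirrors
 * the kernel's nd_opt_hdr; type 14 is the nonce option (RFC 3971).
 */
#include <stdint.h>
#include <stdio.h>

struct nd_opt_hdr {
        uint8_t nd_opt_type;
        uint8_t nd_opt_len;     /* in units of 8 octets */
};

int main(void)
{
        struct nd_opt_hdr opt = { .nd_opt_type = 14, .nd_opt_len = 1 };
        size_t total = opt.nd_opt_len * 8;      /* 8 bytes on the wire */
        size_t nonce = total - sizeof(opt);     /* 6 nonce bytes */

        printf("option=%zu bytes, nonce=%zu bytes\n", total, nonce);
        return 0;
}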
index 7eab959734bc736cc103551fb50bce84f9aeaec7..daf2e9e9193d19f8f89890f96ca0439d8d55c1c6 100644 (file)
@@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
                .checkentry = icmp6_checkentry,
                .proto      = IPPROTO_ICMPV6,
                .family     = NFPROTO_IPV6,
+               .me         = THIS_MODULE,
        },
 };
 
index 5e0332014c1738999e680c1853829f384e880284..e4d9e6976d3c295e68b13c0ceecd5fa76db4fbc1 100644 (file)
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        if (hdr == NULL)
                goto err_reg;
 
-       net->nf_frag.sysctl.frags_hdr = hdr;
+       net->nf_frag_frags_hdr = hdr;
        return 0;
 
 err_reg:
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
        struct ctl_table *table;
 
-       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
-       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       table = net->nf_frag_frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag_frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
 }
@@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
            fq->q.meat == fq->q.len &&
            nf_ct_frag6_reasm(fq, skb, dev))
                ret = 0;
+       else
+               skb_dst_drop(skb);
 
 out_unlock:
        spin_unlock_bh(&fq->q.lock);
index bf1d6c421e3bd0d5524559d507eb14ce9874496f..5dfd33af64515518a2f94b13a62a8ae4dce846da 100644 (file)
@@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
                 * to a listener socket if there's one */
                struct sock *sk2;
 
-               sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto,
+               sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
                                            &iph->saddr,
                                            nf_tproxy_laddr6(skb, laddr, &iph->daddr),
                                            hp->source,
@@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
 EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6);
 
 struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
                      const u8 protocol,
                      const struct in6_addr *saddr, const struct in6_addr *daddr,
                      const __be16 sport, const __be16 dport,
@@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                      const enum nf_tproxy_lookup_t lookup_type)
 {
        struct sock *sk;
-       struct tcphdr *tcph;
 
        switch (protocol) {
-       case IPPROTO_TCP:
+       case IPPROTO_TCP: {
+               struct tcphdr _hdr, *hp;
+
+               hp = skb_header_pointer(skb, thoff,
+                                       sizeof(struct tcphdr), &_hdr);
+               if (hp == NULL)
+                       return NULL;
+
                switch (lookup_type) {
                case NF_TPROXY_LOOKUP_LISTENER:
-                       tcph = hp;
                        sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
-                                                  thoff + __tcp_hdrlen(tcph),
+                                                  thoff + __tcp_hdrlen(hp),
                                                   saddr, sport,
                                                   daddr, ntohs(dport),
                                                   in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
                        BUG();
                }
                break;
+               }
        case IPPROTO_UDP:
                sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
                                     in->ifindex);
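
The tproxy helpers above stop trusting a caller-supplied header pointer and re-fetch the TCP header with skb_header_pointer(), which returns a direct pointer when the requested bytes are linear and otherwise copies them into the caller's stack buffer. A userspace model of that idiom, assuming a buffer split into a linear part and one fragment:

/* Userspace model of the skb_header_pointer() idiom: return a direct
 * pointer when the span is linear, otherwise copy into the caller's
 * stack buffer; NULL if out of bounds. Purely illustrative.
 */
#include <stddef.h>
#include <string.h>

const void *header_pointer(const unsigned char *linear, size_t linear_len,
                           const unsigned char *frag, size_t frag_len,
                           size_t offset, size_t len, void *buf)
{
        if (offset + len <= linear_len)
                return linear + offset;                 /* no copy needed */
        if (offset + len > linear_len + frag_len)
                return NULL;                            /* out of bounds */
        if (offset < linear_len) {
                size_t head = linear_len - offset;

                memcpy(buf, linear + offset, head);
                memcpy((unsigned char *)buf + head, frag, len - head);
        } else {
                memcpy(buf, frag + (offset - linear_len), len);
        }
        return buf;
}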
index ce6f0d15b5dd5d8a9531a8316a932d3d30a3491b..afc307c89d1a977a00693999ec0f54b50005b7bd 100644 (file)
@@ -1334,7 +1334,7 @@ void raw6_proc_exit(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-/* Same as inet6_dgram_ops, sans udp_poll_mask.  */
+/* Same as inet6_dgram_ops, sans udp_poll.  */
 const struct proto_ops inet6_sockraw_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
@@ -1344,7 +1344,7 @@ const struct proto_ops inet6_sockraw_ops = {
        .socketpair        = sock_no_socketpair,        /* a do nothing */
        .accept            = sock_no_accept,            /* a do nothing */
        .getname           = inet6_getname,
-       .poll_mask         = datagram_poll_mask,        /* ok           */
+       .poll              = datagram_poll,             /* ok           */
        .ioctl             = inet6_ioctl,               /* must change  */
        .listen            = sock_no_listen,            /* ok           */
        .shutdown          = inet_shutdown,             /* ok           */
index 86a0e4333d42212d03f53e0d54fcf4e03a328607..2ce0bd17de4f035abcdfb0473c7fa97d64dd996a 100644 (file)
@@ -3842,7 +3842,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
                        lockdep_is_held(&rt->fib6_table->tb6_lock));
        while (iter) {
                if (iter->fib6_metric == rt->fib6_metric &&
-                   iter->fib6_nsiblings)
+                   rt6_qualify_for_ecmp(iter))
                        return iter;
                iter = rcu_dereference_protected(iter->fib6_next,
                                lockdep_is_held(&rt->fib6_table->tb6_lock));
@@ -4388,6 +4388,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                        rt = NULL;
                        goto cleanup;
                }
+               if (!rt6_qualify_for_ecmp(rt)) {
+                       err = -EINVAL;
+                       NL_SET_ERR_MSG(extack,
+                                      "Device only routes can not be added for IPv6 using the multipath API.");
+                       fib6_info_release(rt);
+                       goto cleanup;
+               }
 
                rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
 
@@ -4439,7 +4446,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
                 */
                cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
                                                     NLM_F_REPLACE);
-               cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND;
                nhn++;
        }
 
index 33fb35cbfac132b1a85cd2c9ce62b4344cbe8afe..558fe8cc6d43858ca828cbd8dc8ea65e63bc6602 100644 (file)
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
                        return -ENOMEM;
 
                for_each_possible_cpu(cpu) {
-                       tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
+                       tfm = crypto_alloc_shash(algo->name, 0, 0);
                        if (IS_ERR(tfm))
                                return PTR_ERR(tfm);
                        p_tfm = per_cpu_ptr(algo->tfms, cpu);
index 19ccf0dc996ca7da1f47bd887b18e4755257e462..a8854dd3e9c5ef64a7a480bb6ff891fac0e6d1ea 100644 (file)
@@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
 
        if (do_flowlabel > 0) {
                hash = skb_get_hash(skb);
-               rol32(hash, 16);
+               hash = rol32(hash, 16);
                flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
        } else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
                flowlabel = ip6_flowlabel(inner_hdr);
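
The seg6 hunk above fixes a discarded-return-value bug: rol32() is a pure function, so a bare rol32(hash, 16); leaves hash unchanged. A standalone demonstration, with rol32() re-implemented here only for illustration:

/* rol32() returns the rotated value and does not modify its argument;
 * re-implemented here only to demonstrate the discarded-result bug.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
        return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

int main(void)
{
        uint32_t hash = 0x12345678;

        rol32(hash, 16);                /* bug: result thrown away */
        printf("unchanged: %08x\n", hash);

        hash = rol32(hash, 16);         /* fix: assign the result */
        printf("rotated:   %08x\n", hash);
        return 0;
}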
index 68e86257a549988b5f87098b24c8e3d0bd1dc1ce..893a022f962081416fa1b9e5f96416a8c2e92e5c 100644 (file)
@@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+                           poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);
 
@@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = {
        .getname        = iucv_sock_getname,
        .sendmsg        = iucv_sock_sendmsg,
        .recvmsg        = iucv_sock_recvmsg,
-       .poll_mask      = iucv_sock_poll_mask,
+       .poll           = iucv_sock_poll,
        .ioctl          = sock_no_ioctl,
        .mmap           = sock_no_mmap,
        .socketpair     = sock_no_socketpair,
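
The iucv hunks, like the many .poll_mask conversions below, are part of the 4.18-wide revert from the short-lived ->poll_mask interface back to the classic ->poll. The key obligation of ->poll is registering on the socket's wait queue via sock_poll_wait() before computing the readiness mask. A sketch of the restored shape; protocol-specific events are elided and the names are illustrative:

/* Shape of the classic ->poll restored by these hunks: register with
 * the poller first via sock_poll_wait(), then report readiness. Not a
 * complete handler; protocol-specific events are elided.
 */
static __poll_t example_sock_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        sock_poll_wait(file, sk_sleep(sk), wait);       /* must come first */

        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;
        return mask;
}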
index 84b7d5c6fec81a7c62ed4744d48726dee8c7e426..d3601d421571b9825ff0a6cea9b75cb52fd51dea 100644 (file)
@@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
        struct list_head *head;
        int index = 0;
 
-       /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state,
-        * so  we set sk_state, otherwise epoll_wait always returns right away
-        * with EPOLLHUP
+       /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
+        * we set sk_state, otherwise epoll_wait always returns right away with
+        * EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;
 
@@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      sock_no_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        kcm_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 8bdc1cbe490a4ae819db32851ea6a8184b0727b0..5e1d2946ffbf2a2cf4e65db44658c7f374e72e25 100644 (file)
@@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = {
 
        /* Now the operations that really occur. */
        .release        =       pfkey_release,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .sendmsg        =       pfkey_sendmsg,
        .recvmsg        =       pfkey_recvmsg,
 };
index 181073bf69251392c3a7fd23197a278f37dd67f0..a9c05b2bc1b0bc3471bbf62dc3b7c11e971a7f08 100644 (file)
@@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 336e4c00abbcdaef7385c90e24d2088131efe095..957369192ca181d6da21c9dda03d0e8a9726643e 100644 (file)
@@ -754,7 +754,7 @@ static const struct proto_ops l2tp_ip6_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = sock_no_accept,
        .getname           = l2tp_ip6_getname,
-       .poll_mask         = datagram_poll_mask,
+       .poll              = datagram_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sock_no_listen,
        .shutdown          = inet_shutdown,
index 55188382845c310c98eb86cdfc3b78e1d03e8e0f..e398797878a9740e2b3a1525802a032705981630 100644 (file)
@@ -1818,7 +1818,7 @@ static const struct proto_ops pppol2tp_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pppol2tp_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = pppol2tp_setsockopt,
index 804de84901868a4cffd2ec5d6c9e979af937cb59..1beeea9549fa6ec1f7b0e5f9af8ff3250a316f59 100644 (file)
@@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = {
        .socketpair  = sock_no_socketpair,
        .accept      = llc_ui_accept,
        .getname     = llc_ui_getname,
-       .poll_mask   = datagram_poll_mask,
+       .poll        = datagram_poll,
        .ioctl       = llc_ui_ioctl,
        .listen      = llc_ui_listen,
        .shutdown    = llc_ui_shutdown,
index 44b5dfe8727d936d39338006bc89b125c848d12b..fa1f1e63a2640fd405e42e5aeae9718b4ef12d2a 100644 (file)
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);
 
+       local_bh_disable();
        __ieee80211_subif_start_xmit(skb, skb->dev, flags);
+       local_bh_enable();
 
        return 0;
 }
index e7b05de1e6d1e136eb509293c4fde81468e12642..25e483e8278bd0404bf044c1a1748fdd1db77580 100644 (file)
@@ -73,8 +73,8 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
        ncm->data[2] = data;
        ncm->data[4] = ntohl(lsc->oem_status);
 
-       netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
-                   nc->id, data & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+                  nc->id, data & 0x1 ? "up" : "down");
 
        chained = !list_empty(&nc->link);
        state = nc->state;
@@ -148,9 +148,9 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
        hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
        ncm->data[3] = ntohl(hncdsc->status);
        spin_unlock_irqrestore(&nc->lock, flags);
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: host driver %srunning on channel %u\n",
-                     ncm->data[3] & 0x1 ? "" : "not ", nc->id);
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: host driver %srunning on channel %u\n",
+                  ncm->data[3] & 0x1 ? "" : "not ", nc->id);
 
        return 0;
 }
index 5561e221b71f10b223b381c2ed4b0752bedbc225..091284760d21fa02dc0f9997a2c68ce7f1f618e6 100644 (file)
@@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                }
                break;
        case ncsi_dev_state_config_done:
-               netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                             "NCSI: channel %u config done\n", nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+                          nc->id);
                spin_lock_irqsave(&nc->lock, flags);
                if (nc->reconfigure_needed) {
                        /* This channel's configuration has been updated
@@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "Dirty NCSI channel state reset\n");
+                       netdev_dbg(dev, "Dirty NCSI channel state reset\n");
                        ncsi_process_next_channel(ndp);
                        break;
                }
@@ -816,9 +815,9 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
                } else {
                        hot_nc = NULL;
                        nc->state = NCSI_CHANNEL_INACTIVE;
-                       netdev_warn(ndp->ndev.dev,
-                                   "NCSI: channel %u link down after config\n",
-                                   nc->id);
+                       netdev_dbg(ndp->ndev.dev,
+                                  "NCSI: channel %u link down after config\n",
+                                  nc->id);
                }
                spin_unlock_irqrestore(&nc->lock, flags);
 
@@ -908,9 +907,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
        }
 
        ncm = &found->modes[NCSI_MODE_LINK];
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: Channel %u added to queue (link %s)\n",
-                     found->id, ncm->data[2] & 0x1 ? "up" : "down");
+       netdev_dbg(ndp->ndev.dev,
+                  "NCSI: Channel %u added to queue (link %s)\n",
+                  found->id, ncm->data[2] & 0x1 ? "up" : "down");
 
 out:
        spin_lock_irqsave(&ndp->lock, flags);
@@ -1199,14 +1198,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
        switch (old_state) {
        case NCSI_CHANNEL_INACTIVE:
                ndp->ndev.state = ncsi_dev_state_config;
-               netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+                          nc->id);
                ncsi_configure_channel(ndp);
                break;
        case NCSI_CHANNEL_ACTIVE:
                ndp->ndev.state = ncsi_dev_state_suspend;
-               netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
-                           nc->id);
+               netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+                          nc->id);
                ncsi_suspend_channel(ndp);
                break;
        default:
@@ -1226,8 +1225,6 @@ out:
                return ncsi_choose_active_channel(ndp);
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev,
-                     "NCSI: No more channels to process\n");
        ncsi_report_link(ndp, false);
        return -ENODEV;
 }
@@ -1318,9 +1315,9 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                                if ((ndp->ndev.state & 0xff00) ==
                                                ncsi_dev_state_config ||
                                                !list_empty(&nc->link)) {
-                                       netdev_printk(KERN_DEBUG, nd->dev,
-                                                     "NCSI: channel %p marked dirty\n",
-                                                     nc);
+                                       netdev_dbg(nd->dev,
+                                                  "NCSI: channel %p marked dirty\n",
+                                                  nc);
                                        nc->reconfigure_needed = true;
                                }
                                spin_unlock_irqrestore(&nc->lock, flags);
@@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
                        list_add_tail_rcu(&nc->link, &ndp->channel_queue);
                        spin_unlock_irqrestore(&ndp->lock, flags);
 
-                       netdev_printk(KERN_DEBUG, nd->dev,
-                                     "NCSI: kicked channel %p\n", nc);
+                       netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
                        n++;
                }
        }
@@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
                n_vids++;
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u already registered\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u already registered\n",
+                                  vid);
                        return 0;
                }
        }
@@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
        vlan->vid = vid;
        list_add_rcu(&vlan->list, &ndp->vlan_vids);
 
-       netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+       netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
 
        found = ncsi_kick_channels(ndp) != 0;
 
@@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
        /* Remove the VLAN id from our internal list */
        list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
                if (vlan->vid == vid) {
-                       netdev_printk(KERN_DEBUG, dev,
-                                     "NCSI: vid %u found, removing\n", vid);
+                       netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
                        list_del_rcu(&vlan->list);
                        found = true;
                        kfree(vlan);
@@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
                }
        }
 
-       netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+       netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
        ncsi_report_link(ndp, true);
 }
 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
index dbd7d1fad277ebe3fb09f7ec68f7178433a9c438..f0a1c536ef15a0d35a3078bf85b5f4bee704f894 100644 (file)
@@ -460,6 +460,13 @@ config NF_TABLES
 
 if NF_TABLES
 
+config NF_TABLES_SET
+       tristate "Netfilter nf_tables set infrastructure"
+       help
+         This option enables the nf_tables set infrastructure, which allows
+         looking up elements in a set and building one-way mappings between
+         matchings and actions.
+
 config NF_TABLES_INET
        depends on IPV6
        select NF_TABLES_IPV4
@@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD
          This option adds the "flow_offload" expression that you can use to
          choose what flows are placed into the hardware.
 
-config NFT_SET_RBTREE
-       tristate "Netfilter nf_tables rbtree set module"
-       help
-         This option adds the "rbtree" set type (Red Black tree) that is used
-         to build interval-based sets.
-
-config NFT_SET_HASH
-       tristate "Netfilter nf_tables hash set module"
-       help
-         This option adds the "hash" set type that is used to build one-way
-         mappings between matchings and actions.
-
-config NFT_SET_BITMAP
-       tristate "Netfilter nf_tables bitmap set module"
-       help
-         This option adds the "bitmap" set type that is used to build sets
-         whose keys are smaller or equal to 16 bits.
-
 config NFT_COUNTER
        tristate "Netfilter nf_tables counter module"
        help
index 44449389e527b082b9ea171d5c1759b7c7c7f227..8a76dced974d1c10eca35dca78cf2ab284cb2490 100644 (file)
@@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
                  nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
                  nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
 
+nf_tables_set-objs := nf_tables_set_core.o \
+                     nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
+
 obj-$(CONFIG_NF_TABLES)                += nf_tables.o
+obj-$(CONFIG_NF_TABLES_SET)    += nf_tables_set.o
 obj-$(CONFIG_NFT_COMPAT)       += nft_compat.o
 obj-$(CONFIG_NFT_CONNLIMIT)    += nft_connlimit.o
 obj-$(CONFIG_NFT_NUMGEN)       += nft_numgen.o
@@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE)               += nft_queue.o
 obj-$(CONFIG_NFT_QUOTA)                += nft_quota.o
 obj-$(CONFIG_NFT_REJECT)       += nft_reject.o
 obj-$(CONFIG_NFT_REJECT_INET)  += nft_reject_inet.o
-obj-$(CONFIG_NFT_SET_RBTREE)   += nft_set_rbtree.o
-obj-$(CONFIG_NFT_SET_HASH)     += nft_set_hash.o
-obj-$(CONFIG_NFT_SET_BITMAP)   += nft_set_bitmap.o
 obj-$(CONFIG_NFT_COUNTER)      += nft_counter.o
 obj-$(CONFIG_NFT_LOG)          += nft_log.o
 obj-$(CONFIG_NFT_MASQ)         += nft_masq.o
index d8383609fe2825b707cfb8ebc54381761ccc1108..510039862aa93c99904d2dbd3a7969327d0d896a 100644 (file)
@@ -47,6 +47,8 @@ struct nf_conncount_tuple {
        struct hlist_node               node;
        struct nf_conntrack_tuple       tuple;
        struct nf_conntrack_zone        zone;
+       int                             cpu;
+       u32                             jiffies32;
 };
 
 struct nf_conncount_rb {
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head,
                return false;
        conn->tuple = *tuple;
        conn->zone = *zone;
+       conn->cpu = raw_smp_processor_id();
+       conn->jiffies32 = (u32)jiffies;
        hlist_add_head(&conn->node, head);
        return true;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
 
+static const struct nf_conntrack_tuple_hash *
+find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+{
+       const struct nf_conntrack_tuple_hash *found;
+       unsigned long a, b;
+       int cpu = raw_smp_processor_id();
+       __s32 age;
+
+       found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
+       if (found)
+               return found;
+       b = conn->jiffies32;
+       a = (u32)jiffies;
+
+       /* conn might have been added just before by another cpu and
+        * might still be unconfirmed.  In this case, nf_conntrack_find_get()
+        * returns no result.  Thus only evict if this cpu added the
+        * stale entry or if the entry is older than two jiffies.
+        */
+       age = a - b;
+       if (conn->cpu == cpu || age >= 2) {
+               hlist_del(&conn->node);
+               kmem_cache_free(conncount_conn_cachep, conn);
+               return ERR_PTR(-ENOENT);
+       }
+
+       return ERR_PTR(-EAGAIN);
+}
+
 unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                 const struct nf_conntrack_tuple *tuple,
                                 const struct nf_conntrack_zone *zone,
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn;
-       struct hlist_node *n;
        struct nf_conn *found_ct;
+       struct hlist_node *n;
        unsigned int length = 0;
 
        *addit = tuple ? true : false;
 
        /* check the saved connections */
        hlist_for_each_entry_safe(conn, n, head, node) {
-               found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
-               if (found == NULL) {
-                       hlist_del(&conn->node);
-                       kmem_cache_free(conncount_conn_cachep, conn);
+               found = find_or_evict(net, conn);
+               if (IS_ERR(found)) {
+                       /* Not found, but might be about to be confirmed */
+                       if (PTR_ERR(found) == -EAGAIN) {
+                               length++;
+                               if (!tuple)
+                                       continue;
+
+                               if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
+                                   nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
+                                   nf_ct_zone_id(zone, zone->dir))
+                                       *addit = false;
+                       }
                        continue;
                }
 
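
find_or_evict() above computes the entry's age by subtracting two 32-bit jiffies snapshots in unsigned arithmetic and interpreting the difference as signed, which stays correct across wraparound. A standalone demonstration of that wrap-safe arithmetic:

/* Wrap-safe age check as in find_or_evict(): subtract the stored
 * 32-bit timestamp from "now" in unsigned arithmetic, then interpret
 * the difference as signed.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t then = 0xFFFFFFFEu;    /* stored just before wraparound */
        uint32_t now  = 0x00000001u;    /* "jiffies" has since wrapped */
        int32_t age = (int32_t)(now - then);

        printf("age = %d\n", age);      /* 3, not a huge bogus value */
        return !(age >= 2);             /* the patch evicts at age >= 2 */
}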
index 3465da2a98bd4ff68fc8e52935aad047c69855e8..3d52804250274602c521f3cfe6c0c3b8fa9e78e9 100644 (file)
@@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
                return -EOPNOTSUPP;
 
        /* On boot, we can set this without any fancy locking. */
-       if (!nf_conntrack_htable_size)
+       if (!nf_conntrack_hash)
                return param_set_uint(val, kp);
 
        rc = kstrtouint(val, 0, &hashsize);
index 551a1eddf0fab75eccf803b9711e069e61e60d5d..a75b11c393128d79107fc447c5109b7d0a786ea5 100644 (file)
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 
        nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
        nf_ct_iterate_destroy(unhelp, me);
+
+       /* Someone may still have obtained the helper during the unhelp
+        * iteration above, so wait for those users to finish.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
 
index 4264570475788be388e603c1bc70330c812d0eb3..a61d6df6e5f64f5b2086d14f35c88a0491f77ce6 100644 (file)
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
        if (write) {
                struct ctl_table tmp = *table;
 
+               /* proc_dostring() can append to existing strings, so we need to
+                * initialize it as an empty string.
+                */
+               buf[0] = '\0';
                tmp.data = buf;
                r = proc_dostring(&tmp, write, buffer, lenp, ppos);
                if (r)
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
                rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
                mutex_unlock(&nf_log_mutex);
        } else {
+               struct ctl_table tmp = *table;
+
+               tmp.data = buf;
                mutex_lock(&nf_log_mutex);
                logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
                if (!logger)
-                       table->data = "NONE";
+                       strlcpy(buf, "NONE", sizeof(buf));
                else
-                       table->data = logger->name;
-               r = proc_dostring(table, write, buffer, lenp, ppos);
+                       strlcpy(buf, logger->name, sizeof(buf));
                mutex_unlock(&nf_log_mutex);
+               r = proc_dostring(&tmp, write, buffer, lenp, ppos);
        }
 
        return r;
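
The nf_log hunks rely on two properties of proc_dostring(): on writes it can append to whatever the buffer already holds, so buf must start as an empty string, and the user-space copy it performs may sleep, so the read side now snapshots the logger name into buf under nf_log_mutex and calls proc_dostring() only after dropping the lock. A userspace model of that snapshot-under-lock shape, with illustrative names:

/* Userspace model of the snapshot-under-lock shape: copy the shared
 * name into a private buffer under the lock, drop the lock, then do
 * the potentially blocking copy from the snapshot. Names illustrative.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
static const char *current_logger = "nf_log_ipv4";      /* shared state */

static void report_logger(char *out, size_t outlen)
{
        char buf[64];

        buf[0] = '\0';                  /* start from an empty string */
        pthread_mutex_lock(&log_mutex);
        strncat(buf, current_logger ? current_logger : "NONE",
                sizeof(buf) - 1);
        pthread_mutex_unlock(&log_mutex);

        /* the slow, possibly blocking copy happens outside the lock */
        snprintf(out, outlen, "%s", buf);
}

int main(void)
{
        char out[64];

        report_logger(out, sizeof(out));
        printf("%s\n", out);
        return 0;
}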
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
new file mode 100644 (file)
index 0000000..8147896
--- /dev/null
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netfilter/nf_tables_core.h>
+
+static int __init nf_tables_set_module_init(void)
+{
+       nft_register_set(&nft_set_hash_fast_type);
+       nft_register_set(&nft_set_hash_type);
+       nft_register_set(&nft_set_rhash_type);
+       nft_register_set(&nft_set_bitmap_type);
+       nft_register_set(&nft_set_rbtree_type);
+
+       return 0;
+}
+
+static void __exit nf_tables_set_module_exit(void)
+{
+       nft_unregister_set(&nft_set_rbtree_type);
+       nft_unregister_set(&nft_set_bitmap_type);
+       nft_unregister_set(&nft_set_rhash_type);
+       nft_unregister_set(&nft_set_hash_type);
+       nft_unregister_set(&nft_set_hash_fast_type);
+}
+
+module_init(nf_tables_set_module_init);
+module_exit(nf_tables_set_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFT_SET();
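
The new nf_tables_set_core.c folds the former rbtree, hash, and bitmap set modules into a single module, and its init registers all five backends without checking nft_register_set() return values. A more defensive variant, assuming nft_register_set() reports failure through its return value as the removed per-module inits did, might look like this sketch:

/* Defensive variant of the combined init, assuming nft_register_set()
 * reports failure through its return value as the removed per-module
 * inits did. Illustrative sketch only.
 */
static struct nft_set_type *set_types[] = {
        &nft_set_hash_fast_type,
        &nft_set_hash_type,
        &nft_set_rhash_type,
        &nft_set_bitmap_type,
        &nft_set_rbtree_type,
};

static int __init nf_tables_set_module_init(void)
{
        int i, err;

        for (i = 0; i < ARRAY_SIZE(set_types); i++) {
                err = nft_register_set(set_types[i]);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)
                nft_unregister_set(set_types[i]);
        return err;
}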
index 4ccd2988f9db637166358335d8e26299c7237bec..ea4ba551abb28cb25c833dc408e23d1313b21bb4 100644 (file)
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
+       [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+       [NFQA_CFG_MASK]         = { .type = NLA_U32 },
+       [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static const struct nf_queue_handler nfqh = {
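
The nfqnl policy additions give the previously unlisted u32 attributes explicit types, so nla_parse()-time validation rejects undersized payloads before any handler dereferences them. An illustrative fragment of the declare-then-consume pattern this enables (byte-order handling follows nfqnl's big-endian attributes):

/* Declare-then-consume pattern enabled by the added entries: with
 * .type = NLA_U32 in the policy, validation guarantees at least four
 * bytes before the attribute is read. Illustrative fragment.
 */
static const struct nla_policy example_cfg_policy[NFQA_CFG_MAX + 1] = {
        [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
};

static void example_consume(const struct nlattr * const attrs[])
{
        if (attrs[NFQA_CFG_QUEUE_MAXLEN]) {
                u32 maxlen = ntohl(nla_get_be32(attrs[NFQA_CFG_QUEUE_MAXLEN]));

                pr_debug("queue maxlen=%u\n", maxlen);
        }
}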
index 8d1ff654e5aff1dfd5c2ace7693876568ea3377a..32535eea51b296ab1f2cb5bdd06972497f380a78 100644 (file)
@@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
        family = ctx->family;
 
+       if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
+           strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
+           strcmp(tg_name, "standard") == 0)
+               return ERR_PTR(-EINVAL);
+
        /* Re-use the existing target if it's already loaded. */
        list_for_each_entry(nft_target, &nft_target_list, head) {
                struct xt_target *target = nft_target->ops.data;
 
+               if (!target->target)
+                       continue;
+
                if (nft_target_cmp(target, tg_name, rev, family))
                        return &nft_target->ops;
        }
@@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx,
        if (IS_ERR(target))
                return ERR_PTR(-ENOENT);
 
+       if (!target->target) {
+               err = -EINVAL;
+               goto err;
+       }
+
        if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
                err = -EINVAL;
                goto err;
index d6626e01c7ee6b0c25a2197f75309030edca34c6..128bc16f52dd436aa78ac21ae45be4cf69a70f00 100644 (file)
@@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
 }
 
-static struct nft_set_type nft_bitmap_type __read_mostly = {
+struct nft_set_type nft_set_bitmap_type __read_mostly = {
        .owner          = THIS_MODULE,
        .ops            = {
                .privsize       = nft_bitmap_privsize,
@@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = {
                .get            = nft_bitmap_get,
        },
 };
-
-static int __init nft_bitmap_module_init(void)
-{
-       return nft_register_set(&nft_bitmap_type);
-}
-
-static void __exit nft_bitmap_module_exit(void)
-{
-       nft_unregister_set(&nft_bitmap_type);
-}
-
-module_init(nft_bitmap_module_init);
-module_exit(nft_bitmap_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_SET();
index 6f9a1365a09f07c517804cec45e31cd657f93337..72ef35b51cac9c75ae2c9e4ea342ba0e872f8ade 100644 (file)
@@ -654,7 +654,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
        return true;
 }
 
-static struct nft_set_type nft_rhash_type __read_mostly = {
+struct nft_set_type nft_set_rhash_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT |
                          NFT_SET_TIMEOUT | NFT_SET_EVAL,
@@ -677,7 +677,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = {
        },
 };
 
-static struct nft_set_type nft_hash_type __read_mostly = {
+struct nft_set_type nft_set_hash_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
        .ops            = {
@@ -697,7 +697,7 @@ static struct nft_set_type nft_hash_type __read_mostly = {
        },
 };
 
-static struct nft_set_type nft_hash_fast_type __read_mostly = {
+struct nft_set_type nft_set_hash_fast_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_MAP | NFT_SET_OBJECT,
        .ops            = {
@@ -716,26 +716,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = {
                .get            = nft_hash_get,
        },
 };
-
-static int __init nft_hash_module_init(void)
-{
-       if (nft_register_set(&nft_hash_fast_type) ||
-           nft_register_set(&nft_hash_type) ||
-           nft_register_set(&nft_rhash_type))
-               return 1;
-       return 0;
-}
-
-static void __exit nft_hash_module_exit(void)
-{
-       nft_unregister_set(&nft_rhash_type);
-       nft_unregister_set(&nft_hash_type);
-       nft_unregister_set(&nft_hash_fast_type);
-}
-
-module_init(nft_hash_module_init);
-module_exit(nft_hash_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
index 7f3a9a211034b2dee751dd776e1b5f59db6c6b61..1f8f257cb518b7a5a5b11dccb7734ec51778290c 100644 (file)
@@ -462,7 +462,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
        return true;
 }
 
-static struct nft_set_type nft_rbtree_type __read_mostly = {
+struct nft_set_type nft_set_rbtree_type __read_mostly = {
        .owner          = THIS_MODULE,
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .ops            = {
@@ -481,20 +481,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = {
                .get            = nft_rbtree_get,
        },
 };
-
-static int __init nft_rbtree_module_init(void)
-{
-       return nft_register_set(&nft_rbtree_type);
-}
-
-static void __exit nft_rbtree_module_exit(void)
-{
-       nft_unregister_set(&nft_rbtree_type);
-}
-
-module_init(nft_rbtree_module_init);
-module_exit(nft_rbtree_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
index 58fce4e749a97deb7f50ee96cb328d45624ccc8c..d76550a8b642aafd96853332d18db898e43ff587 100644 (file)
@@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
         * addresses, this happens if the redirect already happened
         * and the current packet belongs to an already established
         * connection */
-       sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+       sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                   iph->saddr, iph->daddr,
                                   hp->source, hp->dest,
                                   skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
        else if (!sk)
                /* no, there's no established connection, check if
                 * there's a listener on the redirected addr/port */
-               sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+               sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
                                           iph->saddr, laddr,
                                           hp->source, lport,
                                           skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
         * addresses, this happens if the redirect already happened
         * and the current packet belongs to an already established
         * connection */
-       sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto,
+       sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
                                   &iph->saddr, &iph->daddr,
                                   hp->source, hp->dest,
                                   xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
        else if (!sk)
                /* no, there's no established connection, check if
                 * there's a listener on the redirected addr/port */
-               sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp,
+               sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
                                           tproto, &iph->saddr, laddr,
                                           hp->source, lport,
                                           xt_in(par), NF_TPROXY_LOOKUP_LISTENER);
index 1189b84413d5a8236f878a9cc99bcfa09368ec69..393573a99a5a34d3ebaad3a71b36293b6c2fb19f 100644 (file)
@@ -2658,7 +2658,7 @@ static const struct proto_ops netlink_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      netlink_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        netlink_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index 93fbcafbf3886d34b0be87244c405b8319df89dd..03f37c4e64fe44cd822952225736084ad151b2e8 100644 (file)
@@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       nr_accept,
        .getname        =       nr_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       nr_ioctl,
        .listen         =       nr_listen,
        .shutdown       =       sock_no_shutdown,
index 2ceefa183ceed6ba3d06f2aae958104a514f2146..6a196e438b6c03d4c86e0a8a78af1c496a7e599b 100644 (file)
@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
                pr_debug("Fragment %zd bytes remaining %zd",
                         frag_len, remaining_len);
 
-               pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+               pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
                                         frag_len + LLCP_HEADER_SIZE, &err);
                if (pdu == NULL) {
-                       pr_err("Could not allocate PDU\n");
-                       continue;
+                       pr_err("Could not allocate PDU (error=%d)\n", err);
+                       len -= remaining_len;
+                       if (len == 0)
+                               len = err;
+                       break;
                }
 
                pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
index ab5bb14b49af92241b12584925983de43b143bb7..ea0c0c6f187429426f4849347c09b847f0111fff 100644 (file)
@@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
        return 0;
 }
 
-static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
+                                  poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
 
        pr_debug("%p\n", sk);
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
@@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = llcp_sock_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = llcp_sock_listen,
        .shutdown       = sock_no_shutdown,
@@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = llcp_sock_getname,
-       .poll_mask      = llcp_sock_poll_mask,
+       .poll           = llcp_sock_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 60c322531c498f1d43582be5b76f3a2f575ed5bc..e2188deb08dc3bb16e2a60808b274a4a092fd2ee 100644 (file)
@@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
index 9696ef96b719bf24625adea2a959deac1d2a975f..1a30e165eeb4fd1b884a0d5cd79c6823a5de9feb 100644 (file)
@@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        __skb_pull(skb, nsh_len);
 
        skb_reset_mac_header(skb);
-       skb_reset_mac_len(skb);
+       skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
        skb->protocol = proto;
 
        features &= NETIF_F_SG;
index 50809748c1279ea17b7499acbec5699443804f64..9b27d0cd766d560fdb67ee2e3bbfc415963db8c6 100644 (file)
@@ -2262,6 +2262,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                if (po->stats.stats1.tp_drops)
                        status |= TP_STATUS_LOSING;
        }
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
@@ -2269,15 +2276,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
-       if (do_vnet) {
-               if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                           sizeof(struct virtio_net_hdr),
-                                           vio_le(), true, 0)) {
-                       spin_lock(&sk->sk_receive_queue.lock);
-                       goto drop_n_account;
-               }
-       }
-
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2880,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                        goto out_free;
        } else if (reserve) {
                skb_reserve(skb, -reserve);
+               if (len < reserve)
+                       skb_reset_network_header(skb);
        }
 
        /* Returns -EFAULT on error */
@@ -4078,11 +4078,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
        return 0;
 }
 
-static __poll_t packet_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t packet_poll(struct file *file, struct socket *sock,
+                               poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
-       __poll_t mask = datagram_poll_mask(sock, events);
+       __poll_t mask = datagram_poll(file, sock, wait);
 
        spin_lock_bh(&sk->sk_receive_queue.lock);
        if (po->rx_ring.pg_vec) {
@@ -4424,7 +4425,7 @@ static const struct proto_ops packet_ops_spkt = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname_spkt,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
@@ -4445,7 +4446,7 @@ static const struct proto_ops packet_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       sock_no_accept,
        .getname =      packet_getname,
-       .poll_mask =    packet_poll_mask,
+       .poll =         packet_poll,
        .ioctl =        packet_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
index c295c4e20f012f31c1b443c5f859969caf412cec..30187990257fdb07a57c03707d6e1af0740b42f0 100644 (file)
@@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
        return sizeof(struct sockaddr_pn);
 }
 
-static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
+                                       poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct pep_sock *pn = pep_sk(sk);
        __poll_t mask = 0;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = pn_socket_accept,
        .getname        = pn_socket_getname,
-       .poll_mask      = pn_socket_poll_mask,
+       .poll           = pn_socket_poll,
        .ioctl          = pn_socket_ioctl,
        .listen         = pn_socket_listen,
        .shutdown       = sock_no_shutdown,
index 1b5025ea5b0426272145b56fa42e21d908612243..86e1e37eb4e8a68beeecd3bfeeb597951259ea81 100644 (file)
@@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
        hdr->type = cpu_to_le32(type);
        hdr->src_node_id = cpu_to_le32(from->sq_node);
        hdr->src_port_id = cpu_to_le32(from->sq_port);
-       hdr->dst_node_id = cpu_to_le32(to->sq_node);
-       hdr->dst_port_id = cpu_to_le32(to->sq_port);
+       if (to->sq_port == QRTR_PORT_CTRL) {
+               hdr->dst_node_id = cpu_to_le32(node->nid);
+               hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
+       } else {
+               hdr->dst_node_id = cpu_to_le32(to->sq_node);
+               hdr->dst_port_id = cpu_to_le32(to->sq_port);
+       }
 
        hdr->size = cpu_to_le32(len);
        hdr->confirm_rx = 0;
@@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        node = NULL;
        if (addr->sq_node == QRTR_NODE_BCAST) {
                enqueue_fn = qrtr_bcast_enqueue;
+               if (addr->sq_port != QRTR_PORT_CTRL) {
+                       release_sock(sk);
+                       return -ENOTCONN;
+               }
        } else if (addr->sq_node == ipc->us.sq_node) {
                enqueue_fn = qrtr_local_enqueue;
        } else {
@@ -1023,7 +1032,7 @@ static const struct proto_ops qrtr_proto_ops = {
        .recvmsg        = qrtr_recvmsg,
        .getname        = qrtr_getname,
        .ioctl          = qrtr_ioctl,
-       .poll_mask      = datagram_poll_mask,
+       .poll           = datagram_poll,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_no_setsockopt,
        .getsockopt     = sock_no_getsockopt,
index abef75da89a7450092aefc46ed902e6602fba7a6..cfb05953b0e57afad21fd708f0df42d63c77cd55 100644 (file)
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
 
 int rds_conn_init(void)
 {
+       int ret;
+
+       ret = rds_loop_net_init(); /* register pernet callback */
+       if (ret)
+               return ret;
+
        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
-       if (!rds_conn_slab)
+       if (!rds_conn_slab) {
+               rds_loop_net_exit();
                return -ENOMEM;
+       }
 
        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
@@ -676,6 +684,7 @@ int rds_conn_init(void)
 
 void rds_conn_exit(void)
 {
+       rds_loop_net_exit(); /* unregister pernet callback */
        rds_loop_exit();
 
        WARN_ON(!hlist_empty(rds_conn_hash));
index dac6218a460ed4d4a5b7b03ad4f6056a68784a16..feea1f96ee2ad582dce8f815442da1bbf6e0508a 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/in.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "rds_single_path.h"
 #include "rds.h"
 
 static DEFINE_SPINLOCK(loop_conns_lock);
 static LIST_HEAD(loop_conns);
+static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
+
+static void rds_loop_set_unloading(void)
+{
+       atomic_set(&rds_loop_unloading, 1);
+}
+
+static bool rds_loop_is_unloading(struct rds_connection *conn)
+{
+       return atomic_read(&rds_loop_unloading) != 0;
+}
 
 /*
  * This 'loopback' transport is a special case for flows that originate
@@ -165,6 +178,8 @@ void rds_loop_exit(void)
        struct rds_loop_connection *lc, *_lc;
        LIST_HEAD(tmp_list);
 
+       rds_loop_set_unloading();
+       synchronize_rcu();
        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&loop_conns_lock);
        list_splice(&loop_conns, &tmp_list);
@@ -177,6 +192,46 @@ void rds_loop_exit(void)
        }
 }
 
+static void rds_loop_kill_conns(struct net *net)
+{
+       struct rds_loop_connection *lc, *_lc;
+       LIST_HEAD(tmp_list);
+
+       spin_lock_irq(&loop_conns_lock);
+       list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node)  {
+               struct net *c_net = read_pnet(&lc->conn->c_net);
+
+               if (net != c_net)
+                       continue;
+               list_move_tail(&lc->loop_node, &tmp_list);
+       }
+       spin_unlock_irq(&loop_conns_lock);
+
+       list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+               WARN_ON(lc->conn->c_passive);
+               rds_conn_destroy(lc->conn);
+       }
+}
+
+static void __net_exit rds_loop_exit_net(struct net *net)
+{
+       rds_loop_kill_conns(net);
+}
+
+static struct pernet_operations rds_loop_net_ops = {
+       .exit = rds_loop_exit_net,
+};
+
+int rds_loop_net_init(void)
+{
+       return register_pernet_device(&rds_loop_net_ops);
+}
+
+void rds_loop_net_exit(void)
+{
+       unregister_pernet_device(&rds_loop_net_ops);
+}
+
 /*
  * This is missing .xmit_* because loop doesn't go through generic
  * rds_send_xmit() and doesn't call rds_recv_incoming().  .listen_stop and
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = {
        .inc_free               = rds_loop_inc_free,
        .t_name                 = "loopback",
        .t_type                 = RDS_TRANS_LOOP,
+       .t_unloading            = rds_loop_is_unloading,
 };
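
The rds loop.c changes hook network-namespace teardown so loopback connections owned by a dying netns are destroyed before the namespace goes away. register_pernet_device() is the standard way to receive such callbacks; a minimal sketch of the pattern with placeholder names:

/* Minimal pernet callback pattern as used above: the .exit hook runs
 * for every namespace being torn down, letting a module release its
 * per-namespace objects. Illustrative sketch with placeholder names.
 */
static void __net_exit example_exit_net(struct net *net)
{
        /* walk module state and destroy objects tied to @net */
}

static struct pernet_operations example_net_ops = {
        .exit = example_exit_net,
};

static int __init example_init(void)
{
        return register_pernet_device(&example_net_ops);
}

static void __exit example_exit(void)
{
        unregister_pernet_device(&example_net_ops);
}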
index 469fa4b2da4f38b5fb62358507cb9d9ca62aa825..bbc8cdd030df3137ea250578cb3d429a86fd68f2 100644 (file)
@@ -5,6 +5,8 @@
 /* loop.c */
 extern struct rds_transport rds_loop_transport;
 
+int rds_loop_net_init(void);
+void rds_loop_net_exit(void);
 void rds_loop_exit(void);
 
 #endif
index ebe42e7eb45697030367c4baba455b50c973c409..d00a0ef39a56b38cae4114654c44a3bddccb35ba 100644 (file)
@@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = {
        .socketpair     =       sock_no_socketpair,
        .accept         =       rose_accept,
        .getname        =       rose_getname,
-       .poll_mask      =       datagram_poll_mask,
+       .poll           =       datagram_poll,
        .ioctl          =       rose_ioctl,
        .listen         =       rose_listen,
        .shutdown       =       sock_no_shutdown,
index 3b1ac93efee22248ab01c3c8a610e874e99356b5..2b463047dd7ba93267feb584e1ffda280449a0b3 100644 (file)
@@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
 /*
  * permit an RxRPC socket to be polled
  */
-static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct rxrpc_sock *rx = rxrpc_sk(sk);
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* the socket is readable if there are any messages waiting on the Rx
         * queue */
@@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = rxrpc_poll_mask,
+       .poll           = rxrpc_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = rxrpc_listen,
        .shutdown       = rxrpc_shutdown,
index 526a8e491626efb65fcda10d875e6f55ca2168e8..6e7124e57918e98433f0d3302565ae4e0b9eaaf4 100644 (file)
@@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
        }
        params_old = rtnl_dereference(p->params);
 
-       params_new->action = parm->action;
+       p->tcf_action = parm->action;
        params_new->update_flags = parm->update_flags;
        rcu_assign_pointer(p->params, params_new);
        if (params_old)
@@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&p->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
 
-       action = params->action;
+       action = READ_ONCE(p->tcf_action);
        if (unlikely(action == TC_ACT_SHOT))
                goto drop_stats;
 
@@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
                .index   = p->tcf_index,
                .refcnt  = p->tcf_refcnt - ref,
                .bindcnt = p->tcf_bindcnt - bind,
+               .action  = p->tcf_action,
        };
        struct tcf_t t;
 
        params = rtnl_dereference(p->params);
-       opt.action = params->action;
        opt.update_flags = params->update_flags;
 
        if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
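
This act_csum change (mirrored in act_tunnel_key below) moves `action` out of
the RCU-managed params blob into the persistent action struct: the control path
stores it under RTNL, and the datapath reads it locklessly with READ_ONCE(). A
hedged sketch of that shape, with illustrative demo_* names:

    #include <linux/compiler.h>

    struct demo_act {
            int action;     /* stored under RTNL, read locklessly */
    };

    /* Control path: runs with rtnl_lock() held, which orders this
     * plain store against other control-path writers. */
    static void demo_change(struct demo_act *a, int new_action)
    {
            a->action = new_action;
    }

    /* Datapath: READ_ONCE() keeps the compiler from tearing or
     * re-loading the value while the caller branches on it. */
    static int demo_fast_path(const struct demo_act *a)
    {
            return READ_ONCE(a->action);
    }
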
index 8527cfdc446d9bb82e8fa9fe1364dc13249b1e03..20d7d36b2fc9b9d3af256f48795da6e387f7f781 100644 (file)
@@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a)
        spin_unlock_bh(&ife->tcf_lock);
 
        p = rcu_dereference_protected(ife->params, 1);
-       kfree_rcu(p, rcu);
+       if (p)
+               kfree_rcu(p, rcu);
 }
 
 /* under ife->tcf_lock for existing action */
@@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        saddr = nla_data(tb[TCA_IFE_SMAC]);
        }
 
-       ife->tcf_action = parm->action;
-
        if (parm->flags & IFE_ENCODE) {
                if (daddr)
                        ether_addr_copy(p->eth_dst, daddr);
@@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL, NULL);
                if (err) {
 metadata_parse_err:
-                       if (exists)
-                               tcf_idr_release(*a, bind);
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +564,7 @@ metadata_parse_err:
                err = use_all_metadata(ife);
                if (err) {
                        if (ret == ACT_P_CREATED)
-                               _tcf_ife_cleanup(*a);
+                               tcf_idr_release(*a, bind);
 
                        if (exists)
                                spin_unlock_bh(&ife->tcf_lock);
@@ -576,6 +573,7 @@ metadata_parse_err:
                }
        }
 
+       ife->tcf_action = parm->action;
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
 
index 626dac81a48a6b2ab97e9d0c786b08989f693288..9bc6c2ae98a56ceb2a4719be91a1937b5441a58d 100644 (file)
@@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
 
        tcf_lastuse_update(&t->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
-       action = params->action;
+       action = READ_ONCE(t->tcf_action);
 
        switch (params->tcft_action) {
        case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
        params_old = rtnl_dereference(t->params);
 
-       params_new->action = parm->action;
+       t->tcf_action = parm->action;
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
 
@@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
                .index    = t->tcf_index,
                .refcnt   = t->tcf_refcnt - ref,
                .bindcnt  = t->tcf_bindcnt - bind,
+               .action   = t->tcf_action,
        };
        struct tcf_t tm;
 
        params = rtnl_dereference(t->params);
 
        opt.t_action = params->tcft_action;
-       opt.action = params->action;
 
        if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
index cdc3c87c53e62d4db4bb18fa5f59d7889b9866cb..f74513a7c7a8ed179bfbeabb17fe60dd2f9b6eb2 100644 (file)
@@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
        for (tp = rtnl_dereference(chain->filter_chain);
             tp; tp = rtnl_dereference(tp->next))
                tfilter_notify(net, oskb, n, tp, block,
-                              q, parent, 0, event, false);
+                              q, parent, NULL, event, false);
 }
 
 static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
                        memset(&cb->args[1], 0,
                               sizeof(cb->args) - sizeof(cb->args[0]));
                if (cb->args[1] == 0) {
-                       if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
+                       if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
                                          NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                          RTM_NEWTFILTER) <= 0)
index 2b5be42a9f1ca8e63952158ed2b9339e1a308d0b..9e8b26a80fb3ea9e57b6b22d259eaefe171eca09 100644 (file)
@@ -66,7 +66,7 @@ struct fl_flow_mask {
        struct rhashtable_params filter_ht_params;
        struct flow_dissector dissector;
        struct list_head filters;
-       struct rcu_head rcu;
+       struct rcu_work rwork;
        struct list_head list;
 };
 
@@ -203,6 +203,20 @@ static int fl_init(struct tcf_proto *tp)
        return rhashtable_init(&head->ht, &mask_ht_params);
 }
 
+static void fl_mask_free(struct fl_flow_mask *mask)
+{
+       rhashtable_destroy(&mask->ht);
+       kfree(mask);
+}
+
+static void fl_mask_free_work(struct work_struct *work)
+{
+       struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+                                                struct fl_flow_mask, rwork);
+
+       fl_mask_free(mask);
+}
+
 static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                        bool async)
 {
@@ -210,12 +224,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
                return false;
 
        rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
-       rhashtable_destroy(&mask->ht);
        list_del_rcu(&mask->list);
        if (async)
-               kfree_rcu(mask, rcu);
+               tcf_queue_work(&mask->rwork, fl_mask_free_work);
        else
-               kfree(mask);
+               fl_mask_free(mask);
 
        return true;
 }
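
The mask's rhashtable may still be referenced by concurrent readers until an
RCU grace period elapses, and rhashtable_destroy() can sleep, so it must not
run from the softirq context that kfree_rcu() callbacks use. The patch
therefore defers the free through tcf_queue_work(), which schedules a struct
rcu_work after a grace period. A generic sketch of the underlying rcu_work
mechanism, with hypothetical demo_* names:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct rcu_work rwork;
            /* ... payload that needs a sleepable destructor ... */
    };

    static void demo_free_work(struct work_struct *work)
    {
            struct demo_obj *obj = container_of(to_rcu_work(work),
                                                struct demo_obj, rwork);

            /* Runs in process context, after an RCU grace period, so
             * sleeping destructors such as rhashtable_destroy() are
             * safe here. */
            kfree(obj);
    }

    static void demo_release(struct demo_obj *obj)
    {
            INIT_RCU_WORK(&obj->rwork, demo_free_work);
            queue_rcu_work(system_wq, &obj->rwork);
    }
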
index c98a61e980baa68931f7e974582eb1c43ed60cf5..9c4c2bb547d7ea1da26e956a77b23592d467365b 100644 (file)
@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        qdisc_drop(skb, sch, to_free);
-       return NET_XMIT_SUCCESS;
+       return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
index cd2e0e342fb6235840860ff15ceaeb73eddaa492..6c0a9d5dbf9441d00a832915e23d6b82bd8ab313 100644 (file)
@@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        q->cparams.mtu = psched_mtu(qdisc_dev(sch));
 
        if (opt) {
-               int err = fq_codel_change(sch, opt, extack);
+               err = fq_codel_change(sch, opt, extack);
                if (err)
-                       return err;
+                       goto init_failure;
        }
 
        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
-               return err;
+               goto init_failure;
 
        if (!q->flows) {
                q->flows = kvcalloc(q->flows_cnt,
                                    sizeof(struct fq_codel_flow),
                                    GFP_KERNEL);
-               if (!q->flows)
-                       return -ENOMEM;
+               if (!q->flows) {
+                       err = -ENOMEM;
+                       goto init_failure;
+               }
                q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
-               if (!q->backlogs)
-                       return -ENOMEM;
+               if (!q->backlogs) {
+                       err = -ENOMEM;
+                       goto alloc_failure;
+               }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;
 
@@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
        else
                sch->flags &= ~TCQ_F_CAN_BYPASS;
        return 0;
+
+alloc_failure:
+       kvfree(q->flows);
+       q->flows = NULL;
+init_failure:
+       q->flows_cnt = 0;
+       return err;
 }
 
 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
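
The fq_codel fix converts the early returns into a goto-based unwind and,
crucially, zeroes q->flows_cnt on failure so the destroy path cannot walk
arrays that were never (fully) allocated. A compact sketch of the two-label
unwind idiom, with placeholder names:

    #include <linux/mm.h>
    #include <linux/errno.h>

    struct demo {
            u32 n;
            u32 *a;
            u32 *b;
    };

    static int demo_init(struct demo *q)
    {
            int err;

            q->a = kvcalloc(q->n, sizeof(u32), GFP_KERNEL);
            if (!q->a) {
                    err = -ENOMEM;
                    goto init_failure;
            }
            q->b = kvcalloc(q->n, sizeof(u32), GFP_KERNEL);
            if (!q->b) {
                    err = -ENOMEM;
                    goto alloc_failure;     /* undo the first allocation */
            }
            return 0;

    alloc_failure:
            kvfree(q->a);
            q->a = NULL;
    init_failure:
            q->n = 0;       /* destroy path must not walk unallocated arrays */
            return err;
    }
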
index 3ae9877ea2057d0ba517c84d38f6ba6a79ff6ef8..3278a76f6861576ba7e42cf9f91a62f96443cb3a 100644 (file)
@@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
                if (next_time == 0 || next_time > q->root.cl_cfmin)
                        next_time = q->root.cl_cfmin;
        }
-       WARN_ON(next_time == 0);
-       qdisc_watchdog_schedule(&q->watchdog, next_time);
+       if (next_time)
+               qdisc_watchdog_schedule(&q->watchdog, next_time);
 }
 
 static int
index 79daa98208c391c780440144d69bc7be875c3476..bfb9f812e2ef9fa605b08dc1f534781573c3abf8 100644 (file)
@@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
        /* Account for a different sized first fragment */
        if (msg_len >= first_len) {
                msg->can_delay = 0;
-               SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+               if (msg_len > first_len)
+                       SCTP_INC_STATS(sock_net(asoc->base.sk),
+                                      SCTP_MIB_FRAGUSRMSGS);
        } else {
                /* Which may be the only one... */
                first_len = msg_len;
index 7339918a805d93db8a94fed627f99962e07e3267..0cd2e764f47ff0874438301324de25e4bf33dd95 100644 (file)
@@ -1010,7 +1010,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = sctp_getname,
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,
index 5dffbc4930086699cefa10f704de5fd2068169c8..67f73d3a1356b93d3896b6985a65e70615902b18 100644 (file)
@@ -1016,7 +1016,7 @@ static const struct proto_ops inet_seqpacket_ops = {
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
        .getname           = inet_getname,      /* Semantics are different.  */
-       .poll_mask         = sctp_poll_mask,
+       .poll              = sctp_poll,
        .ioctl             = inet_ioctl,
        .listen            = sctp_inet_listen,
        .shutdown          = inet_shutdown,     /* Looks harmless.  */
index d20f7addee19ecb794fa85f9ed73e8b40784a095..ce620e878538be99e1f79784582d0da48ba292ea 100644 (file)
@@ -7717,12 +7717,14 @@ out:
  * here, again, by modeling the current TCP/UDP code.  We don't have
  * a good way to test with it yet.
  */
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct sctp_sock *sp = sctp_sk(sk);
        __poll_t mask;
 
+       poll_wait(file, sk_sleep(sk), wait);
+
        sock_rps_record_flow(sk);
 
        /* A TCP-style listening socket becomes readable when the accept queue
index 445b7ef61677cfdb1172486e432b9bd6a0f853d5..12cac85da994356ef24cf264e1fb8451f2e303dc 100644 (file)
@@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 
        if (dst) {
                /* Re-fetch, as under layers may have a higher minimum size */
-               pmtu = SCTP_TRUNC4(dst_mtu(dst));
+               pmtu = sctp_dst_mtu(dst);
                change = t->pathmtu != pmtu;
        }
        t->pathmtu = pmtu;
index da7f02edcd374c44437e34a2705f410317ea536d..05e4ffe5aabde6baa711b1396484cf037fbccaee 100644 (file)
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending);  /* serialize link group
                                                 */
 
 static void smc_tcp_listen_work(struct work_struct *);
+static void smc_connect_work(struct work_struct *);
 
 static void smc_set_keepalive(struct sock *sk, int val)
 {
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock)
                goto out;
 
        smc = smc_sk(sk);
+
+       /* cleanup for a dangling non-blocking connect */
+       flush_work(&smc->connect_work);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
                 * sock lock for child sockets again
@@ -140,7 +147,8 @@ static int smc_release(struct socket *sock)
                smc->clcsock = NULL;
        }
        if (smc->use_fallback) {
-               sock_put(sk); /* passive closing */
+               if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+                       sock_put(sk); /* passive closing */
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);
        }
@@ -186,6 +194,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        sk->sk_protocol = protocol;
        smc = smc_sk(sk);
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       INIT_WORK(&smc->connect_work, smc_connect_work);
        INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        INIT_LIST_HEAD(&smc->accept_q);
        spin_lock_init(&smc->accept_q_lock);
@@ -409,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
 {
        int rc;
 
-       if (reason_code < 0) /* error, fallback is not possible */
+       if (reason_code < 0) { /* error, fallback is not possible */
+               if (smc->sk.sk_state == SMC_INIT)
+                       sock_put(&smc->sk); /* passive closing */
                return reason_code;
+       }
        if (reason_code != SMC_CLC_DECL_REPLY) {
                rc = smc_clc_send_decline(smc, reason_code);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (smc->sk.sk_state == SMC_INIT)
+                               sock_put(&smc->sk); /* passive closing */
                        return rc;
+               }
        }
        return smc_connect_fallback(smc);
 }
@@ -427,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
                smc_lgr_forget(smc->conn.lgr);
        mutex_unlock(&smc_create_lgr_pending);
        smc_conn_free(&smc->conn);
-       if (reason_code < 0 && smc->sk.sk_state == SMC_INIT)
-               sock_put(&smc->sk); /* passive closing */
        return reason_code;
 }
 
@@ -576,6 +589,35 @@ static int __smc_connect(struct smc_sock *smc)
        return 0;
 }
 
+static void smc_connect_work(struct work_struct *work)
+{
+       struct smc_sock *smc = container_of(work, struct smc_sock,
+                                           connect_work);
+       int rc;
+
+       lock_sock(&smc->sk);
+       rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
+                           smc->connect_info->alen, smc->connect_info->flags);
+       if (smc->clcsock->sk->sk_err) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               goto out;
+       }
+       if (rc < 0) {
+               smc->sk.sk_err = -rc;
+               goto out;
+       }
+
+       rc = __smc_connect(smc);
+       if (rc < 0)
+               smc->sk.sk_err = -rc;
+
+out:
+       smc->sk.sk_state_change(&smc->sk);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+       release_sock(&smc->sk);
+}
+
 static int smc_connect(struct socket *sock, struct sockaddr *addr,
                       int alen, int flags)
 {
@@ -605,15 +647,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
-       rc = kernel_connect(smc->clcsock, addr, alen, flags);
-       if (rc)
-               goto out;
+       if (flags & O_NONBLOCK) {
+               if (smc->connect_info) {
+                       rc = -EALREADY;
+                       goto out;
+               }
+               smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
+               if (!smc->connect_info) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               smc->connect_info->alen = alen;
+               smc->connect_info->flags = flags ^ O_NONBLOCK;
+               memcpy(&smc->connect_info->addr, addr, alen);
+               schedule_work(&smc->connect_work);
+               rc = -EINPROGRESS;
+       } else {
+               rc = kernel_connect(smc->clcsock, addr, alen, flags);
+               if (rc)
+                       goto out;
 
-       rc = __smc_connect(smc);
-       if (rc < 0)
-               goto out;
-       else
-               rc = 0; /* success cases including fallback */
+               rc = __smc_connect(smc);
+               if (rc < 0)
+                       goto out;
+               else
+                       rc = 0; /* success cases including fallback */
+       }
 
 out:
        release_sock(sk);
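
For O_NONBLOCK, the patch defers the whole connect to a workqueue: the address
and flags are stashed in a freshly allocated smc_connect_info, -EINPROGRESS is
returned immediately, and smc_connect_work() later performs the blocking TCP
connect plus the CLC handshake. A simplified sketch of that defer-to-worker
shape (the demo_* names are placeholders, not the full SMC logic):

    #include <linux/slab.h>
    #include <linux/socket.h>
    #include <linux/workqueue.h>
    #include <linux/fcntl.h>
    #include <linux/errno.h>

    struct demo_connect_info {
            int flags;
            int alen;
            struct sockaddr addr;   /* first of alen copied-in bytes */
    };

    struct demo_sock {
            struct demo_connect_info *connect_info;
            struct work_struct connect_work; /* runs the blocking part */
    };

    static int demo_connect_nonblock(struct demo_sock *dsk,
                                     struct sockaddr *addr, int alen,
                                     int flags)
    {
            if (dsk->connect_info)
                    return -EALREADY;       /* a connect is already queued */

            dsk->connect_info = kzalloc(sizeof(*dsk->connect_info) + alen,
                                        GFP_KERNEL);
            if (!dsk->connect_info)
                    return -ENOMEM;

            dsk->connect_info->alen = alen;
            /* the worker performs the blocking variant of the connect */
            dsk->connect_info->flags = flags & ~O_NONBLOCK;
            memcpy(&dsk->connect_info->addr, addr, alen);
            schedule_work(&dsk->connect_work);
            return -EINPROGRESS;
    }
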
@@ -1273,40 +1332,26 @@ static __poll_t smc_accept_poll(struct sock *parent)
        return mask;
 }
 
-static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t smc_poll(struct file *file, struct socket *sock,
+                            poll_table *wait)
 {
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
        struct smc_sock *smc;
-       int rc;
 
        if (!sk)
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       sock_hold(sk);
-       lock_sock(sk);
        if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
                /* delegate to CLC child sock */
-               release_sock(sk);
-               mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
-               lock_sock(sk);
+               mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
-               if (sk->sk_err) {
+               if (sk->sk_err)
                        mask |= EPOLLERR;
-               } else {
-                       /* if non-blocking connect finished ... */
-                       if (sk->sk_state == SMC_INIT &&
-                           mask & EPOLLOUT &&
-                           smc->clcsock->sk->sk_state != TCP_CLOSE) {
-                               rc = __smc_connect(smc);
-                               if (rc < 0)
-                                       mask |= EPOLLERR;
-                               /* success cases including fallback */
-                               mask |= EPOLLOUT | EPOLLWRNORM;
-                       }
-               }
        } else {
+               if (sk->sk_state != SMC_CLOSED)
+                       sock_poll_wait(file, sk_sleep(sk), wait);
                if (sk->sk_err)
                        mask |= EPOLLERR;
                if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1332,10 +1377,7 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
                }
                if (smc->conn.urg_state == SMC_URG_VALID)
                        mask |= EPOLLPRI;
-
        }
-       release_sock(sk);
-       sock_put(sk);
 
        return mask;
 }
@@ -1415,7 +1457,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       get_user(val, (int __user *)optval);
+       if (get_user(val, (int __user *)optval))
+               return -EFAULT;
 
        lock_sock(sk);
        switch (optname) {
@@ -1483,10 +1526,13 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                        return -EBADF;
                return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
        }
+       lock_sock(&smc->sk);
        switch (cmd) {
        case SIOCINQ: /* same as FIONREAD */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1495,8 +1541,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCOUTQ:
                /* output queue size (not send + not acked) */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1506,8 +1554,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                break;
        case SIOCOUTQNSD:
                /* output queue size (not send only) */
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED)
                        answ = 0;
@@ -1515,8 +1565,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                        answ = smc_tx_prepared_sends(&smc->conn);
                break;
        case SIOCATMARK:
-               if (smc->sk.sk_state == SMC_LISTEN)
+               if (smc->sk.sk_state == SMC_LISTEN) {
+                       release_sock(&smc->sk);
                        return -EINVAL;
+               }
                if (smc->sk.sk_state == SMC_INIT ||
                    smc->sk.sk_state == SMC_CLOSED) {
                        answ = 0;
@@ -1532,8 +1584,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
                }
                break;
        default:
+               release_sock(&smc->sk);
                return -ENOIOCTLCMD;
        }
+       release_sock(&smc->sk);
 
        return put_user(answ, (int __user *)arg);
 }
@@ -1619,7 +1673,7 @@ static const struct proto_ops smc_sock_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = smc_accept,
        .getname        = smc_getname,
-       .poll_mask      = smc_poll_mask,
+       .poll           = smc_poll,
        .ioctl          = smc_ioctl,
        .listen         = smc_listen,
        .shutdown       = smc_shutdown,
index 51ae1f10d81aa9390e76e392096e3f93c15b65fe..d7ca265704821a1862f84f209550c4b19fc0db59 100644 (file)
@@ -187,11 +187,19 @@ struct smc_connection {
        struct work_struct      close_work;     /* peer sent some closing */
 };
 
+struct smc_connect_info {
+       int                     flags;
+       int                     alen;
+       struct sockaddr         addr;
+};
+
 struct smc_sock {                              /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
+       struct smc_connect_info *connect_info;  /* connect address & flags */
+       struct work_struct      connect_work;   /* handle non-blocking connect */
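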
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
        struct list_head        accept_q;       /* sockets to be accepted */
index 717449b1da0b73d924488d43cd04ed0871607d1b..ae5d168653cecf804b20e49f27bb39bcf0385081 100644 (file)
@@ -250,6 +250,7 @@ out:
 int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                     u8 expected_type)
 {
+       long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
        struct sock *clc_sk = smc->clcsock->sk;
        struct smc_clc_msg_hdr *clcm = buf;
        struct msghdr msg = {NULL, 0};
@@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        memset(&msg, 0, sizeof(struct msghdr));
        iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
        krflags = MSG_WAITALL;
-       smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
        if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
                smc->sk.sk_err = EPROTO;
@@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
 
 out:
+       smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
        return reason_code;
 }
 
index fa41d988174146f6888d29db743b074d7b1ee1db..ac961dfb1ea1b775b666be3fdc0f292545703533 100644 (file)
@@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
        }
        switch (sk->sk_state) {
        case SMC_INIT:
+               sk->sk_state = SMC_PEERABORTWAIT;
+               break;
        case SMC_ACTIVE:
                sk->sk_state = SMC_PEERABORTWAIT;
                release_sock(sk);
index cee66640075242fc7fe863734ebf301d261e02d6..f82886b7d1d8394adada4998159a708c3c897a82 100644 (file)
@@ -495,7 +495,8 @@ out:
 
 void smc_tx_consumer_update(struct smc_connection *conn, bool force)
 {
-       union smc_host_cursor cfed, cons;
+       union smc_host_cursor cfed, cons, prod;
+       int sender_free = conn->rmb_desc->len;
        int to_confirm;
 
        smc_curs_write(&cons,
@@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
                       smc_curs_read(&conn->rx_curs_confirmed, conn),
                       conn);
        to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+       if (to_confirm > conn->rmbe_update_limit) {
+               smc_curs_write(&prod,
+                              smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+                              conn);
+               sender_free = conn->rmb_desc->len -
+                             smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+       }
 
        if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
            force ||
            ((to_confirm > conn->rmbe_update_limit) &&
-            ((to_confirm > (conn->rmb_desc->len / 2)) ||
+            ((sender_free <= (conn->rmb_desc->len / 2)) ||
              conn->local_rx_ctrl.prod_flags.write_blocked))) {
                if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
                    conn->alert_token_local) { /* connection healthy */
index 8a109012608a6132a65293c86cd175426b851cbe..85633622c94d011796517feb4d935b7ccba68445 100644 (file)
@@ -117,10 +117,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
 static int sock_mmap(struct file *file, struct vm_area_struct *vma);
 
 static int sock_close(struct inode *inode, struct file *file);
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events);
-static __poll_t sock_poll_mask(struct file *file, __poll_t);
-static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
+static __poll_t sock_poll(struct file *file,
+                             struct poll_table_struct *wait);
 static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long compat_sock_ioctl(struct file *file,
@@ -143,8 +141,6 @@ static const struct file_operations socket_file_ops = {
        .llseek =       no_llseek,
        .read_iter =    sock_read_iter,
        .write_iter =   sock_write_iter,
-       .get_poll_head = sock_get_poll_head,
-       .poll_mask =    sock_poll_mask,
        .poll =         sock_poll,
        .unlocked_ioctl = sock_ioctl,
 #ifdef CONFIG_COMPAT
@@ -1130,48 +1126,16 @@ out_release:
 }
 EXPORT_SYMBOL(sock_create_lite);
 
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
-               __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       if (!sock->ops->poll_mask)
-               return NULL;
-       sock_poll_busy_loop(sock, events);
-       return sk_sleep(sock->sk);
-}
-
-static __poll_t sock_poll_mask(struct file *file, __poll_t events)
-{
-       struct socket *sock = file->private_data;
-
-       /*
-        * We need to be sure we are in sync with the socket flags modification.
-        *
-        * This memory barrier is paired in the wq_has_sleeper.
-        */
-       smp_mb();
-
-       /* this socket can poll_ll so tell the system call */
-       return sock->ops->poll_mask(sock, events) |
-               (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0);
-}
-
 /* No kernel lock held - perfect */
 static __poll_t sock_poll(struct file *file, poll_table *wait)
 {
        struct socket *sock = file->private_data;
-       __poll_t events = poll_requested_events(wait), mask = 0;
-
-       if (sock->ops->poll) {
-               sock_poll_busy_loop(sock, events);
-               mask = sock->ops->poll(file, sock, wait);
-       } else if (sock->ops->poll_mask) {
-               sock_poll_wait(file, sock_get_poll_head(file, events), wait);
-               mask = sock->ops->poll_mask(sock, events);
-       }
+       __poll_t events = poll_requested_events(wait);
 
-       return mask | sock_poll_busy_flag(sock);
+       sock_poll_busy_loop(sock, events);
+       if (!sock->ops->poll)
+               return 0;
+       return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
 }
 
 static int sock_mmap(struct file *file, struct vm_area_struct *vma)
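
With ->get_poll_head and ->poll_mask gone, sock_poll() shrinks to the busy-loop
hint plus one ->poll() call, and every protocol's ->poll() is again responsible
for registering the caller on its wait queue. A minimal sketch of the ->poll()
shape the conversions in this series follow (demo_poll and its readability
helper are illustrative):

    #include <linux/net.h>
    #include <linux/poll.h>
    #include <net/sock.h>

    /* Placeholder for a protocol-specific readability test. */
    static bool demo_readable(struct sock *sk)
    {
            return !skb_queue_empty(&sk->sk_receive_queue);
    }

    static __poll_t demo_poll(struct file *file, struct socket *sock,
                              poll_table *wait)
    {
            struct sock *sk = sock->sk;
            __poll_t mask = 0;

            /* Register on the wait queue before testing state, so a
             * wakeup racing with the checks below is not lost. */
            sock_poll_wait(file, sk_sleep(sk), wait);

            if (sk->sk_err)
                    mask |= EPOLLERR;
            if (demo_readable(sk))
                    mask |= EPOLLIN | EPOLLRDNORM;
            if (sock_writeable(sk))
                    mask |= EPOLLOUT | EPOLLWRNORM;
            return mask;
    }
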
index 1a96951835999091c81ba451700f0a74565d9c59..625acb27efcc272ccdc0f60d4d693d6761ed139b 100644 (file)
@@ -35,7 +35,6 @@ struct _strp_msg {
         */
        struct strp_msg strp;
        int accum_len;
-       int early_eaten;
 };
 
 static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
        head = strp->skb_head;
        if (head) {
                /* Message already in progress */
-
-               stm = _strp_msg(head);
-               if (unlikely(stm->early_eaten)) {
-                       /* Already some number of bytes on the receive sock
-                        * data saved in skb_head, just indicate they
-                        * are consumed.
-                        */
-                       eaten = orig_len <= stm->early_eaten ?
-                               orig_len : stm->early_eaten;
-                       stm->early_eaten -= eaten;
-
-                       return eaten;
-               }
-
                if (unlikely(orig_offset)) {
                        /* Getting data with a non-zero offset when a message is
                         * in progress is not expected. If it does happen, we
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                                }
 
                                stm->accum_len += cand_len;
+                               eaten += cand_len;
                                strp->need_bytes = stm->strp.full_len -
                                                       stm->accum_len;
-                               stm->early_eaten = cand_len;
                                STRP_STATS_ADD(strp->stats.bytes, cand_len);
                                desc->count = 0; /* Stop reading socket */
                                break;
@@ -392,7 +377,7 @@ static int strp_read_sock(struct strparser *strp)
 /* Lower sock lock held */
 void strp_data_ready(struct strparser *strp)
 {
-       if (unlikely(strp->stopped))
+       if (unlikely(strp->stopped) || strp->paused)
                return;
 
        /* This check is needed to synchronize with do_strp_work.
@@ -407,9 +392,6 @@ void strp_data_ready(struct strparser *strp)
                return;
        }
 
-       if (strp->paused)
-               return;
-
        if (strp->need_bytes) {
                if (strp_peek_len(strp) < strp->need_bytes)
                        return;
index 3c85af058227d14bda8d9f598ec45e7b8db1785e..3fabf9f6a0f9d92eaccbc33a9600ca2d1370aa18 100644 (file)
@@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
                task->tk_status = -EAGAIN;
                goto out_unlock;
        }
-       if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
-               req->rq_xid = xprt_alloc_xid(xprt);
        ret = true;
 out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task)
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
-       return (__force __be32)xprt->xid++;
+       __be32 xid;
+
+       spin_lock(&xprt->reserve_lock);
+       xid = (__force __be32)xprt->xid++;
+       spin_unlock(&xprt->reserve_lock);
+       return xid;
 }
 
 static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task)
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
+       req->rq_xid     = xprt_alloc_xid(xprt);
        req->rq_connect_cookie = xprt->connect_cookie - 1;
        req->rq_bytes_sent = 0;
        req->rq_snd_buf.len = 0;
index 9f666e0650e23c0d4275ae219c23c5e301df5ac4..2830709957bddeb13adf0f352abb9aaacba3ec55 100644 (file)
@@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
 }
 
 /* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer
+ * Returns true if the message should be dropped by the caller, i.e. if it
+ * is a trial message or we are inside the trial period. Otherwise false.
  */
 static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
                                     struct tipc_media_addr *maddr,
@@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
        }
 
+       /* Accept regular link requests/responses only after trial period */
        if (mtyp != DSC_TRIAL_MSG)
-               return false;
+               return trial;
 
        sugg_addr = tipc_node_try_addr(net, peer_id, src);
        if (sugg_addr)
@@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t)
 {
        struct tipc_discoverer *d = from_timer(d, t, timer);
        struct tipc_net *tn = tipc_net(d->net);
-       u32 self = tipc_own_addr(d->net);
        struct tipc_media_addr maddr;
        struct sk_buff *skb = NULL;
        struct net *net = d->net;
@@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t)
                goto exit;
        }
 
-       /* Did we just leave the address trial period ? */
-       if (!self && !time_before(jiffies, tn->addr_trial_end)) {
-               self = tn->trial_addr;
-               tipc_net_finalize(net, self);
-               msg_set_prevnode(buf_msg(d->skb), self);
+       /* Trial period over? */
+       if (!time_before(jiffies, tn->addr_trial_end)) {
+               /* Did we just leave it? */
+               if (!tipc_own_addr(net))
+                       tipc_net_finalize(net, tn->trial_addr);
+
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+               msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
        }
 
        /* Adjust timeout interval according to discovery phase */
index 4fbaa0464405370601cb2fd1dd3b03733836d342..a7f6964c3a4b725a7cd06411dc1b5f3d48df778d 100644 (file)
@@ -121,12 +121,17 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
 
 void tipc_net_finalize(struct net *net, u32 addr)
 {
-       tipc_set_node_addr(net, addr);
-       smp_mb();
-       tipc_named_reinit(net);
-       tipc_sk_reinit(net);
-       tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
-                            TIPC_CLUSTER_SCOPE, 0, addr);
+       struct tipc_net *tn = tipc_net(net);
+
+       spin_lock_bh(&tn->node_list_lock);
+       if (!tipc_own_addr(net)) {
+               tipc_set_node_addr(net, addr);
+               tipc_named_reinit(net);
+               tipc_sk_reinit(net);
+               tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+                                    TIPC_CLUSTER_SCOPE, 0, addr);
+       }
+       spin_unlock_bh(&tn->node_list_lock);
 }
 
 void tipc_net_stop(struct net *net)
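
Serializing tipc_net_finalize() under node_list_lock and re-checking
tipc_own_addr() makes finalization idempotent: concurrent callers (the
discovery timer versus an incoming discovery message) publish the address at
most once. The check-under-lock shape, reduced to a sketch with placeholder
names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_net {
            spinlock_t lock;
            u32 own_addr;
    };

    static void demo_finalize(struct demo_net *tn, u32 addr)
    {
            spin_lock_bh(&tn->lock);
            if (!tn->own_addr) {    /* only the first caller wins */
                    tn->own_addr = addr;
                    /* publish derived state (name table, sockets) here */
            }
            spin_unlock_bh(&tn->lock);
    }
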
index 6a44eb812baf4a2fe31eeb55b04023f9f402666b..0453bd451ce80c1935bb6588facc0f2c23ae8644 100644 (file)
@@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
 }
 
 /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
+ * Returns suggested address if any, otherwise 0
  */
 u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
 {
@@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
        if (n) {
                addr = n->addr;
                tipc_node_put(n);
+               return addr;
        }
-       /* Even this node may be in trial phase */
+
+       /* Even this node may be in conflict */
        if (tn->trial_addr == addr)
                return tipc_node_suggest_addr(net, addr);
 
-       return addr;
+       return 0;
 }
 
 void tipc_node_check_dest(struct net *net, u32 addr,
index 14a5d055717d2a7b95ea353b15f53dfb81a39515..930852c54d7a6e97207c61a7c942e487781457e7 100644 (file)
@@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 
 /**
- * tipc_poll - read pollmask
+ * tipc_poll - read and possibly block on pollmask
  * @file: file structure associated with the socket
  * @sock: socket for which to calculate the poll bits
+ * @wait: poll table for registering the caller on the socket's wait queue
  *
  * Returns pollmask value
  *
@@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
  * imply that the operation will succeed, merely that it should be performed
  * and will not block.
  */
-static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
+                             poll_table *wait)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        __poll_t revents = 0;
 
+       sock_poll_wait(file, sk_sleep(sk), wait);
+
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
@@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = sock_no_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = tipc_shutdown,
@@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
@@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = {
        .socketpair     = tipc_socketpair,
        .accept         = tipc_accept,
        .getname        = tipc_getname,
-       .poll_mask      = tipc_poll_mask,
+       .poll           = tipc_poll,
        .ioctl          = tipc_ioctl,
        .listen         = tipc_listen,
        .shutdown       = tipc_shutdown,
index a127d61e8af984d3aaefde49c94f48a9a9187d53..301f224304698950544088c16518ea2e14ff41a6 100644 (file)
@@ -712,7 +712,7 @@ static int __init tls_register(void)
        build_protos(tls_prots[TLSV4], &tcp_prot);
 
        tls_sw_proto_ops = inet_stream_ops;
-       tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
+       tls_sw_proto_ops.poll = tls_sw_poll;
        tls_sw_proto_ops.splice_read = tls_sw_splice_read;
 
 #ifdef CONFIG_TLS_DEVICE
index f127fac88acfe0046b0a7dd55bab4d6d486de105..4618f1c3113743b8b93f757bca303c8f91d95499 100644 (file)
@@ -440,7 +440,7 @@ alloc_encrypted:
                        ret = tls_push_record(sk, msg->msg_flags, record_type);
                        if (!ret)
                                continue;
-                       if (ret == -EAGAIN)
+                       if (ret < 0)
                                goto send_end;
 
                        copied -= try_to_copy;
@@ -701,6 +701,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
        nsg = skb_to_sgvec(skb, &sgin[1],
                           rxm->offset + tls_ctx->rx.prepend_size,
                           rxm->full_len - tls_ctx->rx.prepend_size);
+       if (nsg < 0) {
+               ret = nsg;
+               goto out;
+       }
 
        tls_make_aad(ctx->rx_aad_ciphertext,
                     rxm->full_len - tls_ctx->rx.overhead_size,
@@ -712,6 +716,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
                                rxm->full_len - tls_ctx->rx.overhead_size,
                                skb, sk->sk_allocation);
 
+out:
        if (sgin != &sgin_arr[0])
                kfree(sgin);
 
@@ -919,22 +924,23 @@ splice_read_end:
        return copied ? : err;
 }
 
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+                        struct poll_table_struct *wait)
 {
+       unsigned int ret;
        struct sock *sk = sock->sk;
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-       __poll_t mask;
 
-       /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
-       mask = ctx->sk_poll_mask(sock, events);
+       /* Grab POLLOUT and POLLHUP from the underlying socket */
+       ret = ctx->sk_poll(file, sock, wait);
 
-       /* Clear EPOLLIN bits, and set based on recv_pkt */
-       mask &= ~(EPOLLIN | EPOLLRDNORM);
+       /* Clear POLLIN bits, and set based on recv_pkt */
+       ret &= ~(POLLIN | POLLRDNORM);
        if (ctx->recv_pkt)
-               mask |= EPOLLIN | EPOLLRDNORM;
+               ret |= POLLIN | POLLRDNORM;
 
-       return mask;
+       return ret;
 }
 
 static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1191,7 +1197,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
                sk->sk_data_ready = tls_data_ready;
                write_unlock_bh(&sk->sk_callback_lock);
 
-               sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+               sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
 
                strp_check_rcv(&sw_ctx_rx->strp);
        }
index 95b02a71fd47161735c51988463e5f5e4a7d44b3..e5473c03d667ad51308c3e8b705f3b1187f619e8 100644 (file)
@@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
 static int unix_socketpair(struct socket *, struct socket *);
 static int unix_accept(struct socket *, struct socket *, int, bool);
 static int unix_getname(struct socket *, struct sockaddr *, int);
-static __poll_t unix_poll_mask(struct socket *, __poll_t);
-static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t);
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
+                                   poll_table *);
 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
 static int unix_shutdown(struct socket *, int);
 static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
@@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_poll_mask,
+       .poll =         unix_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = {
        .socketpair =   unix_socketpair,
        .accept =       sock_no_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
@@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = {
        .socketpair =   unix_socketpair,
        .accept =       unix_accept,
        .getname =      unix_getname,
-       .poll_mask =    unix_dgram_poll_mask,
+       .poll =         unix_dgram_poll,
        .ioctl =        unix_ioctl,
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
@@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        return err;
 }
 
-static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
-       __poll_t mask = 0;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err)
@@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
        return mask;
 }
 
-static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+                                   poll_table *wait)
 {
        struct sock *sk = sock->sk, *other;
-       int writable;
-       __poll_t mask = 0;
+       unsigned int writable;
+       __poll_t mask;
+
+       sock_poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        /* exceptional events? */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
        }
 
        /* No write status requested, avoid expensive OUT tests. */
-       if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+       if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
        writable = unix_writable(sk);
index bb5d5fa68c357af4962602b2bced2164c6e5ab44..c1076c19b8580688ff041f71aee0d05ce0906030 100644 (file)
@@ -850,11 +850,18 @@ static int vsock_shutdown(struct socket *sock, int mode)
        return err;
 }
 
-static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
+                              poll_table *wait)
 {
-       struct sock *sk = sock->sk;
-       struct vsock_sock *vsk = vsock_sk(sk);
-       __poll_t mask = 0;
+       struct sock *sk;
+       __poll_t mask;
+       struct vsock_sock *vsk;
+
+       sk = sock->sk;
+       vsk = vsock_sk(sk);
+
+       poll_wait(file, sk_sleep(sk), wait);
+       mask = 0;
 
        if (sk->sk_err)
                /* Signify that there has been an error on this socket. */
@@ -1084,7 +1091,7 @@ static const struct proto_ops vsock_dgram_ops = {
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
@@ -1842,7 +1849,7 @@ static const struct proto_ops vsock_stream_ops = {
        .socketpair = sock_no_socketpair,
        .accept = vsock_accept,
        .getname = vsock_getname,
-       .poll_mask = vsock_poll_mask,
+       .poll = vsock_poll,
        .ioctl = sock_no_ioctl,
        .listen = vsock_listen,
        .shutdown = vsock_shutdown,
index 8e03bd3f3668b573c4d61a786e90a238abe9fe66..5d3cce9e8744d5207753107aeb55518f2848f50a 100644 (file)
@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
                return -ENODEV;
        }
 
-       if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+       if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
                return virtio_transport_send_pkt_loopback(vsock, pkt);
 
        if (pkt->reply)
index c7bbe5f0aae8839bdfe5ac7b7bd02c6aad8ac8dc..4eece06be1e734c0a0c8de50cdeff2475dd20a59 100644 (file)
@@ -6231,7 +6231,7 @@ do {                                                                          \
                                  nl80211_check_s32);
        /*
         * Check HT operation mode based on
-        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        * IEEE 802.11-2016 9.4.2.57 HT Operation element.
         */
        if (tb[NL80211_MESHCONF_HT_OPMODE]) {
                ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -6241,22 +6241,9 @@ do {                                                                         \
                                  IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
                        return -EINVAL;
 
-               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
-                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                       return -EINVAL;
+               /* NON_HT_STA bit is reserved, but some programs set it */
+               ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
 
-               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
-                               return -EINVAL;
-                       break;
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                               return -EINVAL;
-                       break;
-               }
                cfg->ht_opmode = ht_opmode;
                mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
@@ -10962,9 +10949,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                                    rem) {
                        u8 *mask_pat;
 
-                       nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        nl80211_packet_pattern_policy,
-                                        info->extack);
+                       err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                              nl80211_packet_pattern_policy,
+                                              info->extack);
+                       if (err)
+                               goto error;
+
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -11213,8 +11203,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                nl80211_packet_pattern_policy, NULL);
+               err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                      nl80211_packet_pattern_policy, NULL);
+               if (err)
+                       return err;
+
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
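
Both nl80211 hunks fix the same bug class: nla_parse_nested() reports policy
violations through its return value, and ignoring it means consulting a
possibly half-filled attribute table. A hedged sketch of the checked-parse
pattern, using a hypothetical attribute set:

    #include <net/netlink.h>

    enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_MASK, DEMO_ATTR_PATTERN,
           __DEMO_ATTR_MAX };
    #define DEMO_ATTR_MAX (__DEMO_ATTR_MAX - 1)

    static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
            [DEMO_ATTR_MASK]    = { .type = NLA_BINARY },
            [DEMO_ATTR_PATTERN] = { .type = NLA_BINARY },
    };

    static int demo_parse(const struct nlattr *nest,
                          struct netlink_ext_ack *extack)
    {
            struct nlattr *tb[DEMO_ATTR_MAX + 1];
            int err;

            err = nla_parse_nested(tb, DEMO_ATTR_MAX, nest, demo_policy,
                                   extack);
            if (err)
                    return err;     /* don't consult a half-filled table */
            if (!tb[DEMO_ATTR_MASK] || !tb[DEMO_ATTR_PATTERN])
                    return -EINVAL;
            return 0;
    }
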
index f93365ae0fdd76b6aab9b6227cfcbb96f41eed82..d49aa79b79970d403b5c165d4000b2aa1d493442 100644 (file)
@@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = {
        .socketpair =   sock_no_socketpair,
        .accept =       x25_accept,
        .getname =      x25_getname,
-       .poll_mask =    datagram_poll_mask,
+       .poll =         datagram_poll,
        .ioctl =        x25_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = compat_x25_ioctl,
index 36919a254ba370c37b4e199bfd68c285e25fdeb6..72335c2e8108996d07702086f1f1391faa33fd7d 100644 (file)
@@ -118,6 +118,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
        u64 addr;
        int err;
 
+       if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+               return -EINVAL;
+
        if (!xskq_peek_addr(xs->umem->fq, &addr) ||
            len > xs->umem->chunk_size_nohr) {
                xs->rx_dropped++;
@@ -196,8 +199,11 @@ static void xsk_destruct_skb(struct sk_buff *skb)
 {
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);
+       unsigned long flags;
 
+       spin_lock_irqsave(&xs->tx_completion_lock, flags);
        WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
+       spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
 
        sock_wfree(skb);
 }
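
The completion queue is a single-producer ring, but after this change it is fed
from two contexts: the sendmsg path and the skb destructor, which can fire from
(soft)IRQ context; tx_completion_lock with the _irqsave variant serializes
them. A reduced sketch (demo_produce_addr() is a hypothetical helper):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_queue;                      /* opaque completion ring */
    static void demo_produce_addr(struct demo_queue *cq, u64 addr);

    struct demo_xsk {
            spinlock_t tx_completion_lock;
            struct demo_queue *cq;
    };

    static void demo_complete(struct demo_xsk *xs, u64 addr)
    {
            unsigned long flags;

            /* The destructor may run in IRQ context while process
             * context also produces; _irqsave covers both. */
            spin_lock_irqsave(&xs->tx_completion_lock, flags);
            demo_produce_addr(xs->cq, addr);
            spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
    }
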
@@ -212,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
        struct sk_buff *skb;
        int err = 0;
 
-       if (unlikely(!xs->tx))
-               return -ENOBUFS;
-
        mutex_lock(&xs->mutex);
 
        while (xskq_peek_desc(xs->tx, &desc)) {
@@ -227,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                        goto out;
                }
 
-               if (xskq_reserve_addr(xs->umem->cq)) {
-                       err = -EAGAIN;
+               if (xskq_reserve_addr(xs->umem->cq))
                        goto out;
-               }
 
-               len = desc.len;
-               if (unlikely(len > xs->dev->mtu)) {
-                       err = -EMSGSIZE;
+               if (xs->queue_id >= xs->dev->real_num_tx_queues)
                        goto out;
-               }
-
-               if (xs->queue_id >= xs->dev->real_num_tx_queues) {
-                       err = -ENXIO;
-                       goto out;
-               }
 
+               len = desc.len;
                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
@@ -265,15 +259,15 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
                skb->destructor = xsk_destruct_skb;
 
                err = dev_direct_xmit(skb, xs->queue_id);
+               xskq_discard_desc(xs->tx);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
-                       err = -EAGAIN;
-                       /* SKB consumed by dev_direct_xmit() */
+                       /* SKB completed but not sent */
+                       err = -EBUSY;
                        goto out;
                }
 
                sent_frame = true;
-               xskq_discard_desc(xs->tx);
        }
 
 out:
@@ -294,15 +288,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
+       if (unlikely(!xs->tx))
+               return -ENOBUFS;
        if (need_wait)
                return -EOPNOTSUPP;
 
        return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
 }
 
-static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events)
+static unsigned int xsk_poll(struct file *file, struct socket *sock,
+                            struct poll_table_struct *wait)
 {
-       __poll_t mask = datagram_poll_mask(sock, events);
+       unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
 
@@ -693,7 +690,7 @@ static const struct proto_ops xsk_proto_ops = {
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
-       .poll_mask      = xsk_poll_mask,
+       .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
@@ -751,6 +748,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
 
        xs = xdp_sk(sk);
        mutex_init(&xs->mutex);
+       spin_lock_init(&xs->tx_completion_lock);
 
        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
index ef6a6f0ec949049de2fc03d1a675ee0c1f48ba5e..52ecaf770642785140358ea4ff2713ccaca8a489 100644 (file)
@@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
        return (entries > dcnt) ? dcnt : entries;
 }
 
-static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
-{
-       return q->nentries - (producer - q->cons_tail);
-}
-
 static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
 {
-       u32 free_entries = xskq_nb_free_lazy(q, producer);
+       u32 free_entries = q->nentries - (producer - q->cons_tail);
 
        if (free_entries >= dcnt)
                return free_entries;
@@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
 {
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
-       if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
+       if (xskq_nb_free(q, q->prod_tail, 1) == 0)
                return -ENOSPC;
 
        ring->desc[q->prod_tail++ & q->ring_mask] = addr;
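
The xsk_queue.h hunks above drop the lazy free-count shortcut and make xskq_produce_addr() check for exactly one free slot. For context, a minimal standalone sketch (not part of the merge) of the free-space arithmetic, with hypothetical field values; it relies on unsigned 32-bit wraparound and a power-of-two ring size:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's xsk_queue fields. */
struct ring {
	uint32_t nentries;   /* power of two */
	uint32_t ring_mask;  /* nentries - 1 */
	uint32_t prod_tail;  /* free-running producer index */
	uint32_t cons_tail;  /* free-running consumer index */
};

/* Mirrors xskq_nb_free() after the patch: unsigned wraparound makes
 * prod_tail - cons_tail the number of in-flight entries even when the
 * producer has wrapped past 2^32 and the consumer has not yet. */
static uint32_t nb_free(const struct ring *q)
{
	return q->nentries - (q->prod_tail - q->cons_tail);
}

int main(void)
{
	struct ring q = { .nentries = 8, .ring_mask = 7,
			  .prod_tail = 3, .cons_tail = 0xfffffffeu };

	/* 3 - 0xfffffffe wraps to 5 in-flight entries, so 3 slots are free. */
	printf("free slots: %u\n", nb_free(&q));
	/* The next produced entry, masked into the array bounds. */
	printf("next slot:  %u\n", q.prod_tail & q.ring_mask);
	return 0;
}
```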
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
new file mode 100644 (file)
index 0000000..8ae4940
--- /dev/null
+++ b/samples/bpf/.gitignore
@@ -0,0 +1,49 @@
+cpustat
+fds_example
+lathist
+load_sock_ops
+lwt_len_hist
+map_perf_test
+offwaketime
+per_socket_stats_example
+sampleip
+sock_example
+sockex1
+sockex2
+sockex3
+spintest
+syscall_nrs.h
+syscall_tp
+task_fd_query
+tc_l2_redirect
+test_cgrp2_array_pin
+test_cgrp2_attach
+test_cgrp2_attach2
+test_cgrp2_sock
+test_cgrp2_sock2
+test_current_task_under_cgroup
+test_lru_dist
+test_map_in_map
+test_overhead
+test_probe_write_user
+trace_event
+trace_output
+tracex1
+tracex2
+tracex3
+tracex4
+tracex5
+tracex6
+tracex7
+xdp1
+xdp2
+xdp_adjust_tail
+xdp_fwd
+xdp_monitor
+xdp_redirect
+xdp_redirect_cpu
+xdp_redirect_map
+xdp_router_ipv4
+xdp_rxq_info
+xdp_tx_iptunnel
+xdpsock
index 95c16324760c0be1af8be927e1adffae0b582525..0b6f22feb2c9ce37787ea5384276c85a4e1171eb 100644 (file)
@@ -6,6 +6,7 @@
  */
 #define KBUILD_MODNAME "foo"
 #include <linux/if_ether.h>
+#include <linux/if_vlan.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/in.h>
@@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
        return 0;
 }
 
-struct vlan_hdr {
-       uint16_t h_vlan_TCI;
-       uint16_t h_vlan_encapsulated_proto;
-};
-
 SEC("varlen")
 int handle_ingress(struct __sk_buff *skb)
 {
index 6caf47afa635ca680bb56b43ef78c7f62b293dd7..9d6dcaa9db9206ebe6a5bb14fe98c7001a543b6d 100644 (file)
@@ -6,6 +6,7 @@
  */
 #define _GNU_SOURCE
 #include <sched.h>
+#include <errno.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <asm/unistd.h>
@@ -44,8 +45,13 @@ static void test_task_rename(int cpu)
                exit(1);
        }
        start_time = time_get_ns();
-       for (i = 0; i < MAX_CNT; i++)
-               write(fd, buf, sizeof(buf));
+       for (i = 0; i < MAX_CNT; i++) {
+               if (write(fd, buf, sizeof(buf)) < 0) {
+                       printf("task rename failed: %s\n", strerror(errno));
+                       close(fd);
+                       return;
+               }
+       }
        printf("task_rename:%d: %lld events per sec\n",
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
        close(fd);
@@ -63,8 +69,13 @@ static void test_urandom_read(int cpu)
                exit(1);
        }
        start_time = time_get_ns();
-       for (i = 0; i < MAX_CNT; i++)
-               read(fd, buf, sizeof(buf));
+       for (i = 0; i < MAX_CNT; i++) {
+               if (read(fd, buf, sizeof(buf)) < 0) {
+                       printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+                       close(fd);
+                       return;
+               }
+       }
        printf("urandom_read:%d: %lld events per sec\n",
               cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
        close(fd);
index 1fa1becfa641510ae67db4d0ea64c3971f6d2f4d..d08046ab81f043505e0ea42a2e8c85661ae68f76 100644 (file)
@@ -122,6 +122,16 @@ static void print_stacks(void)
        }
 }
 
+static inline int generate_load(void)
+{
+       if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
+               printf("failed to generate some load with dd: %s\n", strerror(errno));
+               return -1;
+       }
+
+       return 0;
+}
+
 static void test_perf_event_all_cpu(struct perf_event_attr *attr)
 {
        int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
@@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
                assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
        }
-       system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+       if (generate_load() < 0) {
+               error = 1;
+               goto all_cpu_err;
+       }
        print_stacks();
 all_cpu_err:
        for (i--; i >= 0; i--) {
@@ -156,7 +170,7 @@ all_cpu_err:
 
 static void test_perf_event_task(struct perf_event_attr *attr)
 {
-       int pmu_fd;
+       int pmu_fd, error = 0;
 
        /* per task perf event, enable inherit so the "dd ..." command can be traced properly.
         * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr)
        }
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
        assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
-       system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+       if (generate_load() < 0) {
+               error = 1;
+               goto err;
+       }
        print_stacks();
+err:
        ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
        close(pmu_fd);
+       if (error)
+               int_exit(0);
 }
 
 static void test_bpf_perf_event(void)
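
One caveat about the generate_load() helper added above: system() returns -1 only when the shell could not be spawned, so "< 0" does not catch a dd that runs and fails. A stricter standalone variant (an editorial sketch, not part of the patch) decodes the status with the wait macros:

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>

/* Like generate_load() above, but also treats a non-zero dd exit
 * status (including 127, "command not found") as failure. */
static int generate_load(void)
{
	int status = system("dd if=/dev/zero of=/dev/null count=5000k status=none");

	if (status < 0) {
		printf("failed to spawn dd: %s\n", strerror(errno));
		return -1;
	}
	if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
		printf("dd exited abnormally (status 0x%x)\n", status);
		return -1;
	}
	return 0;
}

int main(void)
{
	return generate_load() ? EXIT_FAILURE : EXIT_SUCCESS;
}
```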
index b9c9549c4c272a944d818cef153e496f96dcec84..4bde9d066c4616430533cef02e32d295b0329b11 100755 (executable)
@@ -16,8 +16,8 @@
 BPF_FILE=xdp2skb_meta_kern.o
 DIR=$(dirname $0)
 
-export TC=/usr/sbin/tc
-export IP=/usr/sbin/ip
+[ -z "$TC" ] && TC=tc
+[ -z "$IP" ] && IP=ip
 
 function usage() {
     echo ""
@@ -53,7 +53,7 @@ function _call_cmd() {
     local allow_fail="$2"
     shift 2
     if [[ -n "$VERBOSE" ]]; then
-       echo "$(basename $cmd) $@"
+       echo "$cmd $@"
     fi
     if [[ -n "$DRYRUN" ]]; then
        return
index 6673cdb9f55cab3fb32faaca755f805e8c10ed8f..a7e94e7ff87df5f60f7a57522de77b5929e46029 100644 (file)
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
        struct ethhdr *eth = data;
        struct ipv6hdr *ip6h;
        struct iphdr *iph;
-       int out_index;
        u16 h_proto;
        u64 nh_off;
+       int rc;
 
        nh_off = sizeof(*eth);
        if (data + nh_off > data_end)
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
        fib_params.ifindex = ctx->ingress_ifindex;
 
-       out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+       rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
 
        /* verify egress index has xdp support
         * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
         * NOTE: without verification that egress index supports XDP
         *       forwarding packets are dropped.
         */
-       if (out_index > 0) {
+       if (rc == 0) {
                if (h_proto == htons(ETH_P_IP))
                        ip_decrease_ttl(iph);
                else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
                memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
                memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
-               return bpf_redirect_map(&tx_port, out_index, 0);
+               return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
        }
 
        return XDP_PASS;
index d69c8d78d3fdef775f27d97b94401fabb5ccfd72..5904b15438313399d8bfa8fe7412ed30b5342556 100644 (file)
@@ -729,7 +729,7 @@ static void kick_tx(int fd)
        int ret;
 
        ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
-       if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
+       if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
                return;
        lassert(0);
 }
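
The one-line kick_tx() change ties back to the xsk.c hunk earlier that returns -EBUSY for an skb that was completed but not sent. A self-contained sketch of the resulting retry policy (demonstration only; the real sample operates on an AF_XDP socket):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

/* EBUSY is now transient, just like EAGAIN and ENOBUFS: the kick can
 * simply be retried on the next pass of the TX loop. */
static int kick_tx(int fd)
{
	ssize_t ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return 0;   /* ok, or retry later */

	fprintf(stderr, "kick_tx: %s\n", strerror(errno));
	return -1;          /* hard error */
}

int main(void)
{
	/* fd -1 forces the hard-error path (EBADF) for demonstration. */
	return kick_tx(-1) ? 1 : 0;
}
```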
index 2960e26c6ea4c756064db41cc7d39c14f058068b..2535c3677c7b66a1650fc3a1a7fb9c99684ae8ea 100644 (file)
@@ -178,6 +178,8 @@ static const char *vbe_name(u32 index)
        return "(invalid)";
 }
 
+static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
+                                     pgoff_t pgoff);
 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
                                    pgoff_t pgoff);
 
@@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
                   MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
                pos -= MBOCHS_MMIO_BAR_OFFSET;
                poff = pos & ~PAGE_MASK;
-               pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
+               pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
                map = kmap(pg);
                if (is_write)
                        memcpy(map + poff, buf, count);
@@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state)
        dev_dbg(dev, "%s: %d pages released\n", __func__, count);
 }
 
-static int mbochs_region_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mdev_state *mdev_state = vma->vm_private_data;
@@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
        return 0;
 }
 
-static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
@@ -803,29 +805,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf)
        mutex_unlock(&mdev_state->ops_lock);
 }
 
-static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf,
-                                      unsigned long page_num)
+static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
 {
        struct mbochs_dmabuf *dmabuf = buf->priv;
        struct page *page = dmabuf->pages[page_num];
 
-       return kmap_atomic(page);
+       return kmap(page);
 }
 
-static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
+static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
+                                void *vaddr)
 {
-       struct mbochs_dmabuf *dmabuf = buf->priv;
-       struct page *page = dmabuf->pages[page_num];
-
-       return kmap(page);
+       kunmap(vaddr);
 }
 
 static struct dma_buf_ops mbochs_dmabuf_ops = {
        .map_dma_buf      = mbochs_map_dmabuf,
        .unmap_dma_buf    = mbochs_unmap_dmabuf,
        .release          = mbochs_release_dmabuf,
-       .map_atomic       = mbochs_kmap_atomic_dmabuf,
        .map              = mbochs_kmap_dmabuf,
+       .unmap            = mbochs_kunmap_dmabuf,
        .mmap             = mbochs_mmap_dmabuf,
 };
 
index c8156d61678cfbc6907a9176efbccb03aa8387ce..86321f06461e9835103950242930187c62837e1d 100644 (file)
@@ -214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
 # Prefix -I with $(srctree) if it is not an absolute path.
 # skip if -I has no parameter
 addtree = $(if $(patsubst -I%,%,$(1)), \
-$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)))
+$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1))
 
 # Find all -I options and call addtree
 flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
index 34d9e9ce97c29c5e0ca78e6b06085a6b29ffd8bd..514ed63ff5710789fda060eb06cf10813a4124ad 100644 (file)
@@ -239,6 +239,7 @@ cmd_record_mcount =                                         \
             "$(CC_FLAGS_FTRACE)" ]; then                       \
                $(sub_cmd_record_mcount)                        \
        fi;
+endif # -record-mcount
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
 ifdef CONFIG_STACK_VALIDATION
@@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),)
   objtool_args += --retpoline
 endif
 endif
-endif
 
 
 ifdef CONFIG_MODVERSIONS
@@ -590,7 +590,4 @@ endif
 # We never want them to be removed automatically.
 .SECONDARY: $(targets)
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 808d09f27ad4063424211a8264297a0b3945d3c5..17ef94c635cd5dcfd23c355576a351730076dd73 100644 (file)
@@ -88,7 +88,4 @@ PHONY += $(subdir-ymn)
 $(subdir-ymn):
        $(Q)$(MAKE) $(clean)=$@
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index a763b4775d062965a82af7761aaa8cb5a28aeb8e..40867a41615ba812987100133793183e6f82a1d5 100644 (file)
@@ -54,8 +54,4 @@ PHONY += $(subdir-ym)
 $(subdir-ym):
        $(Q)$(MAKE) $(modbuiltin)=$@
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 51ca0244fc8ac4f8e2981fb9dbc9df3efe507861..ff5ca9817a85ab394740c7ec8f8459f02a9656f9 100644 (file)
@@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 $(modules):
        $(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable so we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index df4174405feb331a772abe871046d9260c43c690..dd92dbbbaa687b73f31b922187c0da15f66266e0 100644 (file)
@@ -149,8 +149,4 @@ ifneq ($(cmd_files),)
   include $(cmd_files)
 endif
 
-
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 171483bc0538d7faa5e4a34c5804a2f8e721f2ea..da56aa78d245da2835d7714d6bb81e15cb1cf3f4 100644 (file)
@@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 $(modules):
        $(call cmd,sign_ko,$(MODLIB)/$(modinst_dir))
 
-# Declare the contents of the .PHONY variable as phony.  We keep that
-# information in a variable se we can use it in if_changed and friends.
-
 .PHONY: $(PHONY)
index 208eb2825dab017a9d3fdc0bdb8beef053b5626d..6efcead3198989d2ab2ab6772c72d8bb61c89c4e 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
 #include <stdio.h>
 int main(void)
 {
index e3b7362b0ee457b9601a8628609d8ebf78fcb09a..447857ffaf6be157841f8b0283d8ac67cb37dc5e 100755 (executable)
@@ -2606,12 +2606,6 @@ sub process {
                             "A patch subject line should describe the change not the tool that found it\n" . $herecurr);
                }
 
-# Check for old stable address
-               if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
-                       ERROR("STABLE_ADDRESS",
-                             "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr);
-               }
-
 # Check for unwanted Gerrit info
                if ($in_commit_log && $line =~ /^\s*change-id:/i) {
                        ERROR("GERRIT_CHANGE_ID",
@@ -5819,14 +5813,14 @@ sub process {
                    defined $stat &&
                    $stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
                    $1 !~ /^_*volatile_*$/) {
-                       my $specifier;
-                       my $extension;
-                       my $bad_specifier = "";
                        my $stat_real;
 
                        my $lc = $stat =~ tr@\n@@;
                        $lc = $lc + $linenr;
                        for (my $count = $linenr; $count <= $lc; $count++) {
+                               my $specifier;
+                               my $extension;
+                               my $bad_specifier = "";
                                my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
                                $fmt =~ s/%%//g;
 
index 5061abcc25409c4e095e8c5b25d3a8de3fb4db68..e6239f39abadd480f285e4b541b21141337f9497 100755 (executable)
@@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz
 try_decompress 'BZh'          xy    bunzip2
 try_decompress '\135\0\0\0'   xxx   unlzma
 try_decompress '\211\114\132' xy    'lzop -d'
+try_decompress '\002!L\030'   xxx   'lz4 -d'
+try_decompress '(\265/\375'   xxx   unzstd
 
 # Bail out:
 echo "$me: Cannot find vmlinux." >&2
index 3755af0cd9f7f24c1942fd9df216c525f79d04b3..75e4e22b986adcfd07197777c5d59d5601d3c920 100755 (executable)
@@ -1,4 +1,4 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
index 94a383b21df6405f4a9f6b6c08758d6c822381d8..f63b41b0dd498d23b65b3c12fe47e3b2c87e148d 100644 (file)
@@ -171,6 +171,9 @@ struct symbol {
  * config BAZ
  *         int "BAZ Value"
  *         range 1..255
+ *
+ * Please, also check zconf.y:print_symbol() when modifying the
+ * list of property types!
  */
 enum prop_type {
        P_UNKNOWN,
index 65da87fce907ad2bc7b52adba4651dc2c32786be..5ca2df790d3cfa5f4253a33a303219aaa8fc4394 100644 (file)
@@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[])
                nread--;
 
        /* remove trailing new lines */
-       while (buf[nread - 1] == '\n')
+       while (nread > 0 && buf[nread - 1] == '\n')
                nread--;
 
        buf[nread] = 0;
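
Without the added "nread > 0" guard, shell output consisting only of newlines would index buf[-1] and walk past the start of the buffer. A runnable standalone version of the guarded loop (the buffers here are made up):

```c
#include <stdio.h>
#include <string.h>

/* Same trimming logic as the patched do_shell(): stop either at the
 * first non-newline byte or at the start of the buffer. */
static void trim_newlines(char *buf, size_t nread)
{
	while (nread > 0 && buf[nread - 1] == '\n')
		nread--;
	buf[nread] = 0;
}

int main(void)
{
	char all_newlines[] = "\n\n\n";
	char cmd_output[]   = "gcc (GCC) 8.1.0\n";

	trim_newlines(all_newlines, strlen(all_newlines));
	trim_newlines(cmd_output, strlen(cmd_output));
	printf("[%s] [%s]\n", all_newlines, cmd_output);
	return 0;
}
```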
index 6f9b0aa32a82239b2bc1540d1b75949859ea48e9..4b68272ebdb96cb25e8d91de0d4cbc792a3079b6 100644 (file)
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
 static struct menu *current_menu, *current_entry;
 
 %}
-%expect 32
+%expect 31
 
 %union
 {
@@ -337,7 +337,7 @@ choice_block:
 
 /* if entry */
 
-if_entry: T_IF expr nl
+if_entry: T_IF expr T_EOL
 {
        printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
        menu_add_entry(NULL);
@@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu)
                        print_quoted_string(out, prop->text);
                        fputc('\n', out);
                        break;
+               case P_SYMBOL:
+                       fputs( "  symbol ", out);
+                       fprintf(out, "%s\n", prop->sym->name);
+                       break;
                default:
                        fprintf(out, "  unknown prop %d!\n", prop->type);
                        break;
index 66f08bb1cce978a1074141cc90adc7255c317ede..26de7d5aa5c89a5fd4f051c67029341f4f8849fa 100755 (executable)
@@ -152,6 +152,7 @@ regex_asm=(
 )
 regex_c=(
        '/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/'
+       '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/'
        '/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/'
        '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/'
        '/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
@@ -245,7 +246,7 @@ exuberant()
 {
        setup_regex exuberant asm c
        all_target_sources | xargs $1 -a                        \
-       -I __initdata,__exitdata,__initconst,                   \
+       -I __initdata,__exitdata,__initconst,__ro_after_init    \
        -I __initdata_memblock                                  \
        -I __refdata,__attribute,__maybe_unused,__always_unused \
        -I __acquires,__releases,__deprecated                   \
index f7403821db7f0aafdec4a2e9a6804b1b8c2a599b..b203f7758f9765f056c3e0d07e0286c49b181253 100644 (file)
@@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
  * The src pointer is defined as Z || other info where Z is the shared secret
  * from DH and other info is an arbitrary string (see SP800-56A section
  * 5.8.1.2).
+ *
+ * 'dlen' must be a multiple of the digest size.
  */
 static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
                   u8 *dst, unsigned int dlen, unsigned int zlen)
@@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
 {
        uint8_t *outbuf = NULL;
        int ret;
-       size_t outbuf_len = round_up(buflen,
-                                    crypto_shash_digestsize(sdesc->shash.tfm));
+       size_t outbuf_len = roundup(buflen,
+                                   crypto_shash_digestsize(sdesc->shash.tfm));
 
        outbuf = kmalloc(outbuf_len, GFP_KERNEL);
        if (!outbuf) {
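
The round_up() to roundup() switch matters because the kernel's round_up() assumes a power-of-two multiple, while KDF output must be padded to a whole number of digest blocks and digest sizes (e.g. SHA-1's 20 bytes) are often not powers of two. A runnable demonstration using the macro definitions as found in include/linux/kernel.h:

```c
#include <stdio.h>

/* Definitions copied from include/linux/kernel.h: round_up() is
 * bit-mask based and only correct for power-of-two y; roundup()
 * works for any multiple. */
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int buflen = 30, digestsize = 20; /* e.g. SHA-1 */

	/* 32: not a multiple of 20, the bug the hunk above fixes. */
	printf("round_up(%u, %u) = %u\n", buflen, digestsize,
	       round_up(buflen, digestsize));
	/* 40: correctly padded to two whole digest blocks. */
	printf("roundup(%u, %u)  = %u\n", buflen, digestsize,
	       roundup(buflen, digestsize));
	return 0;
}
```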
index f3d374d2ca045ce7325b20ad3cecb6304418d1b3..79d3709b06717a1f6452fe85b9922244b9f70381 100644 (file)
@@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
 static ssize_t sel_read_policy(struct file *filp, char __user *buf,
                               size_t count, loff_t *ppos)
 {
-       struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
        struct policy_load_memory *plm = filp->private_data;
        int ret;
 
-       mutex_lock(&fsi->mutex);
-
        ret = avc_has_perm(&selinux_state,
                           current_sid(), SECINITSID_SECURITY,
                          SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
        if (ret)
-               goto out;
+               return ret;
 
-       ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
-out:
-       mutex_unlock(&fsi->mutex);
-       return ret;
+       return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
 }
 
 static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
@@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
        ret = -EINVAL;
        if (index >= fsi->bool_num || strcmp(name,
                                             fsi->bool_pending_names[index]))
-               goto out;
+               goto out_unlock;
 
        ret = -ENOMEM;
        page = (char *)get_zeroed_page(GFP_KERNEL);
        if (!page)
-               goto out;
+               goto out_unlock;
 
        cur_enforcing = security_get_bool_value(fsi->state, index);
        if (cur_enforcing < 0) {
                ret = cur_enforcing;
-               goto out;
+               goto out_unlock;
        }
        length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
                          fsi->bool_pending_values[index]);
-       ret = simple_read_from_buffer(buf, count, ppos, page, length);
-out:
        mutex_unlock(&fsi->mutex);
+       ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out_free:
        free_page((unsigned long)page);
        return ret;
+
+out_unlock:
+       mutex_unlock(&fsi->mutex);
+       goto out_free;
 }
 
 static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
@@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
        unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
        const char *name = filep->f_path.dentry->d_name.name;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
                                             fsi->bool_pending_names[index]))
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
@@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        ssize_t length;
        int new_value;
 
+       if (count >= PAGE_SIZE)
+               return -ENOMEM;
+
+       /* No partial writes. */
+       if (*ppos != 0)
+               return -EINVAL;
+
+       page = memdup_user_nul(buf, count);
+       if (IS_ERR(page))
+               return PTR_ERR(page);
+
        mutex_lock(&fsi->mutex);
 
        length = avc_has_perm(&selinux_state,
@@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
        if (length)
                goto out;
 
-       length = -ENOMEM;
-       if (count >= PAGE_SIZE)
-               goto out;
-
-       /* No partial writes. */
-       length = -EINVAL;
-       if (*ppos != 0)
-               goto out;
-
-       page = memdup_user_nul(buf, count);
-       if (IS_ERR(page)) {
-               length = PTR_ERR(page);
-               page = NULL;
-               goto out;
-       }
-
        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
                goto out;
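
The selinuxfs hunks above hoist the size check, the partial-write check, and memdup_user_nul() out of the fsi->mutex critical section, so the lock is never held across an allocation or a user-space copy that may fault. A userspace analogue of the pattern (illustration only: strndup() stands in for memdup_user_nul(), 4096 for PAGE_SIZE, and the names are hypothetical):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int bool_value;

/* Copy and validate the input first; only then take the lock, so the
 * critical section is short and allocation-free. */
static int write_bool(const char *buf, size_t count)
{
	char *page;
	int new_value;

	if (count >= 4096)
		return -1;

	page = strndup(buf, count);   /* stand-in for memdup_user_nul() */
	if (!page)
		return -1;

	if (sscanf(page, "%d", &new_value) != 1) {
		free(page);
		return -1;
	}

	pthread_mutex_lock(&state_lock);
	bool_value = !!new_value;
	pthread_mutex_unlock(&state_lock);

	free(page);
	return 0;
}

int main(void)
{
	write_bool("1", 1);
	printf("bool_value = %d\n", bool_value);
	return 0;
}
```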
index 7ad226018f51674b8e97a5d6ff2aaeabd9163bbd..19de675d4504501f8a48c28302dd9ad552b70877 100644 (file)
@@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
        struct smack_known *skp = smk_of_task_struct(p);
 
        isp->smk_inode = skp;
+       isp->smk_flags |= SMK_INODE_INSTANT;
 }
 
 /*
index 69616d00481c2cdff6331d8406bb18550db7824d..b53026a72e734e29f63a76eb711458013fa70290 100644 (file)
@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
 int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                              struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
        
        if (substream->append && substream->use_count > 1)
@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
                runtime->avail = runtime->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        substream->active_sensing = !params->no_active_sensing;
@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
 int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                             struct snd_rawmidi_params * params)
 {
-       char *newbuf;
+       char *newbuf, *oldbuf;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
        snd_rawmidi_drain_input(substream);
@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = krealloc(runtime->buffer, params->buffer_size,
-                                 GFP_KERNEL);
+               newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
+               spin_lock_irq(&runtime->lock);
+               oldbuf = runtime->buffer;
                runtime->buffer = newbuf;
                runtime->buffer_size = params->buffer_size;
+               runtime->appl_ptr = runtime->hw_ptr = 0;
+               spin_unlock_irq(&runtime->lock);
+               kfree(oldbuf);
        }
        runtime->avail_min = params->avail_min;
        return 0;
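
The rawmidi hunks replace krealloc() with an allocate/swap/free sequence because a concurrent reader could still dereference the old buffer while krealloc() moves it; the pointer swap and the pointer resets happen atomically under the stream lock, and the old buffer is freed only after the lock is dropped. A userspace pthread analogue of that pattern (a sketch, not the driver code):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct runtime {
	pthread_mutex_t lock;
	char *buffer;
	size_t buffer_size;
	size_t appl_ptr, hw_ptr;
};

/* Allocate outside the lock, swap inside it, free outside it. */
static int resize_buffer(struct runtime *rt, size_t new_size)
{
	char *newbuf, *oldbuf;

	newbuf = malloc(new_size);
	if (!newbuf)
		return -1;

	pthread_mutex_lock(&rt->lock);
	oldbuf = rt->buffer;
	rt->buffer = newbuf;
	rt->buffer_size = new_size;
	rt->appl_ptr = rt->hw_ptr = 0;  /* old offsets are meaningless now */
	pthread_mutex_unlock(&rt->lock);

	free(oldbuf);
	return 0;
}

int main(void)
{
	struct runtime rt = { .lock = PTHREAD_MUTEX_INITIALIZER };

	resize_buffer(&rt, 4096);
	printf("buffer_size = %zu\n", rt.buffer_size);
	free(rt.buffer);
	return 0;
}
```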
index 61a07fe34cd271e60dc0c31a7dddae750c2532b1..56ca78423040f09e6d0569b651ba631105b8bd02 100644 (file)
@@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
        struct snd_seq_client *cptr = NULL;
 
        /* search for next client */
-       info->client++;
+       if (info->client < INT_MAX)
+               info->client++;
        if (info->client < 0)
                info->client = 0;
        for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
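
The INT_MAX guard added above avoids incrementing a signed int past INT_MAX, which is undefined behavior and, if it wrapped negative, would restart the "next client" scan from 0 indefinitely. A tiny runnable illustration of the saturating increment:

```c
#include <limits.h>
#include <stdio.h>

/* Saturate at INT_MAX instead of overflowing; clamp negatives to 0,
 * matching the ioctl handler above. */
static int next_index(int client)
{
	if (client < INT_MAX)
		client++;
	if (client < 0)
		client = 0;
	return client;
}

int main(void)
{
	printf("next after 41:      %d\n", next_index(41));
	printf("next after INT_MAX: %d\n", next_index(INT_MAX)); /* saturates */
	return 0;
}
```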
index 665089c455603c0c683144c419981f8215780e85..b6f076bbc72d14be37893e20b19cbfaedf2f728b 100644 (file)
@@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
                                } else {
                                        if (id.subdevice < 0)
                                                id.subdevice = 0;
-                                       else
+                                       else if (id.subdevice < INT_MAX)
                                                id.subdevice++;
                                }
                        }
index d91c87e41756ea5fceaee73d84e211b9ebba929d..20a171ac4bb2f7cff9715122c4e959c60fc9b485 100644 (file)
@@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
        list_for_each_entry(pcm, &codec->pcm_list_head, list)
                snd_pcm_suspend_all(pcm->pcm);
        state = hda_call_codec_suspend(codec);
-       if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
-           (state & AC_PWRST_CLK_STOP_OK))
+       if (codec->link_down_at_suspend ||
+           (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+            (state & AC_PWRST_CLK_STOP_OK)))
                snd_hdac_codec_link_down(&codec->core);
        snd_hdac_link_power(&codec->core, false);
        return 0;
index 681c360f29f9d628cf4462c9bb7ef92879f27d91..a8b1b31f161c26f739892ea6b52e79ba2ebca291 100644 (file)
@@ -258,6 +258,7 @@ struct hda_codec {
        unsigned int power_save_node:1; /* advanced PM for each widget */
        unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
        unsigned int force_pin_prefix:1; /* Add location prefix */
+       unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
 #ifdef CONFIG_PM
        unsigned long power_on_acct;
        unsigned long power_off_acct;
index 04e949aa01ada5492765cd313608624f3a42c7b9..321e95c409c1427ddd5a56f802101fc9fe248bff 100644 (file)
@@ -991,6 +991,7 @@ struct ca0132_spec {
 enum {
        QUIRK_NONE,
        QUIRK_ALIENWARE,
+       QUIRK_ALIENWARE_M17XR4,
        QUIRK_SBZ,
        QUIRK_R3DI,
 };
@@ -1040,13 +1041,15 @@ static const struct hda_pintbl r3di_pincfgs[] = {
 };
 
 static const struct snd_pci_quirk ca0132_quirks[] = {
+       SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
        SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
-       SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
        {}
 };
 
@@ -5663,7 +5666,7 @@ static const char * const ca0132_alt_slave_pfxs[] = {
  * I think this has to do with the pin for rear surround being 0x11,
  * and the center/lfe being 0x10. Usually the pin order is the opposite.
  */
-const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
+static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
        { .channels = 2,
          .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
        { .channels = 4,
@@ -5966,7 +5969,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
        info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
 
        /* With the DSP enabled, desktops don't use this ADC. */
-       if (spec->use_alt_functions) {
+       if (!spec->use_alt_functions) {
                info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
                if (!info)
                        return -ENOMEM;
@@ -6130,7 +6133,10 @@ static void ca0132_init_dmic(struct hda_codec *codec)
         * Bit   6: set to select Data2, clear for Data1
         * Bit   7: set to enable DMic, clear for AMic
         */
-       val = 0x23;
+       if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+               val = 0x33;
+       else
+               val = 0x23;
        /* keep a copy of dmic ctl val for enable/disable dmic purpuse */
        spec->dmic_ctl = val;
        snd_hda_codec_write(codec, spec->input_pins[0], 0,
@@ -7223,7 +7229,7 @@ static int ca0132_init(struct hda_codec *codec)
 
        snd_hda_sequence_write(codec, spec->base_init_verbs);
 
-       if (spec->quirk != QUIRK_NONE)
+       if (spec->use_alt_functions)
                ca0132_alt_init(codec);
 
        ca0132_download_dsp(codec);
@@ -7237,8 +7243,9 @@ static int ca0132_init(struct hda_codec *codec)
        case QUIRK_R3DI:
                r3di_setup_defaults(codec);
                break;
-       case QUIRK_NONE:
-       case QUIRK_ALIENWARE:
+       case QUIRK_SBZ:
+               break;
+       default:
                ca0132_setup_defaults(codec);
                ca0132_init_analog_mic2(codec);
                ca0132_init_dmic(codec);
@@ -7343,7 +7350,6 @@ static const struct hda_codec_ops ca0132_patch_ops = {
 static void ca0132_config(struct hda_codec *codec)
 {
        struct ca0132_spec *spec = codec->spec;
-       struct auto_pin_cfg *cfg = &spec->autocfg;
 
        spec->dacs[0] = 0x2;
        spec->dacs[1] = 0x3;
@@ -7405,12 +7411,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        case QUIRK_R3DI:
                codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
@@ -7438,9 +7439,6 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                break;
        default:
                spec->num_outputs = 2;
@@ -7463,12 +7461,7 @@ static void ca0132_config(struct hda_codec *codec)
                /* SPDIF I/O */
                spec->dig_out = 0x05;
                spec->multiout.dig_out_nid = spec->dig_out;
-               cfg->dig_out_pins[0] = 0x0c;
-               cfg->dig_outs = 1;
-               cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
                spec->dig_in = 0x09;
-               cfg->dig_in_pin = 0x0e;
-               cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
                break;
        }
 }
@@ -7476,7 +7469,7 @@ static void ca0132_config(struct hda_codec *codec)
 static int ca0132_prepare_verbs(struct hda_codec *codec)
 {
 /* Verbs + terminator (an empty element) */
-#define NUM_SPEC_VERBS 4
+#define NUM_SPEC_VERBS 2
        struct ca0132_spec *spec = codec->spec;
 
        spec->chip_init_verbs = ca0132_init_verbs0;
@@ -7488,34 +7481,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
        if (!spec->spec_init_verbs)
                return -ENOMEM;
 
-       /* HP jack autodetection */
-       spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
-       spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;
-
-       /* MIC1 jack autodetection */
-       spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
-       spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
-       spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;
-
        /* config EAPD */
-       spec->spec_init_verbs[2].nid = 0x0b;
-       spec->spec_init_verbs[2].param = 0x78D;
-       spec->spec_init_verbs[2].verb = 0x00;
+       spec->spec_init_verbs[0].nid = 0x0b;
+       spec->spec_init_verbs[0].param = 0x78D;
+       spec->spec_init_verbs[0].verb = 0x00;
 
        /* Previously commented configuration */
        /*
-       spec->spec_init_verbs[3].nid = 0x0b;
-       spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].nid = 0x0b;
+       spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;
+       spec->spec_init_verbs[2].verb = 0x02;
+
+       spec->spec_init_verbs[3].nid = 0x10;
+       spec->spec_init_verbs[3].param = 0x78D;
        spec->spec_init_verbs[3].verb = 0x02;
 
        spec->spec_init_verbs[4].nid = 0x10;
-       spec->spec_init_verbs[4].param = 0x78D;
+       spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;
        spec->spec_init_verbs[4].verb = 0x02;
-
-       spec->spec_init_verbs[5].nid = 0x10;
-       spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE;
-       spec->spec_init_verbs[5].verb = 0x02;
        */
 
        /* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */
index e7fcfc3b8885fb7470dc1b10a49f305f7bca323d..f641c20095f71bb93edef945be21cbc141de280f 100644 (file)
@@ -964,6 +964,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
        SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
        SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+       SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
        SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
index 8840daf9c6a300899efaf02898430d158d5b972a..8a49415aebacb79cd3da6b90b1a34e041da02bbd 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <sound/core.h>
 #include <sound/jack.h>
 #include <sound/asoundef.h>
@@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
 
        if (pin_idx < 0)
                return;
+       mutex_lock(&spec->pcm_lock);
        if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
                snd_hda_jack_report_sync(codec);
+       mutex_unlock(&spec->pcm_lock);
 }
 
 static void jack_callback(struct hda_codec *codec,
@@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
        struct hda_codec *codec = per_pin->codec;
-       struct hdmi_spec *spec = codec->spec;
        int ret;
 
        /* no temporary power up/down needed for component notifier */
-       if (!codec_has_acomp(codec))
-               snd_hda_power_up_pm(codec);
+       if (!codec_has_acomp(codec)) {
+               ret = snd_hda_power_up_pm(codec);
+               if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
+                       snd_hda_power_down_pm(codec);
+                       return false;
+               }
+       }
 
-       mutex_lock(&spec->pcm_lock);
        if (codec_has_acomp(codec)) {
                sync_eld_via_acomp(codec, per_pin);
                ret = false; /* don't call snd_hda_jack_report_sync() */
        } else {
                ret = hdmi_present_sense_via_verbs(per_pin, repoll);
        }
-       mutex_unlock(&spec->pcm_lock);
 
        if (!codec_has_acomp(codec))
                snd_hda_power_down_pm(codec);
@@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work)
 {
        struct hdmi_spec_per_pin *per_pin =
        container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+       struct hda_codec *codec = per_pin->codec;
+       struct hdmi_spec *spec = codec->spec;
 
        if (per_pin->repoll_count++ > 6)
                per_pin->repoll_count = 0;
 
+       mutex_lock(&spec->pcm_lock);
        if (hdmi_present_sense(per_pin, per_pin->repoll_count))
                snd_hda_jack_report_sync(per_pin->codec);
+       mutex_unlock(&spec->pcm_lock);
 }
 
 static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
@@ -3741,6 +3750,11 @@ static int patch_atihdmi(struct hda_codec *codec)
 
        spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
 
+       /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing
+        * the link-down as is.  Tell the core to allow it.
+        */
+       codec->link_down_at_suspend = 1;
+
        return 0;
 }
 
index e9bd33ea538f239891c031a1a81e075a35c75043..f6af3e1c2b932d34c1de567b4229b0eb686af637 100644 (file)
@@ -2366,6 +2366,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2545,6 +2546,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
        SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
        SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+       SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
        SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4995,7 +4997,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
-               spec->shutup = alc_no_shutup; /* reduce click noise */
                spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                codec->power_save_node = 0; /* avoid click noises */
@@ -5394,6 +5395,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
+static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
+       hda_fixup_thinkpad_acpi(codec, fix, action);
+}
+
 /* for dell wmi mic mute led */
 #include "dell_wmi_helper.c"
 
@@ -5946,7 +5954,7 @@ static const struct hda_fixup alc269_fixups[] = {
        },
        [ALC269_FIXUP_THINKPAD_ACPI] = {
                .type = HDA_FIXUP_FUNC,
-               .v.func = hda_fixup_thinkpad_acpi,
+               .v.func = alc_fixup_thinkpad_acpi,
                .chained = true,
                .chain_id = ALC269_FIXUP_SKU_IGNORE,
        },
@@ -6562,6 +6570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+       SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
@@ -6603,8 +6612,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+       SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
-       SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6782,6 +6791,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x19, 0x02a11030},
                {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11030},
+               {0x1a, 0x02a11040},
+               {0x1b, 0x01014020},
+               {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+               {0x14, 0x90170110},
+               {0x19, 0x02a11020},
+               {0x1a, 0x02a11030},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60140},
                {0x14, 0x90170110},
index 6c85f13ab23f17f7ef4031f5f73797fb383584a7..54f6252faca684b23ef91617318c8ae11e3de04e 100644 (file)
@@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card,
        chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
        if (!chip->port_dsp_bar) {
                dev_err(card->dev, "cannot remap PCI memory region\n");
+               err = -ENOMEM;
                goto remap_pci_failed;
        }
 
index caae4843cb7001fbee1fa9b222850df7006850fb..16e006f708ca0cbd44a63135bb996b8db7c3ba9e 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 04b3256f8e6d5f8e3e368b043f0fdcfeb7c23164..4e76630dd6554673d71ad647c1108bb54f1bcea2 100644 (file)
@@ -91,6 +91,7 @@ struct kvm_regs {
 #define KVM_VGIC_V3_ADDR_TYPE_DIST     2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
 #define KVM_VGIC_ITS_ADDR_TYPE         4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION    5
 
 #define KVM_VGIC_V3_DIST_SIZE          SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
index 833ed9a16adfd03e0b6cb70adc19fe03055f7344..1b32b56a03d34ce2a5f0b7f79c621f87d8c89dbf 100644 (file)
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_REG_PPC_PSSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
 
 #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
index 389c36fd82990f3f6b390342f56375ac0067054a..ac5ba55066dd76a26f133d91623309036bcad4c8 100644 (file)
 #define __NR_pkey_alloc                384
 #define __NR_pkey_free         385
 #define __NR_pkey_mprotect     386
+#define __NR_rseq              387
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index fb00a2fca9901eb02ea7b730ddbac957e8ecc947..5701f5cecd3125fbce64ead21d89d02fc8fa25af 100644 (file)
 #define X86_FEATURE_AMD_IBPB           (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS           (13*32+14) /* "" Indirect Branch Restricted Speculation */
 #define X86_FEATURE_AMD_STIBP          (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD           (13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM             (14*32+ 0) /* Digital Thermal Sensor */
index ac6b1a12c9b7cd6319dc3697500d56c938de8d90..b76b77dcfd1fcc52ded0a99b7e7f3cec8cde90ce 100644 (file)
@@ -29,9 +29,10 @@ static bool has_perf_query_support(void)
        if (perf_query_supported)
                goto out;
 
-       fd = open(bin_name, O_RDONLY);
+       fd = open("/", O_RDONLY);
        if (fd < 0) {
-               p_err("perf_query_support: %s", strerror(errno));
+               p_err("perf_query_support: cannot open directory \"/\" (%s)",
+                     strerror(errno));
                goto out;
        }
 
index a4f435203feff52f9d7c9a04bf8d5c10c31d2c73..959aa53ab6789f839442326359701b17ba9e337c 100644 (file)
@@ -90,7 +90,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
        }
 
        wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
-               nsecs / 1000000000;
+               (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+               1000000000;
+
 
        if (!localtime_r(&wallclock_secs, &load_tm)) {
                snprintf(buf, size, "%llu", nsecs / 1000000000);
@@ -692,15 +694,19 @@ static int do_load(int argc, char **argv)
                return -1;
        }
 
-       if (do_pin_fd(prog_fd, argv[1])) {
-               p_err("failed to pin program");
-               return -1;
-       }
+       if (do_pin_fd(prog_fd, argv[1]))
+               goto err_close_obj;
 
        if (json_output)
                jsonw_null(json_wtr);
 
+       bpf_object__close(obj);
+
        return 0;
+
+err_close_obj:
+       bpf_object__close(obj);
+       return -1;
 }
 
 static int do_help(int argc, char **argv)
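
In the bpftool hunk above, a program's load time is nanoseconds since boot, so converting it to wall-clock time must add the nanosecond remainders before dividing; otherwise the sub-second parts never carry into the seconds value. A Linux-only standalone sketch (CLOCK_BOOTTIME assumed available; the load timestamp is hypothetical):

```c
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec real, boot;
	long long nsecs = 1500000000LL; /* e.g. loaded 1.5 s after boot */
	time_t wallclock_secs;

	clock_gettime(CLOCK_REALTIME, &real);
	clock_gettime(CLOCK_BOOTTIME, &boot);

	/* Same arithmetic as the patched print_boot_time(): sum the
	 * nanosecond parts first, then divide once. */
	wallclock_secs = (real.tv_sec - boot.tv_sec) +
			 (real.tv_nsec - boot.tv_nsec + nsecs) / 1000000000;

	printf("loaded at (unix time): %lld\n", (long long)wallclock_secs);
	return 0;
}
```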
index a4bbb984941df2c150ec7209e4905ac1baacbf81..950c1504ca37ecda43542a46730fe805a0f05866 100644 (file)
@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
            $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;           \
            rm -f $(depfile);                                                    \
            mv -f $(dot-target).tmp $(dot-target).cmd,                           \
-           printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
-           printf '\# using basic dep data\n\n' >> $(dot-target).cmd;           \
+           printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+           printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd;           \
            cat $(depfile) >> $(dot-target).cmd;                                 \
            printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
 
@@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
 ###
 ## HOSTCC C flags
 
-host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
index 5eb4b5ad79cb778f0e949a07f719743d7a62d3c7..5edf65e684ab70bb65bfd0e8dc821a61b605be6f 100644 (file)
@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE
        $(Q)$(MAKE) $(build)=fixdep
 
 $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
-       $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
+       $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
 
 FORCE:
 
index 6fdff5945c8a08f27af713f6b59cb27b315da447..9c660e1688abe1cd6bf0e22bf709515e8a463e0d 100644 (file)
@@ -680,6 +680,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ATOMIC  3
 
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO    4
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index e0b06784f2279d9f428da4a0bce526721a452570..59b19b6a40d73ea6575f8810a6f4345a931c5a01 100644 (file)
@@ -2630,7 +2630,7 @@ struct bpf_fib_lookup {
        union {
                /* inputs to lookup */
                __u8    tos;            /* AF_INET  */
-               __be32  flowlabel;      /* AF_INET6 */
+               __be32  flowinfo;       /* AF_INET6, flow_label + priority */
 
                /* output: metric of fib result (IPv4/IPv6 only) */
                __u32   rt_metric;
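An illustrative helper for populating the renamed flowinfo field, assuming the conventional IPv6 flowinfo layout (20-bit flow label in the low bits, priority/traffic class above it, network byte order); the exact packing a given kernel expects should be confirmed against the uapi documentation.

#include <stdint.h>
#include <arpa/inet.h>

static uint32_t make_flowinfo(uint32_t priority, uint32_t flow_label)
{
	/* priority/traffic class in bits 20-27, flow label in bits 0-19 */
	return htonl(((priority & 0xff) << 20) | (flow_label & 0xfffff));
}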
index 68699f654118592527096dc26336f57da6a01cdc..cf01b68242448512416c1b1aa25f0904915aad0a 100644 (file)
@@ -333,6 +333,7 @@ enum {
        IFLA_BRPORT_BCAST_FLOOD,
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
+       IFLA_BRPORT_ISOLATED,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -516,6 +517,7 @@ enum {
        IFLA_VXLAN_COLLECT_METADATA,
        IFLA_VXLAN_LABEL,
        IFLA_VXLAN_GPE,
+       IFLA_VXLAN_TTL_INHERIT,
        __IFLA_VXLAN_MAX
 };
 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
index 39e364c70caf780312808e179a1bb234aa45460e..b6270a3b38e9f3fb410e8c80d8658b2c01a8ef96 100644 (file)
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_BPB 152
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 38047c6aa57576d170b3281eb0de0376894e9f41..f4a25bd1871fb856a8295b77c825323cd3f7fc25 100644 (file)
@@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
                "lbug_with_loc",
                "fortify_panic",
                "usercopy_abort",
+               "machine_real_restart",
        };
 
        if (func->bind == STB_WEAK)
index 4e60e105583ee803916589ca56df0e81e12b8fb3..0d1acb704f641df7762f12f159afd70313089cc0 100644 (file)
@@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf)
                                continue;
                        sym->pfunc = sym->cfunc = sym;
                        coldstr = strstr(sym->name, ".cold.");
-                       if (coldstr) {
-                               coldstr[0] = '\0';
-                               pfunc = find_symbol_by_name(elf, sym->name);
-                               coldstr[0] = '.';
-
-                               if (!pfunc) {
-                                       WARN("%s(): can't find parent function",
-                                            sym->name);
-                                       goto err;
-                               }
-
-                               sym->pfunc = pfunc;
-                               pfunc->cfunc = sym;
+                       if (!coldstr)
+                               continue;
+
+                       coldstr[0] = '\0';
+                       pfunc = find_symbol_by_name(elf, sym->name);
+                       coldstr[0] = '.';
+
+                       if (!pfunc) {
+                               WARN("%s(): can't find parent function",
+                                    sym->name);
+                               goto err;
+                       }
+
+                       sym->pfunc = pfunc;
+                       pfunc->cfunc = sym;
+
+                       /*
+                        * Unfortunately, -fno-reorder-functions puts the child
+                        * inside the parent.  Remove the overlap so we can
+                        * have sane assumptions.
+                        *
+                        * Note that pfunc->len no longer matches
+                        * pfunc->sym.st_size.
+                        */
+                       if (sym->sec == pfunc->sec &&
+                           sym->offset >= pfunc->offset &&
+                           sym->offset + sym->len == pfunc->offset + pfunc->len) {
+                               pfunc->len -= sym->len;
                        }
                }
        }
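A worked, self-contained sketch of the trim above, with hypothetical addresses for a parent function and its .cold. child placed at the parent's tail:

#include <stdio.h>

int main(void)
{
	unsigned long pfunc_off = 0x100, pfunc_len = 0x80;	/* foo        */
	unsigned long sym_off   = 0x160, sym_len   = 0x20;	/* foo.cold.1 */

	/* the child ends exactly where the parent ends ...           */
	if (sym_off >= pfunc_off &&
	    sym_off + sym_len == pfunc_off + pfunc_len)
		pfunc_len -= sym_len;		/* ... so trim the overlap */

	/* the parent now covers [0x100, 0x160) */
	printf("parent: [%#lx, %#lx)\n", pfunc_off, pfunc_off + pfunc_len);
	return 0;
}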
index 5dfe102fb5b533979a2fb726b621cedd398d0935..b10a90b6a7181f8968420a875a2b2fc2b3919321 100644 (file)
@@ -178,6 +178,9 @@ Print count deltas for fixed number of times.
 This option should be used together with "-I" option.
        example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
 
+--interval-clear::
+Clear the screen before the next interval.
+
 --timeout msecs::
 Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
 This option is not supported with the "-I" option.
index b5ac356ba323c8a363b96e10082205078f12a3f8..f5a3b402589eacc6c8a9789b099c86bd3b946587 100644 (file)
@@ -207,8 +207,7 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
   PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
-  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
-  PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
+  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
 endif
 
index 3598b8b75d274c8ebcc6fc0452091dec34c797b7..ef5d59a5742e2467fc409d52aebbfe4111ce0710 100644 (file)
@@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
        u64 ip;
        u64 skip_slot = -1;
 
-       if (chain->nr < 3)
+       if (!chain || chain->nr < 3)
                return skip_slot;
 
        ip = chain->ips[2];
index 4dfe42666d0ce6e20214e70f0c2a6a3884106290..f0b1709a5ffb2b0901d7f2492252876d17bc25a0 100644 (file)
 330    common  pkey_alloc              __x64_sys_pkey_alloc
 331    common  pkey_free               __x64_sys_pkey_free
 332    common  statx                   __x64_sys_statx
+333    common  io_pgetevents           __x64_sys_io_pgetevents
+334    common  rseq                    __x64_sys_rseq
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 4b2caf6d48e794d3cd5a88aaa978db10aabb3185..fead6b3b4206e409fc4042ce5d850cae2629bae9 100644 (file)
@@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
                else if (rm[2].rm_so != rm[2].rm_eo)
                        prefix[0] = '+';
                else
-                       strncpy(prefix, "+0", 2);
+                       scnprintf(prefix, sizeof(prefix), "+0");
        }
 
        /* Rename register */
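The motivation for the change above, sketched with libc snprintf() standing in for the tools' scnprintf() helper: strncpy() with n == strlen(src) copies no terminating NUL, whereas the *printf variants always terminate.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char a[3] = { 'x', 'x', 'x' };		/* deliberately unterminated */
	char b[3];

	strncpy(a, "+0", 2);			/* copies 2 bytes, no NUL    */
	snprintf(b, sizeof(b), "+0");		/* always NUL-terminates     */

	printf("b=%s, a[2]='%c'\n", b, a[2]);	/* a[2] is still 'x'         */
	return 0;
}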
index 63eb49082774c94dfbabe7a18db73bdc2403fb6a..44195514b19e65a5ee0287b48fa0ab25fa44d66f 100644 (file)
@@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata)
        u8 *global_data;
        u8 *process_data;
        u8 *thread_data;
-       u64 bytes_done;
+       u64 bytes_done, secs;
        long work_done;
        u32 l;
        struct rusage rusage;
@@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata)
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
        td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
-       td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
+       secs = td->runtime_ns / NSEC_PER_SEC;
+       td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
 
        getrusage(RUSAGE_THREAD, &rusage);
        td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
index 5eb22cc563636c11d4e12bf42c71f00b3e1255db..8180319285af3377810c30c0298f37c73cb9bb8d 100644 (file)
@@ -283,6 +283,15 @@ out_put:
        return ret;
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 static int hist_entry__tty_annotate(struct hist_entry *he,
                                    struct perf_evsel *evsel,
                                    struct perf_annotate *ann)
@@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv)
                        .attr   = perf_event__process_attr,
                        .build_id = perf_event__process_build_id,
                        .tracing_data   = perf_event__process_tracing_data,
-                       .feature        = perf_event__process_feature,
+                       .feature        = process_feature_event,
                        .ordered_events = true,
                        .ordering_requires_timestamps = true,
                },
index 307b3594525f34cc9e14d758b71bcdf266414c3e..6a8738f7ead3613e691a7dc6aac523d585de39b2 100644 (file)
@@ -56,16 +56,16 @@ struct c2c_hist_entry {
 
        struct compute_stats     cstats;
 
+       unsigned long            paddr;
+       unsigned long            paddr_cnt;
+       bool                     paddr_zero;
+       char                    *nodestr;
+
        /*
         * must be at the end,
         * because of its callchain dynamic entry
         */
        struct hist_entry       he;
-
-       unsigned long            paddr;
-       unsigned long            paddr_cnt;
-       bool                     paddr_zero;
-       char                    *nodestr;
 };
 
 static char const *coalesce_default = "pid,iaddr";
index cdb5b694983273de734fa1f45848c49eeac239d2..c04dc7b537971a07801153db1f9cdaf5da8a2c34 100644 (file)
@@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool,
        }
 
        /*
-        * All features are received, we can force the
+        * (feat_id = HEADER_LAST_FEATURE) is the end marker, which
+        * means all features have been received; now we can force the
         * group if needed.
         */
        setup_forced_leader(rep, session->evlist);
index b3bf35512d2198a94e46a7ecaf6052bca616ee7d..568ddfac3213e084c1f4c6077cd73943bf0644b9 100644 (file)
@@ -180,6 +180,18 @@ static struct {
                                  PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
        },
 
+       [PERF_TYPE_HW_CACHE] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
+
        [PERF_TYPE_RAW] = {
                .user_set = false,
 
@@ -1822,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        struct perf_evlist *evlist;
        struct perf_evsel *evsel, *pos;
        int err;
+       static struct perf_evsel_script *es;
 
        err = perf_event__process_attr(tool, event, pevlist);
        if (err)
@@ -1830,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        evlist = *pevlist;
        evsel = perf_evlist__last(*pevlist);
 
+       if (!evsel->priv) {
+               if (scr->per_event_dump) {
+                       evsel->priv = perf_evsel_script__new(evsel,
+                                               scr->session->data);
+               } else {
+                       es = zalloc(sizeof(*es));
+                       if (!es)
+                               return -ENOMEM;
+                       es->fp = stdout;
+                       evsel->priv = es;
+               }
+       }
+
        if (evsel->attr.type >= PERF_TYPE_MAX &&
            evsel->attr.type != PERF_TYPE_SYNTH)
                return 0;
@@ -3018,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
        return set_maps(script);
 }
 
+static int process_feature_event(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       if (event->feat.feat_id < HEADER_LAST_FEATURE)
+               return perf_event__process_feature(tool, event, session);
+       return 0;
+}
+
 #ifdef HAVE_AUXTRACE_SUPPORT
 static int perf_script__process_auxtrace_info(struct perf_tool *tool,
                                              union perf_event *event,
@@ -3062,7 +3097,7 @@ int cmd_script(int argc, const char **argv)
                        .attr            = process_attr,
                        .event_update   = perf_event__process_event_update,
                        .tracing_data    = perf_event__process_tracing_data,
-                       .feature         = perf_event__process_feature,
+                       .feature         = process_feature_event,
                        .build_id        = perf_event__process_build_id,
                        .id_index        = perf_event__process_id_index,
                        .auxtrace_info   = perf_script__process_auxtrace_info,
@@ -3113,8 +3148,9 @@ int cmd_script(int argc, const char **argv)
                     "+field to add and -field to remove."
                     "Valid types: hw,sw,trace,raw,synth. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
-                    "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
+                    "addr,symoff,srcline,period,iregs,uregs,brstack,"
+                    "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
+                    "callindent,insn,insnlen,synth,phys_addr,metric,misc",
                     parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
index 096ccb25c11ff7786c6df0c24b695cd0eff5bec0..05be023c3f0eda0066394651602f5be4ba6823ca 100644 (file)
@@ -65,6 +65,7 @@
 #include "util/tool.h"
 #include "util/string2.h"
 #include "util/metricgroup.h"
+#include "util/top.h"
 #include "asm/bug.h"
 
 #include <linux/time64.h>
@@ -144,6 +145,8 @@ static struct target target = {
 
 typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
 
+#define METRIC_ONLY_LEN 20
+
 static int                     run_count                       =  1;
 static bool                    no_inherit                      = false;
 static volatile pid_t          child_pid                       = -1;
@@ -173,6 +176,7 @@ static struct cpu_map               *aggr_map;
 static aggr_get_id_t           aggr_get_id;
 static bool                    append_file;
 static bool                    interval_count;
+static bool                    interval_clear;
 static const char              *output_name;
 static int                     output_fd;
 static int                     print_free_counters_hint;
@@ -180,6 +184,7 @@ static int                  print_mixed_hw_group_error;
 static u64                     *walltime_run;
 static bool                    ru_display                      = false;
 static struct rusage           ru_data;
+static unsigned int            metric_only_len                 = METRIC_ONLY_LEN;
 
 struct perf_stat {
        bool                     record;
@@ -967,8 +972,6 @@ static void print_metric_csv(void *ctx,
        fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
 }
 
-#define METRIC_ONLY_LEN 20
-
 /* Filter out some columns that don't work well in metrics only mode */
 
 static bool valid_only_metric(const char *unit)
@@ -999,22 +1002,20 @@ static void print_metric_only(void *ctx, const char *color, const char *fmt,
 {
        struct outstate *os = ctx;
        FILE *out = os->fh;
-       int n;
-       char buf[1024];
-       unsigned mlen = METRIC_ONLY_LEN;
+       char buf[1024], str[1024];
+       unsigned mlen = metric_only_len;
 
        if (!valid_only_metric(unit))
                return;
        unit = fixunit(buf, os->evsel, unit);
-       if (color)
-               n = color_fprintf(out, color, fmt, val);
-       else
-               n = fprintf(out, fmt, val);
-       if (n > METRIC_ONLY_LEN)
-               n = METRIC_ONLY_LEN;
        if (mlen < strlen(unit))
                mlen = strlen(unit) + 1;
-       fprintf(out, "%*s", mlen - n, "");
+
+       if (color)
+               mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+       color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+       fprintf(out, "%*s ", mlen, str);
 }
 
 static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
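An illustrative sketch of why print_metric_only() above now widens the field by the escape-sequence length: printf field widths count bytes, so invisible SGR color codes would otherwise eat into the visible padding.

#include <stdio.h>
#include <string.h>

#define RED	"\033[31m"
#define RESET	"\033[0m"

int main(void)
{
	char colored[64];
	int width = 10;				/* desired visible width     */

	snprintf(colored, sizeof(colored), RED "42" RESET);
	width += strlen(RED) + strlen(RESET);	/* add the invisible bytes   */
	printf("[%*s]\n", width, colored);	/* "42" padded as if 10 wide */
	return 0;
}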
@@ -1054,7 +1055,7 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
        if (csv_output)
                fprintf(os->fh, "%s%s", unit, csv_sep);
        else
-               fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
+               fprintf(os->fh, "%*s ", metric_only_len, unit);
 }
 
 static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
@@ -1704,9 +1705,12 @@ static void print_interval(char *prefix, struct timespec *ts)
        FILE *output = stat_config.output;
        static int num_print_interval;
 
+       if (interval_clear)
+               puts(CONSOLE_CLEAR);
+
        sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
-       if (num_print_interval == 0 && !csv_output) {
+       if ((num_print_interval == 0 && !csv_output) || interval_clear) {
                switch (stat_config.aggr_mode) {
                case AGGR_SOCKET:
                        fprintf(output, "#           time socket cpus");
@@ -1719,7 +1723,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                                fprintf(output, "             counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_NONE:
-                       fprintf(output, "#           time CPU");
+                       fprintf(output, "#           time CPU    ");
                        if (!metric_only)
                                fprintf(output, "                counts %*s events\n", unit_width, "unit");
                        break;
@@ -1738,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts)
                }
        }
 
-       if (num_print_interval == 0 && metric_only)
+       if ((num_print_interval == 0 || interval_clear) && metric_only)
                print_metric_headers(" ", true);
        if (++num_print_interval == 25)
                num_print_interval = 0;
@@ -2057,6 +2061,8 @@ static const struct option stat_options[] = {
                    "(overhead is possible for values <= 100ms)"),
        OPT_INTEGER(0, "interval-count", &stat_config.times,
                    "print counts for fixed number of times"),
+       OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+                   "clear screen in between new interval"),
        OPT_UINTEGER(0, "timeout", &stat_config.timeout,
                    "stop workload and print counts after a timeout period in ms (>= 10ms)"),
        OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
@@ -2436,14 +2442,13 @@ static int add_default_attributes(void)
        (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
        (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)                          },
 };
+       struct parse_events_error errinfo;
 
        /* Set attrs if no event is selected and !null_run: */
        if (null_run)
                return 0;
 
        if (transaction_run) {
-               struct parse_events_error errinfo;
-
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
                        err = parse_events(evsel_list, transaction_attrs,
@@ -2454,6 +2459,7 @@ static int add_default_attributes(void)
                                           &errinfo);
                if (err) {
                        fprintf(stderr, "Cannot set up transaction events\n");
+                       parse_events_print_error(&errinfo, transaction_attrs);
                        return -1;
                }
                return 0;
@@ -2479,10 +2485,11 @@ static int add_default_attributes(void)
                    pmu_have_event("msr", "smi")) {
                        if (!force_metric_only)
                                metric_only = true;
-                       err = parse_events(evsel_list, smi_cost_attrs, NULL);
+                       err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
                } else {
                        fprintf(stderr, "To measure SMI cost, it needs "
                                "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+                       parse_events_print_error(&errinfo, smi_cost_attrs);
                        return -1;
                }
                if (err) {
@@ -2517,12 +2524,13 @@ static int add_default_attributes(void)
                if (topdown_attrs[0] && str) {
                        if (warn)
                                arch_topdown_group_warn();
-                       err = parse_events(evsel_list, str, NULL);
+                       err = parse_events(evsel_list, str, &errinfo);
                        if (err) {
                                fprintf(stderr,
                                        "Cannot set up top down events %s: %d\n",
                                        str, err);
                                free(str);
+                               parse_events_print_error(&errinfo, str);
                                return -1;
                        }
                } else {
index 0c6d1002b524eaf62ef62cc32763b041b2f33ba1..ac1bcdc17dae7554f51a780b843605c441c6abbf 100644 (file)
@@ -35,6 +35,7 @@
 #include <sys/mman.h>
 #include <syscall.h> /* for gettid() */
 #include <err.h>
+#include <linux/kernel.h>
 
 #include "jvmti_agent.h"
 #include "../util/jitdump.h"
@@ -249,7 +250,7 @@ void *jvmti_open(void)
        /*
         * jitdump file name
         */
-       snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
 
        fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
        if (fd == -1)
index 17783913d3306a15c13d45745f447aa5ebafb487..215ba30b85343ad1874b1fc52c05fccbd8948bb3 100644 (file)
@@ -1,7 +1,7 @@
 hostprogs := jevents
 
 jevents-y      += json.o jsmn.o jevents.o
-CHOSTFLAGS_jevents.o   = -I$(srctree)/tools/include
+HOSTCFLAGS_jevents.o   = -I$(srctree)/tools/include
 pmu-events-y   += pmu-events.o
 JDIR           =  pmu-events/arch/$(SRCARCH)
 JSON           =  $(shell [ -d $(JDIR) ] &&                            \
index 38dfb720fb6f78757dfe3e7aa8d01014ffdc7927..54ace2f6bc3650388ec1f7e96c7c0b1ea2495232 100644 (file)
@@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value):
     string = ""
 
     if flag_fields[event_name][field_name]:
-       print_delim = 0
-        keys = flag_fields[event_name][field_name]['values'].keys()
-        keys.sort()
-        for idx in keys:
+        print_delim = 0
+        for idx in sorted(flag_fields[event_name][field_name]['values']):
             if not value and not idx:
                 string += flag_fields[event_name][field_name]['values'][idx]
                 break
@@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value):
     string = ""
 
     if symbolic_fields[event_name][field_name]:
-        keys = symbolic_fields[event_name][field_name]['values'].keys()
-        keys.sort()
-        for idx in keys:
+        for idx in sorted(symbolic_fields[event_name][field_name]['values']):
             if not value and not idx:
-               string = symbolic_fields[event_name][field_name]['values'][idx]
+                string = symbolic_fields[event_name][field_name]['values'][idx]
                 break
-           if (value == idx):
-               string = symbolic_fields[event_name][field_name]['values'][idx]
+            if (value == idx):
+                string = symbolic_fields[event_name][field_name]['values'][idx]
                 break
 
     return string
@@ -74,19 +70,17 @@ def trace_flag_str(value):
     string = ""
     print_delim = 0
 
-    keys = trace_flags.keys()
-
-    for idx in keys:
-       if not value and not idx:
-           string += "NONE"
-           break
-
-       if idx and (value & idx) == idx:
-           if print_delim:
-               string += " | ";
-           string += trace_flags[idx]
-           print_delim = 1
-           value &= ~idx
+    for idx in trace_flags:
+        if not value and not idx:
+            string += "NONE"
+            break
+
+        if idx and (value & idx) == idx:
+            if print_delim:
+                string += " | ";
+            string += trace_flags[idx]
+            print_delim = 1
+            value &= ~idx
 
     return string
 
index 81a56cd2b3c166315bfb376d93265bcdf0463e66..21a7a129809443a9231019d955fb5d945278cd46 100755 (executable)
@@ -8,6 +8,7 @@
 # PerfEvent is the base class for all perf event sample, PebsEvent
 # is a HW base Intel x86 PEBS event, and user could add more SW/HW
 # event classes based on requirements.
+from __future__ import print_function
 
 import struct
 
@@ -44,7 +45,8 @@ class PerfEvent(object):
                 PerfEvent.event_num += 1
 
         def show(self):
-                print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
+                print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+                      (self.name, self.symbol, self.comm, self.dso))
 
 #
 # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
index fdd92f699055713e2d1fec1c99a61489e5812a64..cac7b2542ee8c99b814b9a0baddeb60ba6727c0b 100644 (file)
@@ -11,7 +11,7 @@
 try:
        import wx
 except ImportError:
-       raise ImportError, "You need to install the wxpython lib for this script"
+       raise ImportError("You need to install the wxpython lib for this script")
 
 
 class RootFrame(wx.Frame):
index f6c84966e4f89cb2ffcd0ad8d4b94a34d134811c..7384dcb628c4326c3d98dca18639b8439fc5c8ac 100644 (file)
@@ -5,6 +5,7 @@
 # This software may be distributed under the terms of the GNU General
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
+from __future__ import print_function
 
 import errno, os
 
@@ -33,7 +34,7 @@ def nsecs_str(nsecs):
     return str
 
 def add_stats(dict, key, value):
-       if not dict.has_key(key):
+       if key not in dict:
                dict[key] = (value, value, value, 1)
        else:
                min, max, avg, count = dict[key]
@@ -72,10 +73,10 @@ try:
 except:
        if not audit_package_warned:
                audit_package_warned = True
-               print "Install the audit-libs-python package to get syscall names.\n" \
-                    "For example:\n  # apt-get install python-audit (Ubuntu)" \
-                    "\n  # yum install audit-libs-python (Fedora)" \
-                    "\n  etc.\n"
+               print("Install the audit-libs-python package to get syscall names.\n"
+                    "For example:\n  # apt-get install python-audit (Ubuntu)"
+                    "\n  # yum install audit-libs-python (Fedora)"
+                    "\n  etc.\n")
 
 def syscall_name(id):
        try:
index de66cb3b72c9e6be9dc5d884611e0522def92631..3473e7f66081c93104e951afb4553f5b59e03b6d 100644 (file)
@@ -9,13 +9,17 @@
 # This software is distributed under the terms of the GNU General
 # Public License ("GPL") version 2 as published by the Free Software
 # Foundation.
-
+from __future__ import print_function
 
 import os
 import sys
 
 from collections import defaultdict
-from UserList import UserList
+try:
+    from UserList import UserList
+except ImportError:
+    # Python 3: UserList moved to the collections package
+    from collections import UserList
 
 sys.path.append(os.environ['PERF_EXEC_PATH'] + \
        '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -300,7 +304,7 @@ class TimeSliceList(UserList):
                if i == -1:
                        return
 
-               for i in xrange(i, len(self.data)):
+               for i in range(i, len(self.data)):
                        timeslice = self.data[i]
                        if timeslice.start > end:
                                return
@@ -336,8 +340,8 @@ class SchedEventProxy:
                on_cpu_task = self.current_tsk[headers.cpu]
 
                if on_cpu_task != -1 and on_cpu_task != prev_pid:
-                       print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
-                               (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+                       print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+                               headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
 
                threads[prev_pid] = prev_comm
                threads[next_pid] = next_comm
index 2bde505e2e7ea0c2b1157e2734be89d291351e66..dd850a26d579914fde328d16cbb3ade2d3a8b992 100644 (file)
@@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size,
 
 #define for_each_shell_test(dir, base, ent)    \
        while ((ent = readdir(dir)) != NULL)    \
-               if (!is_directory(base, ent))
+               if (!is_directory(base, ent) && ent->d_name[0] != '.')
 
 static const char *shell_tests__dir(char *path, size_t size)
 {
index 7d40770684549d8691d63a403816b76b5bb7c3ad..61211918bfbaa5eaaba1b90c6664ebbe506da98b 100644 (file)
@@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
        return 0;
 }
 
+static bool test__intel_pt_valid(void)
+{
+       return !!perf_pmu__find("intel_pt");
+}
+
 static int test__intel_pt(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1375,6 +1380,7 @@ struct evlist_test {
        const char *name;
        __u32 type;
        const int id;
+       bool (*valid)(void);
        int (*check)(struct perf_evlist *evlist);
 };
 
@@ -1648,6 +1654,7 @@ static struct evlist_test test__events[] = {
        },
        {
                .name  = "intel_pt//u",
+               .valid = test__intel_pt_valid,
                .check = test__intel_pt,
                .id    = 52,
        },
@@ -1686,17 +1693,24 @@ static struct terms_test test__terms[] = {
 
 static int test_event(struct evlist_test *e)
 {
+       struct parse_events_error err = { .idx = 0, };
        struct perf_evlist *evlist;
        int ret;
 
+       if (e->valid && !e->valid()) {
+               pr_debug("... SKIP");
+               return 0;
+       }
+
        evlist = perf_evlist__new();
        if (evlist == NULL)
                return -ENOMEM;
 
-       ret = parse_events(evlist, e->name, NULL);
+       ret = parse_events(evlist, e->name, &err);
        if (ret) {
-               pr_debug("failed to parse event '%s', err %d\n",
-                        e->name, ret);
+               pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+                        e->name, ret, err.str);
+               parse_events_print_error(&err, e->name);
        } else {
                ret = e->check(evlist);
        }
@@ -1714,10 +1728,11 @@ static int test_events(struct evlist_test *events, unsigned cnt)
        for (i = 0; i < cnt; i++) {
                struct evlist_test *e = &events[i];
 
-               pr_debug("running test %d '%s'\n", e->id, e->name);
+               pr_debug("running test %d '%s'", e->id, e->name);
                ret1 = test_event(e);
                if (ret1)
                        ret2 = ret1;
+               pr_debug("\n");
        }
 
        return ret2;
@@ -1799,7 +1814,7 @@ static int test_pmu_events(void)
        }
 
        while (!ret && (ent = readdir(dir))) {
-               struct evlist_test e;
+               struct evlist_test e = { .id = 0, };
                char name[2 * NAME_MAX + 1 + 12 + 3];
 
                /* Names containing . are special and cannot be used directly */
index 2630570396937fc4b26b844e5bfefc0e74d16409..94e513e62b34f378827cd5a2bdf4984cc00a33f0 100755 (executable)
@@ -14,35 +14,40 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1
 nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
 
 trace_libc_inet_pton_backtrace() {
-       idx=0
-       expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
-       expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
+
+       expected=`mktemp -u /tmp/expected.XXX`
+
+       echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected
+       echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
        case "$(uname -m)" in
        s390x)
                eventattr='call-graph=dwarf,max-stack=4'
-               expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
-               expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
-               expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+               echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+               echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+               echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        *)
                eventattr='max-stack=3'
-               expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$"
-               expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+               echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+               echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
                ;;
        esac
 
-       file=`mktemp -u /tmp/perf.data.XXX`
+       perf_data=`mktemp -u /tmp/perf.data.XXX`
+       perf_script=`mktemp -u /tmp/perf.script.XXX`
+       perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+       perf script -i $perf_data > $perf_script
 
-       perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1
-       perf script -i $file | while read line ; do
+       exec 3<$perf_script
+       exec 4<$expected
+       while read line <&3 && read -r pattern <&4; do
+               [ -z "$pattern" ] && break
                echo $line
-               echo "$line" | egrep -q "${expected[$idx]}"
+               echo "$line" | egrep -q "$pattern"
                if [ $? -ne 0 ] ; then
-                       printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line"
+                       printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
                        exit 1
                fi
-               let idx+=1
-               [ -z "${expected[$idx]}" ] && break
        done
 
        # If any statements are executed from this point onwards,
@@ -58,6 +63,6 @@ skip_if_no_perf_probe && \
 perf probe -q $libc inet_pton && \
 trace_libc_inet_pton_backtrace
 err=$?
-rm -f ${file}
+rm -f ${perf_data} ${perf_script} ${expected}
 perf probe -q -d probe_libc:inet_pton
 exit $err
index 55ad9793d5443da34ee4c6c76ea5d7fcba80f6fd..4ce276efe6b4c1855e904a30a2e9efcb6ed01a4e 100755 (executable)
@@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2
 file=$(mktemp /tmp/temporary_file.XXXXX)
 
 trace_open_vfs_getname() {
-       evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+       evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
        perf trace -e $evts touch $file 2>&1 | \
        egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
 }
index 40e30a26b23cc260536977fb9a0b17db54aa207a..9497d02f69e6669d8ca19ed753beb1a8477f4006 100644 (file)
@@ -45,6 +45,7 @@ static int session_write_header(char *path)
 
        perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
        perf_header__set_feat(&session->header, HEADER_NRCPUS);
+       perf_header__set_feat(&session->header, HEADER_ARCH);
 
        session->header.data_size += DATA_SIZE;
 
index b085f1b3e34dacdd4764d0704e0c65c6644debf1..4ab663ec3e5ea108ee7df9a189ecc2bc4e996843 100644 (file)
@@ -382,7 +382,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
                        gtk_tree_store_set(store, &iter, col_idx++, s, -1);
                }
 
-               if (hists__has_callchains(hists) &&
+               if (hist_entry__has_callchains(h) &&
                    symbol_conf.use_callchain && hists__has(hists, sym)) {
                        if (callchain_param.mode == CHAIN_GRAPH_REL)
                                total = symbol_conf.cumulate_callchain ?
index bf31ceab33bd487d0021ccd9818384dde51fd371..89512504551b0b198a44ebe30e81d0972b86ce77 100644 (file)
@@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module)
        raw_svector_ostream ostream(*Buffer);
 
        legacy::PassManager PM;
-       if (TargetMachine->addPassesToEmitFile(PM, ostream,
-                                              TargetMachine::CGFT_ObjectFile)) {
+       bool NotAdded;
+#if CLANG_VERSION_MAJOR < 7
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream,
+                                                     TargetMachine::CGFT_ObjectFile);
+#else
+       NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr,
+                                                     TargetMachine::CGFT_ObjectFile);
+#endif
+       if (NotAdded) {
                llvm::errs() << "TargetMachine can't emit a file of this type\n";
                return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);
        }
index 540cd2dcd3e7098b7335c534a0aa7534ba87c889..653ff65aa2c37991763045c1c1bdb9f76f5d473f 100644 (file)
@@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
        int cpu_nr = ff->ph->env.nr_cpus_avail;
        u64 size = 0;
        struct perf_header *ph = ff->ph;
+       bool do_core_id_test = true;
 
        ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
        if (!ph->env.cpu)
@@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                return 0;
        }
 
+       /* On s390 the socket_id number is not related to the number of CPUs.
+        * The socket_id number might be higher than the number of CPUs.
+        * This depends on the configuration.
+        */
+       if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+               do_core_id_test = false;
+
        for (i = 0; i < (u32)cpu_nr; i++) {
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
@@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
                if (do_read_u32(ff, &nr))
                        goto free_cpu;
 
-               if (nr != (u32)-1 && nr > (u32)cpu_nr) {
+               if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
                        pr_debug("socket_id number is too big."
                                 "You may need to upgrade the perf tool.\n");
                        goto free_cpu;
@@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool,
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return 0;
        }
-       if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) {
+       if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
                pr_warning("invalid record type %d in pipe-mode\n", type);
                return -1;
        }
index 52e8fda93a4723f8b19b8fcaf7e6635aae505a6e..828cb9794c7668c9e48d3b9fa527cff394580923 100644 (file)
@@ -370,9 +370,11 @@ void hists__delete_entries(struct hists *hists)
 
 static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
-                           bool sample_self)
+                           bool sample_self,
+                           size_t callchain_size)
 {
        *he = *template;
+       he->callchain_size = callchain_size;
 
        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
@@ -473,7 +475,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
 
        he = ops->new(callchain_size);
        if (he) {
-               err = hist_entry__init(he, template, sample_self);
+               err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
@@ -619,9 +621,11 @@ __hists__add_entry(struct hists *hists,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
-       };
+       }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
 
-       return hists__findnew_entry(hists, &entry, al, sample_self);
+       if (!hists->has_callchains && he && he->callchain_size != 0)
+               hists->has_callchains = true;
+       return he;
 }
 
 struct hist_entry *hists__add_entry(struct hists *hists,
index 06607c434949da48b53099d6129a53e71037711a..73049f7f0f6039e551daedb234d0f1d0a24b544e 100644 (file)
@@ -85,6 +85,7 @@ struct hists {
        struct events_stats     stats;
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
+       bool                    has_callchains;
        int                     socket_filter;
        struct perf_hpp_list    *hpp_list;
        struct list_head        hpp_formats;
@@ -222,8 +223,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
 
 static __pure inline bool hists__has_callchains(struct hists *hists)
 {
-       const struct perf_evsel *evsel = hists_to_evsel(hists);
-       return evsel__has_callchain(evsel);
+       return hists->has_callchains;
 }
 
 int hists__init(void);
index ba4c9dd186434a33c8c33a59ab8884fd7c679dd3..d426761a549d02d67756c541ea7ab0b2a0495e68 100644 (file)
@@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
                if (len < offs)
                        return INTEL_PT_NEED_MORE_BYTES;
                byte = buf[offs++];
-               payload |= (byte >> 1) << shift;
+               payload |= ((uint64_t)byte >> 1) << shift;
        }
 
        packet->type = INTEL_PT_CYC;
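A standalone demonstration of the integer-promotion bug fixed above: without the 64-bit cast the shift happens at 32-bit width (the CYC decoder's shift grows in steps and can reach 32 and beyond), so high payload bits are lost. A shift of 28 is used here so the truncation stays well defined.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned char byte = 0x80;
	unsigned int shift = 28;

	uint32_t promoted = byte >> 1;		/* what plain C promotion yields */
	uint64_t bad  = promoted << shift;	/* wraps in 32 bits: 0           */
	uint64_t good = ((uint64_t)byte >> 1) << shift;

	printf("bad=%#llx good=%#llx\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}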
index 976e658e38dce762163bb583f1ab02b39231a742..5e94857dfca2c8c47ae289b79dd2a717ae4a82b5 100644 (file)
@@ -266,16 +266,16 @@ static const char *kinc_fetch_script =
 "#!/usr/bin/env sh\n"
 "if ! test -d \"$KBUILD_DIR\"\n"
 "then\n"
-"      exit -1\n"
+"      exit 1\n"
 "fi\n"
 "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
 "then\n"
-"      exit -1\n"
+"      exit 1\n"
 "fi\n"
 "TMPDIR=`mktemp -d`\n"
 "if test -z \"$TMPDIR\"\n"
 "then\n"
-"    exit -1\n"
+"    exit 1\n"
 "fi\n"
 "cat << EOF > $TMPDIR/Makefile\n"
 "obj-y := dummy.o\n"
index 155d2570274fdae6fbe7caea7bfa1e07953f7948..da8fe57691b8cd0d4c1c22d0cf0fa62595385ac5 100644 (file)
@@ -227,11 +227,16 @@ event_def: event_pmu |
 event_pmu:
 PE_NAME opt_pmu_config
 {
+       struct parse_events_state *parse_state = _parse_state;
+       struct parse_events_error *error = parse_state->error;
        struct list_head *list, *orig_terms, *terms;
 
        if (parse_events_copy_term_list($2, &orig_terms))
                YYABORT;
 
+       if (error)
+               error->idx = @1.first_column;
+
        ALLOC_LIST(list);
        if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
                struct perf_pmu *pmu = NULL;
index d2fb597c9a8c78d8e8fd8a9890e67f8b8f4432d7..3ba6a1742f9198b2b5279511939d42658ba8a184 100644 (file)
@@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
        return 0;
 }
 
+static void perf_pmu_assign_str(char *name, const char *field, char **old_str,
+                               char **new_str)
+{
+       if (!*old_str)
+               goto set_new;
+
+       if (*new_str) { /* Have new string, check with old */
+               if (strcasecmp(*old_str, *new_str))
+                       pr_debug("alias %s differs in field '%s'\n",
+                                name, field);
+               zfree(old_str);
+       } else          /* Nothing new --> keep old string */
+               return;
+set_new:
+       *old_str = *new_str;
+       *new_str = NULL;
+}
+
+static void perf_pmu_update_alias(struct perf_pmu_alias *old,
+                                 struct perf_pmu_alias *newalias)
+{
+       perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc);
+       perf_pmu_assign_str(old->name, "long_desc", &old->long_desc,
+                           &newalias->long_desc);
+       perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic);
+       perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr,
+                           &newalias->metric_expr);
+       perf_pmu_assign_str(old->name, "metric_name", &old->metric_name,
+                           &newalias->metric_name);
+       perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str);
+       old->scale = newalias->scale;
+       old->per_pkg = newalias->per_pkg;
+       old->snapshot = newalias->snapshot;
+       memcpy(old->unit, newalias->unit, sizeof(old->unit));
+}
+
+/* Delete an alias entry. */
+static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+{
+       zfree(&newalias->name);
+       zfree(&newalias->desc);
+       zfree(&newalias->long_desc);
+       zfree(&newalias->topic);
+       zfree(&newalias->str);
+       zfree(&newalias->metric_expr);
+       zfree(&newalias->metric_name);
+       parse_events_terms__purge(&newalias->terms);
+       free(newalias);
+}
+
+/* Merge an alias: search the alias list, and if this name is already
+ * present, merge both entries to combine all information.
+ */
+static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
+                                struct list_head *alist)
+{
+       struct perf_pmu_alias *a;
+
+       list_for_each_entry(a, alist, list) {
+               if (!strcasecmp(newalias->name, a->name)) {
+                       perf_pmu_update_alias(a, newalias);
+                       perf_pmu_free_alias(newalias);
+                       return true;
+               }
+       }
+       return false;
+}
+
 static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *desc, char *val,
                                 char *long_desc, char *topic,
@@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                                 char *metric_expr,
                                 char *metric_name)
 {
+       struct parse_events_term *term;
        struct perf_pmu_alias *alias;
        int ret;
        int num;
+       char newval[256];
 
        alias = malloc(sizeof(*alias));
        if (!alias)
@@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                return ret;
        }
 
+       /* Scan event and remove leading zeroes, spaces and newlines; some
+        * platforms have terms specified as
+        * event=0x0091 (read from files ../<PMU>/events/<FILE>)
+        * and terms specified as event=0x91 (read from JSON files).
+        *
+        * Rebuild string to make alias->str member comparable.
+        */
+       memset(newval, 0, sizeof(newval));
+       ret = 0;
+       list_for_each_entry(term, &alias->terms, list) {
+               if (ret)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        ",");
+               if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%#x", term->config, term->val.num);
+               else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+                       ret += scnprintf(newval + ret, sizeof(newval) - ret,
+                                        "%s=%s", term->config, term->val.str);
+       }
+
        alias->name = strdup(name);
        if (dir) {
                /*
@@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
                snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
        }
        alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
-       alias->str = strdup(val);
+       alias->str = strdup(newval);
 
-       list_add_tail(&alias->list, list);
+       if (!perf_pmu_merge_alias(alias, list))
+               list_add_tail(&alias->list, list);
 
        return 0;
 }
@@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
 
        buf[ret] = 0;
 
+       /* Remove trailing newline from sysfs file */
+       rtrim(buf);
+
        return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
                                     NULL, NULL, NULL);
 }
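A simplified illustration of the normalization above (plain snprintf() and int values stand in for the real term list): printing the parsed number back with %#x canonicalizes event=0x0091 and event=0x91 to the same string, making alias->str comparable.

#include <stdio.h>

int main(void)
{
	unsigned int from_sysfs = 0x0091;	/* "event=0x0091" in sysfs */
	unsigned int from_json  = 0x91;		/* "event=0x91" in JSON    */
	char a[32], b[32];

	snprintf(a, sizeof(a), "event=%#x", from_sysfs);
	snprintf(b, sizeof(b), "event=%#x", from_json);
	printf("%s == %s\n", a, b);		/* both print "event=0x91" */
	return 0;
}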
index 46e9e19ab1ac43a9bee5349c8826d4c990c976ad..bc32e57d17be76bddbc561bcbafe3b06e5295461 100644 (file)
@@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
        if (_PyTuple_Resize(&t, n) == -1)
                Py_FatalError("error resizing Python tuple");
 
-       if (!dict) {
+       if (!dict)
                call_object(handler, t, handler_name);
-       } else {
+       else
                call_object(handler, t, default_handler_name);
-               Py_DECREF(dict);
-       }
 
-       Py_XDECREF(all_entries_dict);
        Py_DECREF(t);
 }
 
@@ -1235,7 +1232,6 @@ static void python_process_general_event(struct perf_sample *sample,
 
        call_object(handler, t, handler_name);
 
-       Py_DECREF(dict);
        Py_DECREF(t);
 }
 
@@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "# See the perf-script-python Documentation for the list "
                "of available functions.\n\n");
 
+       fprintf(ofp, "from __future__ import print_function\n\n");
        fprintf(ofp, "import os\n");
        fprintf(ofp, "import sys\n\n");
 
@@ -1636,10 +1633,10 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
        fprintf(ofp, "from Core import *\n\n\n");
 
        fprintf(ofp, "def trace_begin():\n");
-       fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+       fprintf(ofp, "\tprint(\"in trace_begin\")\n\n");
 
        fprintf(ofp, "def trace_end():\n");
-       fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+       fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
 
        while ((event = trace_find_next_event(pevent, event))) {
                fprintf(ofp, "def %s__%s(", event->system, event->name);
@@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                        "common_secs, common_nsecs,\n\t\t\t"
                        "common_pid, common_comm)\n\n");
 
-               fprintf(ofp, "\t\tprint \"");
+               fprintf(ofp, "\t\tprint(\"");
 
                not_first = 0;
                count = 0;
@@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                                fprintf(ofp, "%s", f->name);
                }
 
-               fprintf(ofp, ")\n\n");
+               fprintf(ofp, "))\n\n");
 
-               fprintf(ofp, "\t\tprint 'Sample: {'+"
-                       "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+               fprintf(ofp, "\t\tprint('Sample: {'+"
+                       "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
 
                fprintf(ofp, "\t\tfor node in common_callchain:");
                fprintf(ofp, "\n\t\t\tif 'sym' in node:");
-               fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
+               fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))");
                fprintf(ofp, "\n\t\t\telse:");
-               fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n");
-               fprintf(ofp, "\t\tprint \"\\n\"\n\n");
+               fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n");
+               fprintf(ofp, "\t\tprint()\n\n");
 
        }
 
        fprintf(ofp, "def trace_unhandled(event_name, context, "
                "event_fields_dict, perf_sample_dict):\n");
 
-       fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n");
-       fprintf(ofp, "\t\tprint 'Sample: {'+"
-               "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+       fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n");
+       fprintf(ofp, "\t\tprint('Sample: {'+"
+               "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
 
        fprintf(ofp, "def print_header("
                "event_name, cpu, secs, nsecs, pid, comm):\n"
-               "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
-               "(event_name, cpu, secs, nsecs, pid, comm),\n\n");
+               "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+               "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n");
 
        fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n"
                "\treturn delimiter.join"
index 7cf2d5cc038ea07accaf5ef631c9b23b7b0c207b..8bf302cafcecd6b285d68e2b2c56130019dea101 100644 (file)
@@ -112,6 +112,8 @@ struct hist_entry {
 
        char                    level;
        u8                      filtered;
+
+       u16                     callchain_size;
        union {
                /*
                 * Since perf diff only supports the stdio output, TUI
@@ -153,7 +155,7 @@ struct hist_entry {
 
 static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
 {
-       return hists__has_callchains(he->hists);
+       return he->callchain_size != 0;
 }
 
 static inline bool hist_entry__has_pairs(struct hist_entry *he)
index ca9ef70176249294644a1beeea2b141086cfe75c..d39e4ff7d0bf9256b4b5ff2c03b2cd4e24307fe4 100644 (file)
@@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary.  Note that option
 .PP
 \fB--hide column\fP do not show the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--hide sysfs" to hide the sysfs statistics columns as a group.
 .PP
-\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default.  Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds".
+\fB--enable column\fP show the specified built-in columns, which are otherwise disabled by default.  Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC".
 The column name "all" can be used to enable all disabled-by-default built-in counters.
 .PP
 \fB--show column\fP show only the specified built-in columns.  May be invoked multiple times, or with a comma-separated list of column names.  Use "--show sysfs" to show the sysfs statistics columns as a group.
index d6cff3070ebde60d2fa9a54deec6c147b6bda484..4d14bbbf9b639b7152b73d75363672826fd274da 100644 (file)
@@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window;       /* IA32_HWP_REQUEST[bits 41:32] */
 unsigned int has_hwp_epp;              /* IA32_HWP_REQUEST[bits 31:24] */
 unsigned int has_hwp_pkg;              /* IA32_HWP_REQUEST_PKG */
 unsigned int has_misc_feature_control;
+unsigned int first_counter_read = 1;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -170,6 +171,8 @@ struct thread_data {
        unsigned long long  irq_count;
        unsigned int smi_count;
        unsigned int cpu_id;
+       unsigned int apic_id;
+       unsigned int x2apic_id;
        unsigned int flags;
 #define CPU_IS_FIRST_THREAD_IN_CORE    0x2
 #define CPU_IS_FIRST_CORE_IN_PACKAGE   0x4
@@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
 }
 
 /*
- * Each string in this array is compared in --show and --hide cmdline.
- * Thus, strings that are proper sub-sets must follow their more specific peers.
+ * This list matches the column headers, except:
+ * 1. built-in only; the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, so that strings containing them
+ *    do not falsely match them for --show and --hide.
  */
 struct msr_counter bic[] = {
        { 0x0, "usec" },
        { 0x0, "Time_Of_Day_Seconds" },
        { 0x0, "Package" },
+       { 0x0, "Node" },
        { 0x0, "Avg_MHz" },
+       { 0x0, "Busy%" },
        { 0x0, "Bzy_MHz" },
        { 0x0, "TSC_MHz" },
        { 0x0, "IRQ" },
        { 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
-       { 0x0, "Busy%" },
+       { 0x0, "sysfs" },
        { 0x0, "CPU%c1" },
        { 0x0, "CPU%c3" },
        { 0x0, "CPU%c6" },
@@ -424,73 +431,73 @@ struct msr_counter bic[] = {
        { 0x0, "Cor_J" },
        { 0x0, "GFX_J" },
        { 0x0, "RAM_J" },
-       { 0x0, "Core" },
-       { 0x0, "CPU" },
        { 0x0, "Mod%c6" },
-       { 0x0, "sysfs" },
        { 0x0, "Totl%C0" },
        { 0x0, "Any%C0" },
        { 0x0, "GFX%C0" },
        { 0x0, "CPUGFX%" },
-       { 0x0, "Node%" },
+       { 0x0, "Core" },
+       { 0x0, "CPU" },
+       { 0x0, "APIC" },
+       { 0x0, "X2APIC" },
 };
 
-
-
 #define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
 #define        BIC_USEC        (1ULL << 0)
 #define        BIC_TOD         (1ULL << 1)
 #define        BIC_Package     (1ULL << 2)
-#define        BIC_Avg_MHz     (1ULL << 3)
-#define        BIC_Bzy_MHz     (1ULL << 4)
-#define        BIC_TSC_MHz     (1ULL << 5)
-#define        BIC_IRQ         (1ULL << 6)
-#define        BIC_SMI         (1ULL << 7)
-#define        BIC_Busy        (1ULL << 8)
-#define        BIC_CPU_c1      (1ULL << 9)
-#define        BIC_CPU_c3      (1ULL << 10)
-#define        BIC_CPU_c6      (1ULL << 11)
-#define        BIC_CPU_c7      (1ULL << 12)
-#define        BIC_ThreadC     (1ULL << 13)
-#define        BIC_CoreTmp     (1ULL << 14)
-#define        BIC_CoreCnt     (1ULL << 15)
-#define        BIC_PkgTmp      (1ULL << 16)
-#define        BIC_GFX_rc6     (1ULL << 17)
-#define        BIC_GFXMHz      (1ULL << 18)
-#define        BIC_Pkgpc2      (1ULL << 19)
-#define        BIC_Pkgpc3      (1ULL << 20)
-#define        BIC_Pkgpc6      (1ULL << 21)
-#define        BIC_Pkgpc7      (1ULL << 22)
-#define        BIC_Pkgpc8      (1ULL << 23)
-#define        BIC_Pkgpc9      (1ULL << 24)
-#define        BIC_Pkgpc10     (1ULL << 25)
-#define BIC_CPU_LPI    (1ULL << 26)
-#define BIC_SYS_LPI    (1ULL << 27)
-#define        BIC_PkgWatt     (1ULL << 26)
-#define        BIC_CorWatt     (1ULL << 27)
-#define        BIC_GFXWatt     (1ULL << 28)
-#define        BIC_PkgCnt      (1ULL << 29)
-#define        BIC_RAMWatt     (1ULL << 30)
-#define        BIC_PKG__       (1ULL << 31)
-#define        BIC_RAM__       (1ULL << 32)
-#define        BIC_Pkg_J       (1ULL << 33)
-#define        BIC_Cor_J       (1ULL << 34)
-#define        BIC_GFX_J       (1ULL << 35)
-#define        BIC_RAM_J       (1ULL << 36)
-#define        BIC_Core        (1ULL << 37)
-#define        BIC_CPU         (1ULL << 38)
-#define        BIC_Mod_c6      (1ULL << 39)
-#define        BIC_sysfs       (1ULL << 40)
-#define        BIC_Totl_c0     (1ULL << 41)
-#define        BIC_Any_c0      (1ULL << 42)
-#define        BIC_GFX_c0      (1ULL << 43)
-#define        BIC_CPUGFX      (1ULL << 44)
-#define        BIC_Node        (1ULL << 45)
-
-#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD)
+#define        BIC_Node        (1ULL << 3)
+#define        BIC_Avg_MHz     (1ULL << 4)
+#define        BIC_Busy        (1ULL << 5)
+#define        BIC_Bzy_MHz     (1ULL << 6)
+#define        BIC_TSC_MHz     (1ULL << 7)
+#define        BIC_IRQ         (1ULL << 8)
+#define        BIC_SMI         (1ULL << 9)
+#define        BIC_sysfs       (1ULL << 10)
+#define        BIC_CPU_c1      (1ULL << 11)
+#define        BIC_CPU_c3      (1ULL << 12)
+#define        BIC_CPU_c6      (1ULL << 13)
+#define        BIC_CPU_c7      (1ULL << 14)
+#define        BIC_ThreadC     (1ULL << 15)
+#define        BIC_CoreTmp     (1ULL << 16)
+#define        BIC_CoreCnt     (1ULL << 17)
+#define        BIC_PkgTmp      (1ULL << 18)
+#define        BIC_GFX_rc6     (1ULL << 19)
+#define        BIC_GFXMHz      (1ULL << 20)
+#define        BIC_Pkgpc2      (1ULL << 21)
+#define        BIC_Pkgpc3      (1ULL << 22)
+#define        BIC_Pkgpc6      (1ULL << 23)
+#define        BIC_Pkgpc7      (1ULL << 24)
+#define        BIC_Pkgpc8      (1ULL << 25)
+#define        BIC_Pkgpc9      (1ULL << 26)
+#define        BIC_Pkgpc10     (1ULL << 27)
+#define BIC_CPU_LPI    (1ULL << 28)
+#define BIC_SYS_LPI    (1ULL << 29)
+#define        BIC_PkgWatt     (1ULL << 30)
+#define        BIC_CorWatt     (1ULL << 31)
+#define        BIC_GFXWatt     (1ULL << 32)
+#define        BIC_PkgCnt      (1ULL << 33)
+#define        BIC_RAMWatt     (1ULL << 34)
+#define        BIC_PKG__       (1ULL << 35)
+#define        BIC_RAM__       (1ULL << 36)
+#define        BIC_Pkg_J       (1ULL << 37)
+#define        BIC_Cor_J       (1ULL << 38)
+#define        BIC_GFX_J       (1ULL << 39)
+#define        BIC_RAM_J       (1ULL << 40)
+#define        BIC_Mod_c6      (1ULL << 41)
+#define        BIC_Totl_c0     (1ULL << 42)
+#define        BIC_Any_c0      (1ULL << 43)
+#define        BIC_GFX_c0      (1ULL << 44)
+#define        BIC_CPUGFX      (1ULL << 45)
+#define        BIC_Core        (1ULL << 46)
+#define        BIC_CPU         (1ULL << 47)
+#define        BIC_APIC        (1ULL << 48)
+#define        BIC_X2APIC      (1ULL << 49)
+
+#define BIC_DISABLED_BY_DEFAULT        (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
 unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs;
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
 
 #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
 #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
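Each built-in column is a single bit in a 64-bit mask, and a column is printed only when it is both enabled (not hidden and not off by default) and present (detected on the running system). A self-contained sketch of that enable/present logic, using illustrative bit values rather than turbostat's real ones:

#include <stdio.h>

#define BIC_EX_APIC	(1ULL << 0)	/* illustrative bits, not turbostat's values */
#define BIC_EX_USEC	(1ULL << 1)

static unsigned long long ex_enabled = ~BIC_EX_APIC;	/* APIC hidden by default */
static unsigned long long ex_present = BIC_EX_APIC | BIC_EX_USEC;

#define EX_DO_BIC(b)	((ex_enabled & ex_present & (b)) != 0)

int main(void)
{
	printf("APIC shown: %d\n", (int)EX_DO_BIC(BIC_EX_APIC));	/* 0: hidden */
	ex_enabled |= BIC_EX_APIC;					/* like --enable APIC */
	printf("APIC shown: %d\n", (int)EX_DO_BIC(BIC_EX_APIC));	/* 1: shown */
	return 0;
}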
@@ -517,17 +524,34 @@ void help(void)
        "when COMMAND completes.\n"
        "If no COMMAND is specified, turbostat wakes every 5-seconds\n"
        "to print statistics, until interrupted.\n"
-       "--add          add a counter\n"
-       "               eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
-       "--cpu  cpu-set limit output to summary plus cpu-set:\n"
-       "               {core | package | j,k,l..m,n-p }\n"
-       "--quiet        skip decoding system configuration header\n"
-       "--interval sec.subsec  Override default 5-second measurement interval\n"
-       "--help         print this help message\n"
-       "--list         list column headers only\n"
-       "--num_iterations num   number of the measurement iterations\n"
-       "--out file     create or truncate \"file\" for all output\n"
-       "--version      print version information\n"
+       "  -a, --add    add a counter\n"
+       "                 eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
+       "  -c, --cpu    cpu-set limit output to summary plus cpu-set:\n"
+       "                 {core | package | j,k,l..m,n-p }\n"
+       "  -d, --debug  displays usec, Time_Of_Day_Seconds and more debugging\n"
+       "  -D, --Dump   displays the raw counter values\n"
+       "  -e, --enable [all | column]\n"
+       "               shows all or the specified disabled column\n"
+       "  -H, --hide [column|column,column,...]\n"
+       "               hide the specified column(s)\n"
+       "  -i, --interval sec.subsec\n"
+       "               Override default 5-second measurement interval\n"
+       "  -J, --Joules displays energy in Joules instead of Watts\n"
+       "  -l, --list   list column headers only\n"
+       "  -n, --num_iterations num\n"
+       "               number of the measurement iterations\n"
+       "  -o, --out file\n"
+       "               create or truncate \"file\" for all output\n"
+       "  -q, --quiet  skip decoding system configuration header\n"
+       "  -s, --show [column|column,column,...]\n"
+       "               show only the specified column(s)\n"
+       "  -S, --Summary\n"
+       "               limits output to 1-line system summary per interval\n"
+       "  -T, --TCC temperature\n"
+       "               sets the Thermal Control Circuit temperature in\n"
+       "                 degrees Celsius\n"
+       "  -h, --help   print this help message\n"
+       "  -v, --version        print version information\n"
        "\n"
        "For more help, run \"man turbostat\"\n");
 }
@@ -601,6 +625,10 @@ void print_header(char *delim)
                outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
        if (DO_BIC(BIC_CPU))
                outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_APIC))
+               outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
+       if (DO_BIC(BIC_X2APIC))
+               outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Avg_MHz))
                outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
        if (DO_BIC(BIC_Busy))
@@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
        } else {
                if (DO_BIC(BIC_Package)) {
                        if (p)
@@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
                }
                if (DO_BIC(BIC_CPU))
                        outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
+               if (DO_BIC(BIC_APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+               if (DO_BIC(BIC_X2APIC))
+                       outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
        }
 
        if (DO_BIC(BIC_Avg_MHz))
@@ -1231,6 +1267,12 @@ delta_thread(struct thread_data *new, struct thread_data *old,
        int i;
        struct msr_counter *mp;
 
+       /* cpuid is read only on the first counter read; copy its results forward */
+       if (DO_BIC(BIC_APIC))
+               new->apic_id = old->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               new->x2apic_id = old->x2apic_id;
+
        /*
         * the timestamps from start of measurement interval are in "old"
         * the timestamp from end of measurement interval are in "new"
@@ -1393,6 +1435,12 @@ int sum_counters(struct thread_data *t, struct core_data *c,
        int i;
        struct msr_counter *mp;
 
+       /* copy the unchanging apic_ids */
+       if (DO_BIC(BIC_APIC))
+               average.threads.apic_id = t->apic_id;
+       if (DO_BIC(BIC_X2APIC))
+               average.threads.x2apic_id = t->x2apic_id;
+
        /* remember first tv_begin */
        if (average.threads.tv_begin.tv_sec == 0)
                average.threads.tv_begin = t->tv_begin;
@@ -1619,6 +1667,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
        return 0;
 }
 
+void get_apic_id(struct thread_data *t)
+{
+       unsigned int eax, ebx, ecx, edx, max_level;
+
+       eax = ebx = ecx = edx = 0;
+
+       if (!genuine_intel)
+               return;
+
+       __cpuid(0, max_level, ebx, ecx, edx);
+
+       __cpuid(1, eax, ebx, ecx, edx);
+       t->apic_id = (ebx >> 24) & 0xf;
+
+       if (max_level < 0xb)
+               return;
+
+       if (!DO_BIC(BIC_X2APIC))
+               return;
+
+       ecx = 0;
+       __cpuid(0xb, eax, ebx, ecx, edx);
+       t->x2apic_id = edx;
+
+       if (debug && (t->apic_id != t->x2apic_id))
+               fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+}
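get_apic_id() takes the initial APIC ID from CPUID leaf 1 (EBX bits 31:24) and, when leaf 0xB is available, the full 32-bit x2APIC ID from that leaf's EDX. The same lookups in a standalone x86 sketch using GCC/Clang's <cpuid.h> helpers; note the values are only meaningful for the CPU the thread currently runs on, which is why turbostat migrates to the target CPU first:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid(1, eax, ebx, ecx, edx);
	printf("apic_id:   %u\n", (ebx >> 24) & 0xff);	/* CPUID.1 EBX[31:24] */

	if (__get_cpuid_max(0, NULL) >= 0xb) {
		__cpuid_count(0xb, 0, eax, ebx, ecx, edx);
		printf("x2apic_id: %u\n", edx);		/* CPUID.0xB EDX */
	}
	return 0;
}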
+
 /*
  * get_counters(...)
  * migrate to cpu
@@ -1632,7 +1708,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        struct msr_counter *mp;
        int i;
 
-
        gettimeofday(&t->tv_begin, (struct timezone *)NULL);
 
        if (cpu_migrate(cpu)) {
@@ -1640,6 +1715,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
                return -1;
        }
 
+       if (first_counter_read)
+               get_apic_id(t);
 retry:
        t->tsc = rdtsc();       /* we are running on local CPU of interest */
 
@@ -2432,6 +2509,12 @@ void set_node_data(void)
                if (pni[pkg].count > topo.nodes_per_pkg)
                        topo.nodes_per_pkg = pni[0].count;
 
+       /* Fake one node per package for machines that don't
+        * expose nodes, to avoid -nan results.
+        */
+       if (topo.nodes_per_pkg == 0)
+               topo.nodes_per_pkg = 1;
+
        for (cpu = 0; cpu < topo.num_cpus; cpu++) {
                pkg = cpus[cpu].physical_package_id;
                node = cpus[cpu].physical_node_id;
@@ -2879,6 +2962,7 @@ void do_sleep(void)
        }
 }
 
+
 void turbostat_loop()
 {
        int retval;
@@ -2892,6 +2976,7 @@ restart:
 
        snapshot_proc_sysfs_files();
        retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (retval < -1) {
                exit(retval);
        } else if (retval == -1) {
@@ -4392,7 +4477,7 @@ void process_cpuid()
        if (!quiet) {
                fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
                        max_level, family, model, stepping, family, model, stepping);
-               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
+               fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
                        ecx & (1 << 0) ? "SSE3" : "-",
                        ecx & (1 << 3) ? "MONITOR" : "-",
                        ecx & (1 << 6) ? "SMX" : "-",
@@ -4401,6 +4486,7 @@ void process_cpuid()
                        edx & (1 << 4) ? "TSC" : "-",
                        edx & (1 << 5) ? "MSR" : "-",
                        edx & (1 << 22) ? "ACPI-TM" : "-",
+                       edx & (1 << 28) ? "HT" : "-",
                        edx & (1 << 29) ? "TM" : "-");
        }
 
@@ -4652,7 +4738,6 @@ void process_cpuid()
        return;
 }
 
-
 /*
  * in /dev/cpu/ return success for names that are numbers
  * ie. filter out ".", "..", "microcode".
@@ -4842,6 +4927,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
        struct core_data *c;
        struct pkg_data *p;
 
+
+       /* Workaround for systems where physical_node_id == -1
+        * and logical_node_id == (-1 - topo.num_cpus).
+        */
+       if (node_id < 0)
+               node_id = 0;
+
        t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
        c = GET_CORE(core_base, core_id, node_id, pkg_id);
        p = GET_PKG(pkg_base, pkg_id);
@@ -4946,6 +5038,7 @@ int fork_it(char **argv)
 
        snapshot_proc_sysfs_files();
        status = for_all_cpus(get_counters, EVEN_COUNTERS);
+       first_counter_read = 0;
        if (status)
                exit(status);
        /* clear affinity side-effect of get_counters() */
@@ -5009,7 +5102,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 18.06.01"
+       fprintf(outf, "turbostat version 18.06.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5381,7 +5474,7 @@ void cmdline(int argc, char **argv)
                        break;
                case 'e':
                        /* --enable specified counter */
-                       bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+                       bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
                        break;
                case 'd':
                        debug++;
@@ -5465,7 +5558,6 @@ void cmdline(int argc, char **argv)
 int main(int argc, char **argv)
 {
        outf = stderr;
-
        cmdline(argc, argv);
 
        if (!quiet)
index a8fb63edcf8948df54b6aaa2f225def65d5a705f..e2926f72a821471214817f7ddb1c253a93b1ee02 100644 (file)
@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t)
        pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
        pcap->header.length = sizeof(*pcap);
        pcap->highest_capability = 1;
-       pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
-               ACPI_NFIT_CAPABILITY_MEM_FLUSH;
+       pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
        offset += pcap->header.length;
 
        if (t->setup_hotplug) {
index 1eefe211a4a88a3dfbac5be585932384061b9edd..b4994a94968bfd9d12965fd630cba7e99458a30a 100644 (file)
@@ -6,4 +6,15 @@ CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
 CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y
index 35669ccd4d23b26c7505e8829bcf3876e3bcb3e1..9df0d2ac45f8453b9529c4ea90fb19dba3f86480 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ "$(id -u)" != "0" ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 SRC_TREE=../../../../
 
 test_run()
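The same SKIP convention applies to compiled selftests: the kselftest runner treats exit status 4 as "skip" rather than "fail". A minimal C sketch of the root check added above:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define KSFT_SKIP 4	/* Kselftest framework requirement - SKIP code is 4 */

int main(void)
{
	if (geteuid() != 0) {
		fprintf(stderr, "skip all tests: please run this as root\n");
		return KSFT_SKIP;
	}
	/* ... privileged test body would go here ... */
	return EXIT_SUCCESS;
}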
index ce2e15e4f9760e205ed8e91ab5260a23172ab2e5..677686198df34d799e67c0eb15ab25f4b68eba4c 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 GREEN='\033[0;92m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color
index 1c77994b5e713dfe8aae357dd083c4713080f62a..270fa8f49573207bc973cce2302b53341a6fec5b 100755 (executable)
 # An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
 # datagram can be read on NS6 when binding to fb00::6.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 TMP_FILE="/tmp/selftest_lwt_seg6local.txt"
 
 cleanup()
index e78aad0a68bb9963368a5236377144c8e61cb230..be800d0e7a841abfbc60545cf63fe33219db0c35 100755 (executable)
@@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True):
 
 def bpftool_prog_list(expected=None, ns=""):
     _, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+    # Remove the base progs
+    for p in base_progs:
+        if p in progs:
+            progs.remove(p)
     if expected is not None:
         if len(progs) != expected:
             fail(True, "%d BPF programs loaded, expected %d" %
@@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""):
 
 def bpftool_map_list(expected=None, ns=""):
     _, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+    # Remove the base maps
+    for m in base_maps:
+        if m in maps:
+            maps.remove(m)
     if expected is not None:
         if len(maps) != expected:
             fail(True, "%d BPF maps loaded, expected %d" %
@@ -585,8 +593,8 @@ skip(os.getuid() != 0, "test must be run as root")
 # Check tools
 ret, progs = bpftool("prog", fail=False)
 skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")
 
 # Check netdevsim
 ret, out = cmd("modprobe netdevsim", fail=False)
index 05c8cb71724ae8c1d8d7c3e3453bce9a83092b96..9e78df207919366fbdd048f7645568ac701ad5f5 100644 (file)
@@ -1413,18 +1413,12 @@ out:
 
 int main(int argc, char **argv)
 {
-       struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        int iov_count = 1, length = 1024, rate = 1;
        struct sockmap_options options = {0};
        int opt, longindex, err, cg_fd = 0;
        char *bpf_file = BPF_SOCKMAP_FILENAME;
        int test = PING_PONG;
 
-       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
-               perror("setrlimit(RLIMIT_MEMLOCK)");
-               return 1;
-       }
-
        if (argc < 2)
                return test_suite();
 
index aeb2901f21f4737558efbecec73974b17c610a38..546aee3e9fb457ae166c0fda8bc0c3b484f1a19b 100755 (executable)
@@ -608,28 +608,26 @@ setup_xfrm_tunnel()
 test_xfrm_tunnel()
 {
        config_device
-        #tcpdump -nei veth1 ip &
-       output=$(mktemp)
-       cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
-        setup_xfrm_tunnel
+       > /sys/kernel/debug/tracing/trace
+       setup_xfrm_tunnel
        tc qdisc add dev veth1 clsact
        tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
                sec xfrm_get_state
        ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
        sleep 1
-       grep "reqid 1" $output
+       grep "reqid 1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "spi 0x1" $output
+       grep "spi 0x1" /sys/kernel/debug/tracing/trace
        check_err $?
-       grep "remote ip 0xac100164" $output
+       grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
        check_err $?
        cleanup
 
        if [ $ret -ne 0 ]; then
-                echo -e ${RED}"FAIL: xfrm tunnel"${NC}
-                return 1
-        fi
-        echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
+               echo -e ${RED}"FAIL: xfrm tunnel"${NC}
+               return 1
+       fi
+       echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
 }
 
 attach_bpf()
@@ -657,6 +655,10 @@ cleanup()
        ip link del ip6geneve11 2> /dev/null
        ip link del erspan11 2> /dev/null
        ip link del ip6erspan11 2> /dev/null
+       ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+       ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+       ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+       ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
 }
 
 cleanup_exit()
@@ -668,7 +670,7 @@ cleanup_exit()
 
 check()
 {
-       ip link help $1 2>&1 | grep -q "^Usage:"
+       ip link help 2>&1 | grep -q "\s$1\s"
        if [ $? -ne 0 ];then
                echo "SKIP $1: iproute2 not support"
        cleanup
index 2ecd27b670d77e29e817d607ac80fd100c683884..f5f7bcc960465bd94367b135063152345b9a40bd 100644 (file)
@@ -4974,6 +4974,24 @@ static struct bpf_test tests[] = {
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_LWT_XMIT,
        },
+       {
+               "make headroom for LWT_XMIT",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_MOV64_IMM(BPF_REG_2, 34),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+                       /* split for s390 to succeed */
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_MOV64_IMM(BPF_REG_2, 42),
+                       BPF_MOV64_IMM(BPF_REG_3, 0),
+                       BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+       },
        {
                "invalid access of tc_classid for LWT_IN",
                .insns = {
@@ -12554,8 +12572,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        }
 
        if (fd_prog >= 0) {
+               __u8 tmp[TEST_DATA_LEN << 2];
+               __u32 size_tmp = sizeof(tmp);
+
                err = bpf_prog_test_run(fd_prog, 1, test->data,
-                                       sizeof(test->data), NULL, NULL,
+                                       sizeof(test->data), tmp, &size_tmp,
                                        &retval, NULL);
                if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                        printf("Unexpected bpf_prog_test_run error\n");
index 128e548aa377d600f16fa0b9cdb4fbdf9c540cdc..1a0ac3a29ec5f8c9f0052e47e074f40089c634c5 100644 (file)
@@ -12,3 +12,4 @@ tcp_mmap
 udpgso
 udpgso_bench_rx
 udpgso_bench_tx
+tcp_inq
index 7ba089b33e8b8248ec08d8421a582be66c9f7e87..cd3a2f1545b54c23dab9b534bce9528c57b6c2ec 100644 (file)
@@ -12,3 +12,5 @@ CONFIG_NET_IPVTI=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
old mode 100644 (file)
new mode 100755 (executable)
index 78245d6..0f45633
@@ -740,13 +740,6 @@ ipv6_rt_add()
        run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
        log_test $? 2 "Attempt to add duplicate route - reject route"
 
-       # iproute2 prepend only sets NLM_F_CREATE
-       # - adds a new route; does NOT convert existing route to ECMP
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2"
-       check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024"
-       log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)"
-
        # route append with same prefix adds a new route
        # - iproute2 sets NLM_F_CREATE | NLM_F_APPEND
        add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
@@ -754,27 +747,6 @@ ipv6_rt_add()
        check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
        log_test $? 0 "Append nexthop to existing route - gw"
 
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
-       check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1"
-       log_test $? 0 "Append nexthop to existing route - dev only"
-
-       # multipath route can not have a nexthop that is a reject route
-       add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
-       run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64"
-       log_test $? 2 "Append nexthop to existing route - reject route"
-
-       # reject route can not be converted to multipath route
-       run_cmd "$IP -6 ro flush 2001:db8:104::/64"
-       run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2"
-       log_test $? 2 "Append nexthop to existing reject route - gw"
-
-       run_cmd "$IP -6 ro flush 2001:db8:104::/64"
-       run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
-       run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
-       log_test $? 2 "Append nexthop to existing reject route - dev only"
-
        # insert mpath directly
        add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
        check_route6  "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
@@ -819,13 +791,6 @@ ipv6_rt_replace_single()
        check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
        log_test $? 0 "Single path with multipath"
 
-       # single path with reject
-       #
-       add_initial_route6 "nexthop via 2001:db8:101::2"
-       run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
-       check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
-       log_test $? 0 "Single path with reject route"
-
        # single path with single path using MULTIPATH attribute
        #
        add_initial_route6 "via 2001:db8:101::2"
@@ -873,12 +838,6 @@ ipv6_rt_replace_mpath()
        check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
        log_test $? 0 "Multipath with single path via multipath attribute"
 
-       # multipath with reject
-       add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
-       run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
-       check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
-       log_test $? 0 "Multipath with reject route"
-
        # route replace fails - invalid nexthop 1
        add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
        run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
index 792fa4d0285e80e6cd36fdd83e3b5946b0538b3f..850767befa47a5fe7ca4bf4733fa670e55c6bf37 100755 (executable)
@@ -35,9 +35,6 @@ run_udp() {
 
        echo "udp gso"
        run_in_netns ${args} -S
-
-       echo "udp gso zerocopy"
-       run_in_netns ${args} -S -z
 }
 
 run_tcp() {
index 6ccb154cb4aa4f36184811d406ed9f4317647e4f..22f8df1ad7d484418235b6dadd290baca3bf3c6c 100755 (executable)
@@ -7,13 +7,16 @@
 #
 # Released under the terms of the GPL v2.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./common_tests
 
 if [ -e $REBOOT_FLAG  ]; then
     rm $REBOOT_FLAG
 else
     prlog "pstore_crash_test has not been executed yet. we skip further tests."
-    exit 0
+    exit $ksft_skip
 fi
 
 prlog -n "Mounting pstore filesystem ... "
index 6a9f602a8718691b086b08544e41aa0a17667e18..615252331813416675184c19066730624e53a96c 100644 (file)
@@ -137,6 +137,30 @@ unsigned int yield_mod_cnt, nr_abort;
        "subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
        "bne 222b\n\t" \
        "333:\n\t"
+
+#elif defined(__mips__)
+
+#define RSEQ_INJECT_INPUT \
+       , [loop_cnt_1]"m"(loop_cnt[1]) \
+       , [loop_cnt_2]"m"(loop_cnt[2]) \
+       , [loop_cnt_3]"m"(loop_cnt[3]) \
+       , [loop_cnt_4]"m"(loop_cnt[4]) \
+       , [loop_cnt_5]"m"(loop_cnt[5]) \
+       , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "$5"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+       "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+       "beqz " INJECT_ASM_REG ", 333f\n\t" \
+       "222:\n\t" \
+       "addiu " INJECT_ASM_REG ", -1\n\t" \
+       "bnez " INJECT_ASM_REG ", 222b\n\t" \
+       "333:\n\t"
+
 #else
 #error unsupported target
 #endif
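The injected MIPS block is a counted busy loop: skip when the configured count is zero, otherwise decrement to zero. Its C equivalent, as a sketch (inject_delay() is an illustrative name):

/* spin n times inside the critical section to widen the window in
 * which preemption or migration can abort the rseq sequence
 */
static inline void inject_delay(unsigned long n)
{
	while (n--)
		__asm__ __volatile__ ("" ::: "memory");	/* defeat optimization */
}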
index 3b055f9aeaab56bcbe91f9bc493ac2d08c527074..3cea19877227a03c4c501bffa6a687cbe32ad126 100644 (file)
@@ -57,6 +57,7 @@ do {                                                                  \
 #define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown,          \
                                abort_label, version, flags,            \
                                start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t"                                        \
                __rseq_str(table_label) ":\n\t"                         \
                ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
                ".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
new file mode 100644 (file)
index 0000000..7f48ecf
--- /dev/null
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ *
+ * Based on rseq-arm.h:
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG       0x53053053
+
+#define rseq_smp_mb()  __asm__ __volatile__ ("sync" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p)                                       \
+__extension__ ({                                                       \
+       __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p);                       \
+       rseq_smp_mb();                                                  \
+       ____p1;                                                         \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep()     rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v)                                   \
+do {                                                                   \
+       rseq_smp_mb();                                                  \
+       RSEQ_WRITE_ONCE(*p, v);                                         \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#if _MIPS_SZLONG == 64
+# define LONG                  ".dword"
+# define LONG_LA               "dla"
+# define LONG_L                        "ld"
+# define LONG_S                        "sd"
+# define LONG_ADDI             "daddiu"
+# define U32_U64_PAD(x)                x
+#elif _MIPS_SZLONG == 32
+# define LONG                  ".word"
+# define LONG_LA               "la"
+# define LONG_L                        "lw"
+# define LONG_S                        "sw"
+# define LONG_ADDI             "addiu"
+# ifdef __BIG_ENDIAN
+#  define U32_U64_PAD(x)       "0x0, " x
+# else
+#  define U32_U64_PAD(x)       x ", 0x0"
+# endif
+#else
+# error unsupported _MIPS_SZLONG
+#endif
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+                               post_commit_offset, abort_ip) \
+               ".pushsection __rseq_table, \"aw\"\n\t" \
+               ".balign 32\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+               RSEQ_INJECT_ASM(1) \
+               LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
+               LONG_S  " $4, %[" __rseq_str(rseq_cs) "]\n\t" \
+               __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+               RSEQ_INJECT_ASM(2) \
+               "lw  $4, %[" __rseq_str(current_cpu_id) "]\n\t" \
+               "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, version, flags, \
+                               start_ip, post_commit_offset, abort_ip) \
+               ".balign 32\n\t" \
+               __rseq_str(table_label) ":\n\t" \
+               ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+               LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+               ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+                             start_ip, post_commit_ip, abort_ip) \
+       __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+                               abort_label, 0x0, 0x0, start_ip, \
+                               (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+               __rseq_str(label) ":\n\t" \
+               teardown \
+               "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess()   __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+                              off_t voffp, intptr_t *load, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+               LONG_S " $4, %[load]\n\t"
+               LONG_ADDI " $4, %[voffp]\n\t"
+               LONG_L " $4, 0($4)\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(5)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expectnot]           "r" (expectnot),
+                 [voffp]               "Ir" (voffp),
+                 [load]                "m" (*load)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+               LONG_L " $4, %[v]\n\t"
+               LONG_ADDI " $4, %[count]\n\t"
+               /* final store */
+               LONG_S " $4, %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(4)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 [v]                   "m" (*v),
+                 [count]               "Ir" (count)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+                                intptr_t *v2, intptr_t newv2,
+                                intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+                                        intptr_t *v2, intptr_t newv2,
+                                        intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+#endif
+               /* try store */
+               LONG_S " %[newv2], %[v2]\n\t"
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* try store input */
+                 [v2]                  "m" (*v2),
+                 [newv2]               "r" (newv2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+                             intptr_t *v2, intptr_t expect2,
+                             intptr_t newv, int cpu)
+{
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(4)
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[cmpfail]\n\t"
+               RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], %l[error2]\n\t"
+               LONG_L " $4, %[v2]\n\t"
+               "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               "b 5f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+               "5:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* cmp2 input */
+                 [v2]                  "m" (*v2),
+                 [expect2]             "r" (expect2),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv)
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2, error3
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_bug("1st expected value comparison failed");
+error3:
+       rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+                                void *dst, void *src, size_t len,
+                                intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S "  %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t" \
+               "222:\n\t" \
+               "lb   $4, 0(%[src])\n\t" \
+               "sb   $4, 0(%[dst])\n\t" \
+               LONG_ADDI " %[src], 1\n\t" \
+               LONG_ADDI " %[dst], 1\n\t" \
+               LONG_ADDI " %[len], -1\n\t" \
+               "bnez %[len], 222b\n\t" \
+               "333:\n\t" \
+               RSEQ_INJECT_ASM(5)
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+                                        void *dst, void *src, size_t len,
+                                        intptr_t newv, int cpu)
+{
+       uintptr_t rseq_scratch[3];
+
+       RSEQ_INJECT_C(9)
+
+       rseq_workaround_gcc_asm_size_guess();
+       __asm__ __volatile__ goto (
+               RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+               LONG_S " %[src], %[rseq_scratch0]\n\t"
+               LONG_S " %[dst], %[rseq_scratch1]\n\t"
+               LONG_S " %[len], %[rseq_scratch2]\n\t"
+               /* Start rseq by storing table entry pointer into rseq_cs. */
+               RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+               RSEQ_INJECT_ASM(3)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 5f\n\t"
+               RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+               LONG_L " $4, %[v]\n\t"
+               "bne $4, %[expect], 7f\n\t"
+#endif
+               /* try memcpy */
+               "beqz %[len], 333f\n\t" \
+               "222:\n\t" \
+               "lb   $4, 0(%[src])\n\t" \
+               "sb   $4, 0(%[dst])\n\t" \
+               LONG_ADDI " %[src], 1\n\t" \
+               LONG_ADDI " %[dst], 1\n\t" \
+               LONG_ADDI " %[len], -1\n\t" \
+               "bnez %[len], 222b\n\t" \
+               "333:\n\t" \
+               RSEQ_INJECT_ASM(5)
+               "sync\n\t"      /* full sync provides store-release */
+               /* final store */
+               LONG_S " %[newv], %[v]\n\t"
+               "2:\n\t"
+               RSEQ_INJECT_ASM(6)
+               /* teardown */
+               LONG_L " %[len], %[rseq_scratch2]\n\t"
+               LONG_L " %[dst], %[rseq_scratch1]\n\t"
+               LONG_L " %[src], %[rseq_scratch0]\n\t"
+               "b 8f\n\t"
+               RSEQ_ASM_DEFINE_ABORT(3, 4,
+                                     /* teardown */
+                                     LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                     LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                     LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                     abort, 1b, 2b, 4f)
+               RSEQ_ASM_DEFINE_CMPFAIL(5,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+               RSEQ_ASM_DEFINE_CMPFAIL(6,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error1)
+               RSEQ_ASM_DEFINE_CMPFAIL(7,
+                                       /* teardown */
+                                       LONG_L " %[len], %[rseq_scratch2]\n\t"
+                                       LONG_L " %[dst], %[rseq_scratch1]\n\t"
+                                       LONG_L " %[src], %[rseq_scratch0]\n\t",
+                                       error2)
+#endif
+               "8:\n\t"
+               : /* gcc asm goto does not allow outputs */
+               : [cpu_id]              "r" (cpu),
+                 [current_cpu_id]      "m" (__rseq_abi.cpu_id),
+                 [rseq_cs]             "m" (__rseq_abi.rseq_cs),
+                 /* final store input */
+                 [v]                   "m" (*v),
+                 [expect]              "r" (expect),
+                 [newv]                "r" (newv),
+                 /* try memcpy input */
+                 [dst]                 "r" (dst),
+                 [src]                 "r" (src),
+                 [len]                 "r" (len),
+                 [rseq_scratch0]       "m" (rseq_scratch[0]),
+                 [rseq_scratch1]       "m" (rseq_scratch[1]),
+                 [rseq_scratch2]       "m" (rseq_scratch[2])
+                 RSEQ_INJECT_INPUT
+               : "$4", "memory"
+                 RSEQ_INJECT_CLOBBER
+               : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+                 , error1, error2
+#endif
+       );
+       rseq_workaround_gcc_asm_size_guess();
+       return 0;
+abort:
+       rseq_workaround_gcc_asm_size_guess();
+       RSEQ_INJECT_FAILED
+       return -1;
+cmpfail:
+       rseq_workaround_gcc_asm_size_guess();
+       return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("cpu_id comparison failed");
+error2:
+       rseq_workaround_gcc_asm_size_guess();
+       rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
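For context, a minimal sketch of how a caller might drive rseq_cmpeqv_trymemcpy_storev(): copy an item into a per-CPU buffer and publish the new fill offset within a single restartable sequence. The buffer layout and buf_append() helper below are illustrative only; just the rseq.h entry points (rseq_cpu_start(), rseq_cmpeqv_trymemcpy_storev()) are assumed, and the calling thread is assumed to have already done rseq_register_current_thread().

    #include <stdint.h>
    #include <stddef.h>
    #include "rseq.h"

    struct percpu_buf {
    	intptr_t offset;	/* fill offset into data[], in bytes */
    	char data[4096];
    };

    /* Append one item to the current CPU's buffer; retries when the
     * sequence aborts or the offset changes underneath us. */
    static int buf_append(struct percpu_buf *bufs, const void *item, size_t len)
    {
    	for (;;) {
    		int cpu = rseq_cpu_start();
    		struct percpu_buf *b = &bufs[cpu];
    		intptr_t off = b->offset;

    		if (off + (intptr_t)len > (intptr_t)sizeof(b->data))
    			return -1;	/* buffer full */
    		/* 0: committed; >0: offset changed under us; <0: aborted */
    		if (!rseq_cmpeqv_trymemcpy_storev(&b->offset, off,
    						  &b->data[off], (void *)item,
    						  len, off + (intptr_t)len, cpu))
    			return 0;
    	}
    }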
index 0a808575cbc443489a5713639f7076eacedbdde4..86ce22417e0d7f89b0af1df374d1fd240306fd8c 100644 (file)
@@ -73,6 +73,8 @@ extern __thread volatile struct rseq __rseq_abi;
 #include <rseq-arm.h>
 #elif defined(__PPC__)
 #include <rseq-ppc.h>
+#elif defined(__mips__)
+#include <rseq-mips.h>
 #else
 #error unsupported target
 #endif
@@ -131,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void)
        return cpu;
 }
 
+static inline void rseq_clear_rseq_cs(void)
+{
+#ifdef __LP64__
+       __rseq_abi.rseq_cs.ptr = 0;
+#else
+       __rseq_abi.rseq_cs.ptr.ptr32 = 0;
+#endif
+}
+
 /*
- * rseq_prepare_unload() should be invoked by each thread using rseq_finish*()
- * at least once between their last rseq_finish*() and library unload of the
- * library defining the rseq critical section (struct rseq_cs). This also
- * applies to use of rseq in code generated by JIT: rseq_prepare_unload()
- * should be invoked at least once by each thread using rseq_finish*() before
- * reclaim of the memory holding the struct rseq_cs.
+ * rseq_prepare_unload() should be invoked by each thread executing an rseq
+ * critical section at least once between its last critical section and the
+ * unload of the library defining the rseq critical section
+ * (struct rseq_cs). This also applies to rseq use in code generated by a
+ * JIT: rseq_prepare_unload() should be invoked at least once by each
+ * thread executing an rseq critical section before the memory holding
+ * the struct rseq_cs is reclaimed.
  */
 static inline void rseq_prepare_unload(void)
 {
-       __rseq_abi.rseq_cs = 0;
+       rseq_clear_rseq_cs();
 }
 
 #endif  /* RSEQ_H_ */
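A hypothetical teardown path using the reworked helper: each thread that executed critical sections defined in a soon-to-be-reclaimed region (a dlclose()d library, a JIT code buffer) quiesces its rseq_cs pointer first. Only rseq_prepare_unload() comes from this header; the rest is illustrative.

    #include "rseq.h"

    /* Every thread that ran rseq critical sections defined in the region
     * calls this before any thread reclaims the struct rseq_cs memory. */
    static void thread_quiesce_rseq(void)
    {
    	rseq_prepare_unload();	/* clears this thread's rseq_cs pointer */
    }

    /* ...after all threads have quiesced, a single thread may munmap()
     * or dlclose() the region safely. */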
old mode 100644 (file)
new mode 100755 (executable)
index 2082eeffd779d586b558f45883c5dc1cc08a865b..a19531dba4dc311d00e33fcb637b91bced262bf2 100644 (file)
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/)
+
+ifneq ($(ARCH),sparc64)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
 SUBDIRS := drivers
 
 TEST_PROGS := run.sh
 
+
 .PHONY: all clean
 
 include ../lib.mk
@@ -18,10 +29,6 @@ all:
                fi \
        done
 
-override define RUN_TESTS
-       @cd $(OUTPUT); ./run.sh
-endef
-
 override define INSTALL_RULE
        mkdir -p $(INSTALL_PATH)
        install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@@ -33,10 +40,6 @@ override define INSTALL_RULE
        done;
 endef
 
-override define EMIT_TESTS
-       echo "./run.sh"
-endef
-
 override define CLEAN
        @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
@@ -44,3 +47,4 @@ override define CLEAN
                make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
        done
 endef
+endif
index 6264f40bbdbc9dedf9fd41519823fa31771d6cd0..deb0df4155659ec1f4b13a4d74c5128673690226 100644 (file)
@@ -1,4 +1,4 @@
-
+# SPDX-License-Identifier: GPL-2.0
 INCLUDEDIR := -I.
 CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
 
index 24cff498b31aa831b388e638929f29c36db07dbd..fc9f8cde7d4223c3fd564105942d4d648acb8627 100755 (executable)
@@ -2,6 +2,19 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs static keys kernel module tests
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+       echo "static_key: module test_static_key_base is not found [SKIP]"
+       exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+       echo "static_key: module test_static_keys is not found [SKIP]"
+       exit $ksft_skip
+fi
+
 if /sbin/modprobe -q test_static_key_base; then
        if /sbin/modprobe -q test_static_keys; then
                echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644 (file)
index 0000000..1ab7e81
--- /dev/null
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
index ec232c3cfcaac3b8f52936eabb908a0c316183f8..584eb8ea780a49220782d08e104756199fc19934 100755 (executable)
@@ -14,6 +14,9 @@
 
 # This performs a series tests against the proc sysctl interface.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 TEST_NAME="sysctl"
 TEST_DRIVER="test_${TEST_NAME}"
 TEST_DIR=$(dirname $0)
@@ -41,7 +44,7 @@ test_modprobe()
                echo "$0: $DIR not present" >&2
                echo "You must have the following enabled in your kernel:" >&2
                cat $TEST_DIR/config >&2
-               exit 1
+               exit $ksft_skip
        fi
 }
 
@@ -98,28 +101,30 @@ test_reqs()
        uid=$(id -u)
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 
        if ! which perl 2> /dev/null > /dev/null; then
                echo "$0: You need perl installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which getconf 2> /dev/null > /dev/null; then
                echo "$0: You need getconf installed"
-               exit 1
+               exit $ksft_skip
        fi
        if ! which diff 2> /dev/null > /dev/null; then
                echo "$0: You need diff installed"
-               exit 1
+               exit $ksft_skip
        fi
 }
 
 function load_req_mod()
 {
-       trap "test_modprobe" EXIT
-
        if [ ! -d $DIR ]; then
+               if ! modprobe -q -n $TEST_DRIVER; then
+                       echo "$0: module $TEST_DRIVER not found [SKIP]"
+                       exit $ksft_skip
+               fi
                modprobe $TEST_DRIVER
                if [ $? -ne 0 ]; then
                        exit
@@ -765,6 +770,7 @@ function parse_args()
 test_reqs
 allow_user_defaults
 check_production_sysctl_writes_strict
+test_modprobe
 load_req_mod
 
 trap "test_finish" EXIT
index d60506fc77f8bcba61f222db0b0df05a38e2e68b..f9b31a57439b759c1813ca94ac948a998e9dca51 100755 (executable)
@@ -2,6 +2,13 @@
 # SPDX-License-Identifier: GPL-2.0
 # Runs copy_to/from_user infrastructure using test_user_copy kernel module
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+       echo "user: module test_user_copy is not found [SKIP]"
+       exit $ksft_skip
+fi
 if /sbin/modprobe -q test_user_copy; then
        /sbin/modprobe -q -r test_user_copy
        echo "user_copy: ok"
index 1097f04e4d80e6cff93bb9e912c4c38894cd5959..bcec71250873108efdeae50eab0874b3924204c4 100644 (file)
@@ -16,6 +16,8 @@
 #include <unistd.h>
 #include <string.h>
 
+#include "../kselftest.h"
+
 #define MAP_SIZE 1048576
 
 struct map_list {
@@ -169,7 +171,7 @@ int main(int argc, char **argv)
                printf("Either the sysctl compact_unevictable_allowed is not\n"
                       "set to 1 or couldn't read the proc file.\n"
                       "Skipping the test\n");
-               return 0;
+               return KSFT_SKIP;
        }
 
        lim.rlim_cur = RLIM_INFINITY;
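The hunks above and below all converge on the same convention: a test that cannot run reports the kselftest SKIP code (4) instead of 0 (pass) or 1 (fail), so harnesses can tell "skipped" from "broken". A minimal sketch in C, with feature_available() as a stand-in probe:

    #include <stdio.h>

    #include "../kselftest.h"	/* defines KSFT_SKIP (== 4) */

    static int feature_available(void)
    {
    	return 0;	/* stand-in probe; always reports "missing" here */
    }

    int main(void)
    {
    	if (!feature_available()) {
    		printf("skip: required feature not available\n");
    		return KSFT_SKIP;	/* counted as skipped, not failed */
    	}
    	/* ... real test body ... */
    	return 0;
    }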
index 4997b9222cfa5055f9c07f4f1f0a1454bae89d6e..637b6d0ac0d0bf63d88ff5f5782a65453b486a7a 100644 (file)
@@ -9,6 +9,8 @@
 #include <stdbool.h>
 #include "mlock2.h"
 
+#include "../kselftest.h"
+
 struct vm_boundaries {
        unsigned long start;
        unsigned long end;
@@ -303,7 +305,7 @@ static int test_mlock_lock()
        if (mlock2_(map, 2 * page_size, 0)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(0)");
                goto unmap;
@@ -412,7 +414,7 @@ static int test_mlock_onfault()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -425,7 +427,7 @@ static int test_mlock_onfault()
        if (munlock(map, 2 * page_size)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("munlock()");
                goto unmap;
@@ -457,7 +459,7 @@ static int test_lock_onfault_of_present()
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock2(MLOCK_ONFAULT)");
                goto unmap;
@@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock)
        if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
                if (errno == ENOSYS) {
                        printf("Cannot call new mlock family, skipping test\n");
-                       _exit(0);
+                       _exit(KSFT_SKIP);
                }
                perror("mlock(ONFAULT)\n");
                goto out;
index 22d56467383029b24b52b95e6ee09cc0bb6bf835..88cbe5575f0cf9e0d8f165ecbe27002a3c5ed8a1 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 #please run as root
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 mnt=./huge
 exitcode=0
 
@@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
                echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
                if [ $? -ne 0 ]; then
                        echo "Please run this test as root"
-                       exit 1
+                       exit $ksft_skip
                fi
                while read name size unit; do
                        if [ "$name" = "HugePages_Free:" ]; then
index de2f9ec8a87fb342a7a595a13b009358d9eae000..7b8171e3128a8715a62a10e020c69ea3ca1c5321 100644 (file)
@@ -69,6 +69,8 @@
 #include <setjmp.h>
 #include <stdbool.h>
 
+#include "../kselftest.h"
+
 #ifdef __NR_userfaultfd
 
 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
@@ -1322,7 +1324,7 @@ int main(int argc, char **argv)
 int main(void)
 {
        printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
-       return 0;
+       return KSFT_SKIP;
 }
 
 #endif /* __NR_userfaultfd */
index 246145b84a127c341fd1fdc4fb41bcf6c7d51644..4d9dc3f2fd7048212181c51f03cef4d1650e07c9 100644 (file)
@@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
         */
        for (int i = 0; i < NGREG; i++) {
                greg_t req = requested_regs[i], res = resulting_regs[i];
+
                if (i == REG_TRAPNO || i == REG_IP)
                        continue;       /* don't care */
-               if (i == REG_SP) {
-                       printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
-                              (unsigned long long)res);
 
+               if (i == REG_SP) {
                        /*
-                        * In many circumstances, the high 32 bits of rsp
-                        * are zeroed.  For example, we could be a real
-                        * 32-bit program, or we could hit any of a number
-                        * of poorly-documented IRET or segmented ESP
-                        * oddities.  If this happens, it's okay.
+                        * If we were using a 16-bit stack segment, then
+                        * the kernel is a bit stuck: IRET only restores
+                        * the low 16 bits of ESP/RSP if SS is 16-bit.
+                        * The kernel uses a hack to restore bits 31:16,
+                        * but that hack doesn't help with bits 63:32.
+                        * On Intel CPUs, bits 63:32 end up zeroed, and, on
+                        * AMD CPUs, they leak the high bits of the kernel
+                        * espfix64 stack pointer.  There's very little that
+                        * the kernel can do about it.
+                        *
+                        * Similarly, if we are returning to a 32-bit context,
+                        * the CPU will often lose the high 32 bits of RSP.
                         */
-                       if (res == (req & 0xFFFFFFFF))
-                               continue;  /* OK; not expected to work */
+
+                       if (res == req)
+                               continue;
+
+                       if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+                               printf("[NOTE]\tSP: %llx -> %llx\n",
+                                      (unsigned long long)req,
+                                      (unsigned long long)res);
+                               continue;
+                       }
+
+                       printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+                              (unsigned long long)requested_regs[i],
+                              (unsigned long long)resulting_regs[i]);
+                       nerrs++;
+                       continue;
                }
 
                bool ignore_reg = false;
@@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
 #endif
 
                /* Sanity check on the kernel */
-               if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
+               if (i == REG_CX && req != res) {
                        printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
-                              (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                        continue;
                }
 
-               if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
-                       /*
-                        * SP is particularly interesting here.  The
-                        * usual cause of failures is that we hit the
-                        * nasty IRET case of returning to a 16-bit SS,
-                        * in which case bits 16:31 of the *kernel*
-                        * stack pointer persist in ESP.
-                        */
+               if (req != res && !ignore_reg) {
                        printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
-                              i, (unsigned long long)requested_regs[i],
-                              (unsigned long long)resulting_regs[i]);
+                              i, (unsigned long long)req,
+                              (unsigned long long)res);
                        nerrs++;
                }
        }
index 754de7da426a80a2ae386042d30a5904b44446e6..232e958ec454756501f2caa8eaf2133067fe10ac 100755 (executable)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 TCID="zram.sh"
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 . ./zram_lib.sh
 
 run_zram () {
@@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then
 else
        echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
        echo "$TCID : CONFIG_ZRAM is not set"
-       exit 1
+       exit $ksft_skip
 fi
index f6a9c73e7a442e7988b0820ebc809a342981df91..9e73a4fb9b0aa9b2a2e81368badfbe278876695d 100755 (executable)
@@ -18,6 +18,9 @@ MODULE=0
 dev_makeswap=-1
 dev_mounted=-1
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
 trap INT
 
 check_prereqs()
@@ -27,7 +30,7 @@ check_prereqs()
 
        if [ $uid -ne 0 ]; then
                echo $msg must be run as root >&2
-               exit 0
+               exit $ksft_skip
        fi
 }
 
index 9a45f90e2d08974c42c6e6dc242b5cfd35d5e120..369ee308b6686ca4a106581b91f8d382e45c79e8 100644 (file)
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
         */
        BUG_ON((unsigned long) page & 0x03);
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
 static inline struct page *sg_page(struct scatterlist *sg)
 {
 #ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
        BUG_ON(sg_is_chain(sg));
 #endif
        return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
  **/
 static inline void sg_mark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        /*
         * Set termination bit, clear potential chain bit
         */
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
  **/
 static inline void sg_unmark_end(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        sg->page_link &= ~0x02;
 }
 
 static inline struct scatterlist *sg_next(struct scatterlist *sg)
 {
-#ifdef CONFIG_DEBUG_SG
-       BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
        if (sg_is_last(sg))
                return NULL;
 
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
 static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
 {
        memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
-       {
-               unsigned int i;
-               for (i = 0; i < nents; i++)
-                       sgl[i].sg_magic = SG_MAGIC;
-       }
-#endif
        sg_mark_end(&sgl[nents - 1]);
 }
 
index 72143cfaf6ec39404dad5f72a8cf08c5e5fefc7e..ea434ddc849925c6e2577a9ed6acea906ea8eafd 100644 (file)
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
 
 config KVM_COMPAT
        def_bool y
-       depends on KVM && COMPAT && !S390
+       depends on KVM && COMPAT && !(S390 || ARM64)
 
 config HAVE_KVM_IRQ_BYPASS
        bool
index 8d90de213ce9b89340b7dc11927862f8344829c7..1d90d79706bd5b71d3914ecd808d2bd6c127286c 100644 (file)
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
        phys_addr_t next;
 
        assert_spin_locked(&kvm->mmu_lock);
+       WARN_ON(size & ~PAGE_MASK);
+
        pgd = kvm->arch.pgd + stage2_pgd_index(addr);
        do {
                /*
index ff7dc890941a8447d6e5abeae6dfe6544fac18d7..cdce653e3c47fb31b9eb0ccf73c3bebd830d8496 100644 (file)
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
                kvm_vgic_global_state.vcpu_base = 0;
-       } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
-               pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
-                       (unsigned long long)resource_size(&info->vcpu),
-                       PAGE_SIZE);
-               kvm_vgic_global_state.vcpu_base = 0;
        } else {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
index 90d30fbe95aefb1e1a943d5bf29d7aee763fb9d0..b20b751286fc612214c59c95e787c9fb0fac50b7 100644 (file)
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
 {
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, shutdown);
+       struct kvm *kvm = irqfd->kvm;
        u64 cnt;
 
+	/* Make sure the irqfd has been initialized in the assign path. */
+       synchronize_srcu(&kvm->irq_srcu);
+
        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
 
        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);
-       srcu_read_unlock(&kvm->irq_srcu, idx);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
 
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        if (events & EPOLLIN)
                schedule_work(&irqfd->inject);
 
-       /*
-        * do not drop the file until the irqfd is fully initialized, otherwise
-        * we might race against the EPOLLHUP
-        */
-       fdput(f);
 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        if (kvm_arch_has_irq_bypass()) {
                irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
        }
 #endif
 
+       srcu_read_unlock(&kvm->irq_srcu, idx);
+
+       /*
+        * do not drop the file until the irqfd is fully initialized, otherwise
+        * we might race against the EPOLLHUP
+        */
+       fdput(f);
        return 0;
 
 fail:
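The shape of the race fix above, reduced to the SRCU calls it adds or moves: the assign path now holds kvm->irq_srcu across the whole initialization, and the shutdown work begins by waiting for that read-side section, so it can no longer run against a half-initialized irqfd. A schematic sketch; only the SRCU calls mirror the patch, the function names are stand-ins:

    static void assign_side(struct kvm *kvm)
    {
    	int idx = srcu_read_lock(&kvm->irq_srcu);

    	/* ... publish the irqfd and finish initializing it ... */
    	srcu_read_unlock(&kvm->irq_srcu, idx);
    }

    static void shutdown_side(struct kvm *kvm)
    {
    	synchronize_srcu(&kvm->irq_srcu);	/* wait out any assign in flight */
    	/* ... now safe to unhook the wait queue and free the irqfd ... */
    }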
index ada21f47f22b5a902e81572ba94efb16a2a7bccb..8b47507faab5b645295094992c0eaa388765f025 100644 (file)
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 #ifdef CONFIG_KVM_COMPAT
 static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
+#define KVM_COMPAT(c)  .compat_ioctl   = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+                               unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c)  .compat_ioctl   = kvm_no_compat_ioctl
 #endif
 static int hardware_enable_all(void);
 static void hardware_disable_all(void);
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 static struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vcpu_compat_ioctl,
-#endif
        .mmap           = kvm_vcpu_mmap,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vcpu_compat_ioctl),
 };
 
 /*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
 
 static const struct file_operations kvm_device_fops = {
        .unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl = kvm_device_ioctl,
-#endif
        .release = kvm_device_release,
+       KVM_COMPAT(kvm_device_ioctl),
 };
 
 struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
 static struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
-       .compat_ioctl   = kvm_vm_compat_ioctl,
-#endif
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_vm_compat_ioctl),
 };
 
 static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ out:
 
 static struct file_operations kvm_chardev_ops = {
        .unlocked_ioctl = kvm_dev_ioctl,
-       .compat_ioctl   = kvm_dev_ioctl,
        .llseek         = noop_llseek,
+       KVM_COMPAT(kvm_dev_ioctl),
 };
 
 static struct miscdevice kvm_dev = {