Merge tag 'xfs-fixes-for-4.19-rc7' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Oct 2018 05:17:42 +0000 (07:17 +0200)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 11 Oct 2018 05:17:42 +0000 (07:17 +0200)
Dave writes:
  "xfs: fixes for 4.19-rc7

   Update for 4.19-rc7 to fix numerous file clone and deduplication issues."

* tag 'xfs-fixes-for-4.19-rc7' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux:
  xfs: fix data corruption w/ unaligned reflink ranges
  xfs: fix data corruption w/ unaligned dedupe ranges
  xfs: update ctime and remove suid before cloning files
  xfs: zero posteof blocks when cloning above eof
  xfs: refactor clonerange preparation into a separate helper

1467 files changed:
Documentation/ABI/stable/sysfs-bus-xen-backend
Documentation/ABI/stable/sysfs-devices-system-xen_memory
Documentation/ABI/testing/sysfs-driver-xen-blkback
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/sve.txt
Documentation/device-mapper/dm-raid.txt
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
Documentation/devicetree/bindings/input/gpio-keys.txt
Documentation/devicetree/bindings/interrupt-controller/riscv,cpu-intc.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/net/macb.txt
Documentation/devicetree/bindings/net/sh_eth.txt
Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
Documentation/driver-api/fpga/fpga-mgr.rst
Documentation/fb/uvesafb.txt
Documentation/filesystems/vfs.txt
Documentation/hwmon/ina2xx
Documentation/i2c/DMA-considerations
Documentation/media/uapi/dvb/video_function_calls.rst
Documentation/networking/ip-sysctl.txt
Documentation/process/changes.rst
Documentation/process/code-of-conduct.rst [new file with mode: 0644]
Documentation/process/code-of-conflict.rst [deleted file]
Documentation/process/index.rst
Documentation/scsi/scsi-parameters.txt
Documentation/virtual/kvm/api.txt
Documentation/x86/earlyprintk.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/axc003.dtsi
arch/arc/boot/dts/axc003_idu.dtsi
arch/arc/boot/dts/axs10x_mb.dtsi
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/haps_hs_defconfig
arch/arc/configs/haps_hs_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsim_hs_defconfig
arch/arc/configs/nsim_hs_smp_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/atomic.h
arch/arc/include/asm/dma-mapping.h [new file with mode: 0644]
arch/arc/kernel/process.c
arch/arc/kernel/troubleshoot.c
arch/arc/mm/cache.c
arch/arc/mm/dma.c
arch/arm/boot/dts/am335x-osd3358-sm-red.dts [changed mode: 0755->0644]
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/imx23-evk.dts
arch/arm/boot/dts/imx28-evk.dts
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/sama5d3_emac.dtsi
arch/arm/boot/dts/stm32mp157c.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/mxs_defconfig
arch/arm/configs/versatile_defconfig
arch/arm/include/asm/kvm_host.h
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mm/ioremap.c
arch/arm/tools/syscall.tbl
arch/arm64/Kconfig
arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
arch/arm64/configs/defconfig
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/crypto/sm4-ce-glue.c
arch/arm64/include/asm/jump_label.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/crash_core.c [new file with mode: 0644]
arch/arm64/kernel/machine_kexec.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/mmu.c
arch/hexagon/include/asm/bitops.h
arch/hexagon/kernel/dma.c
arch/m68k/mac/misc.c
arch/m68k/mm/mcfmmu.c
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
arch/mips/include/asm/processor.h
arch/mips/kernel/process.c
arch/mips/kernel/setup.c
arch/mips/kernel/vdso.c
arch/mips/kvm/mmu.c
arch/mips/lantiq/xway/dma.c
arch/mips/lib/memset.S
arch/nds32/Kconfig
arch/nds32/Makefile
arch/nds32/include/asm/elf.h
arch/nds32/include/asm/ftrace.h [new file with mode: 0644]
arch/nds32/include/asm/nds32.h
arch/nds32/include/asm/uaccess.h
arch/nds32/kernel/Makefile
arch/nds32/kernel/atl2c.c
arch/nds32/kernel/ex-entry.S
arch/nds32/kernel/ex-exit.S
arch/nds32/kernel/ftrace.c [new file with mode: 0644]
arch/nds32/kernel/module.c
arch/nds32/kernel/stacktrace.c
arch/nds32/kernel/traps.c
arch/nds32/kernel/vmlinux.lds.S
arch/nios2/Kconfig.debug
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/iommu.h
arch/powerpc/include/asm/mmu_context.h
arch/powerpc/include/asm/setup.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/tm.S
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/lib/checksum_64.S
arch/powerpc/lib/code-patching.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/mmu_context_iommu.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pkeys.c
arch/powerpc/platforms/powernv/pci-ioda-tce.c
arch/riscv/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/riscv/include/asm/tlb.h
arch/riscv/kernel/setup.c
arch/riscv/kernel/sys_riscv.c
arch/s390/crypto/paes_s390.c
arch/s390/include/asm/mmu.h
arch/s390/include/asm/sclp.h
arch/s390/kernel/early_printk.c
arch/s390/kernel/swsusp.S
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/priv.c
arch/s390/kvm/vsie.c
arch/s390/mm/gmap.c
arch/sparc/kernel/auxio_64.c
arch/sparc/kernel/kgdb_32.c
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/of_device_32.c
arch/sparc/kernel/of_device_64.c
arch/sparc/kernel/power.c
arch/sparc/kernel/prom_32.c
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/viohs.c
arch/sparc/vdso/Makefile
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/mem_encrypt.S
arch/x86/crypto/aegis128-aesni-glue.c
arch/x86/crypto/aegis128l-aesni-glue.c
arch/x86/crypto/aegis256-aesni-glue.c
arch/x86/crypto/aesni-intel_asm.S
arch/x86/crypto/morus1280-sse2-glue.c
arch/x86/crypto/morus640-sse2-glue.c
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vclock_gettime.c
arch/x86/events/amd/uncore.c
arch/x86/events/core.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/hyperv-tlfs.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mem_encrypt.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/signal.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uv/uv.h
arch/x86/include/asm/vgtod.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/alternative.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_rdt.h
arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/eisa.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/kvmclock.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/topology.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/usercopy.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/mm/pti.c
arch/x86/mm/tlb.c
arch/x86/platform/efi/efi_32.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/pmu.c
arch/xtensa/Kconfig
arch/xtensa/Makefile
arch/xtensa/platforms/iss/setup.c
block/bfq-cgroup.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-mq-tag.c
block/blk-mq.c
block/blk-throttle.c
block/blk-wbt.c
block/bsg.c
block/elevator.c
block/genhd.c
block/partition-generic.c
drivers/acpi/acpi_lpss.c
drivers/acpi/bus.c
drivers/android/binder_alloc.c
drivers/ata/libata-core.c
drivers/ata/pata_ftide010.c
drivers/base/firmware_loader/main.c
drivers/base/memory.c
drivers/base/power/clock_ops.c
drivers/base/power/main.c
drivers/block/floppy.c
drivers/block/nbd.c
drivers/block/null_blk.h
drivers/block/null_blk_main.c
drivers/block/null_blk_zoned.c
drivers/block/rbd.c
drivers/block/xen-blkback/blkback.c
drivers/block/xen-blkback/common.h
drivers/block/xen-blkfront.c
drivers/bluetooth/Kconfig
drivers/bluetooth/btmtkuart.c
drivers/bluetooth/hci_ldisc.c
drivers/bus/ti-sysc.c
drivers/cdrom/cdrom.c
drivers/char/Kconfig
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/ipmi/kcs_bmc.c
drivers/char/random.c
drivers/clk/clk-npcm7xx.c
drivers/clk/x86/clk-pmc-atom.c
drivers/clk/x86/clk-st.c
drivers/clocksource/timer-atmel-pit.c
drivers/clocksource/timer-fttmr010.c
drivers/clocksource/timer-ti-32k.c
drivers/cpufreq/qcom-cpufreq-kryo.c
drivers/cpuidle/governors/menu.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamalg_qi.c
drivers/crypto/caam/caampkc.c
drivers/crypto/caam/jr.c
drivers/crypto/cavium/nitrox/nitrox_dev.h
drivers/crypto/cavium/nitrox/nitrox_lib.c
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
drivers/crypto/ccp/psp-dev.c
drivers/crypto/chelsio/chcr_algo.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/chelsio/chtls/chtls.h
drivers/crypto/chelsio/chtls/chtls_main.c
drivers/crypto/mxs-dcp.c
drivers/crypto/qat/qat_c3xxx/adf_drv.c
drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
drivers/crypto/qat/qat_c62x/adf_drv.c
drivers/crypto/qat/qat_c62xvf/adf_drv.c
drivers/crypto/qat/qat_dh895xcc/adf_drv.c
drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_xts.c
drivers/dax/device.c
drivers/dma/mic_x100_dma.c
drivers/firmware/arm_scmi/perf.c
drivers/firmware/efi/Kconfig
drivers/fpga/dfl-fme-pr.c
drivers/fpga/dfl-fme-region.c
drivers/fpga/fpga-bridge.c
drivers/fpga/of-fpga-region.c
drivers/gpio/gpio-adp5588.c
drivers/gpio/gpio-dwapb.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/kv_dpm.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_debugfs.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_panel.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/exynos/exynos_drm_iommu.h
drivers/gpu/drm/i2c/tda9950.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/fb_decoder.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/sched_policy.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lspcon.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/mediatek/mtk_disp_ovl.c
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.h
drivers/gpu/drm/mediatek/mtk_drm_ddp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_display.h
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nouveau_vga.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/pl111/pl111_vexpress.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/sun4i/sun8i_tcon_top.c
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-apple.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/intel-ish-hid/ipc/hw-ish.h
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hv/connection.c
drivers/hv/vmbus_drv.c
drivers/hwmon/adt7475.c
drivers/hwmon/ina2xx.c
drivers/hwmon/nct6775.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/pci.c
drivers/i2c/algos/i2c-algo-bit.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-imx-lpi2c.c
drivers/i2c/busses/i2c-isch.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-scmi.c
drivers/i2c/busses/i2c-sh_mobile.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/i2c/busses/i2c-xiic.c
drivers/i2c/i2c-core-base.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
drivers/iio/temperature/maxim_thermocouple.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/uverbs_uapi.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/pcie.c
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/pio.h
drivers/infiniband/hw/hfi1/user_sdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/atakbd.c
drivers/input/misc/uinput.c
drivers/input/mouse/elantech.c
drivers/input/touchscreen/egalax_ts.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-pasid.h
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/md/bcache/bcache.h
drivers/md/bcache/journal.c
drivers/md/bcache/super.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-linear.c
drivers/md/dm-mpath.c
drivers/md/dm-raid.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/md-cluster.c
drivers/md/raid10.c
drivers/md/raid5-log.h
drivers/md/raid5.c
drivers/media/i2c/mt9v111.c
drivers/media/platform/Kconfig
drivers/media/platform/qcom/camss/camss-csid.c
drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
drivers/media/platform/qcom/camss/camss-csiphy.c
drivers/media/platform/qcom/camss/camss-ispif.c
drivers/media/platform/qcom/camss/camss-vfe-4-1.c
drivers/media/platform/qcom/camss/camss-vfe-4-7.c
drivers/media/platform/qcom/camss/camss.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-fh.c
drivers/memory/ti-aemif.c
drivers/mfd/omap-usb-host.c
drivers/misc/hmc6352.c
drivers/misc/ibmvmc.c
drivers/misc/mei/bus.c
drivers/misc/mei/client.c
drivers/misc/mei/hbm.c
drivers/mmc/core/host.c
drivers/mmc/core/queue.c
drivers/mmc/core/queue.h
drivers/mmc/core/slot-gpio.c
drivers/mmc/host/android-goldfish.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/renesas_sdhi_internal_dmac.c
drivers/mmc/host/renesas_sdhi_sys_dmac.c
drivers/mtd/devices/m25p80.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/raw/denali.c
drivers/mtd/nand/raw/docg4.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/net/appletalk/ipddp.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/global1_atu.c
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/apple/bmac.c
drivers/net/ethernet/apple/mace.c
drivers/net/ethernet/apple/macmace.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/cirrus/mac89x0.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/i825xx/ether1.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/i825xx/sun3_82586.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_controlq.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/lantiq_etop.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/dev.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/wq.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/mscc/ocelot_board.c
drivers/net/ethernet/netronome/nfp/flower/action.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlge/qlge_main.c
drivers/net/ethernet/qualcomm/qca_7k.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/qualcomm/qca_spi.h
drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/Kconfig
drivers/net/ethernet/renesas/Makefile
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/seeq/ether3.c
drivers/net/ethernet/seeq/sgiseeq.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/falcon/efx.c
drivers/net/ethernet/sgi/ioc3-eth.c
drivers/net/ethernet/sgi/meth.c
drivers/net/ethernet/stmicro/stmmac/Kconfig
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpsw-phy-sel.c
drivers/net/ethernet/wiznet/w5100.c
drivers/net/ethernet/wiznet/w5300.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mcr20a.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp-bus.c
drivers/net/phy/sfp.c
drivers/net/ppp/pppoe.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/asix_common.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/sr9800.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/control.c
drivers/net/wireless/broadcom/b43/dma.c
drivers/net/wireless/intel/iwlwifi/cfg/1000.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/mt76x0/main.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/hash.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nvme/host/multipath.c
drivers/nvme/host/pci.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/core.c
drivers/nvme/target/fcloop.c
drivers/nvme/target/rdma.c
drivers/of/base.c
drivers/of/platform.c
drivers/of/unittest.c
drivers/pci/controller/dwc/pcie-designware.c
drivers/pci/controller/dwc/pcie-designware.h
drivers/pci/controller/pci-hyperv.c
drivers/pci/controller/pci-mvebu.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/pci.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/switch/switchtec.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/intel/pinctrl-cannonlake.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/platform/chrome/cros_ec_proto.c
drivers/platform/x86/alienware-wmi.c
drivers/platform/x86/dell-smbios-wmi.c
drivers/regulator/bd71837-regulator.c
drivers/regulator/core.c
drivers/regulator/of_regulator.c
drivers/s390/char/sclp_early_core.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/cio/vfio_ccw_fsm.c
drivers/s390/cio/vfio_ccw_ops.c
drivers/s390/cio/vfio_ccw_private.h
drivers/s390/crypto/ap_bus.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/sbus/char/openprom.c
drivers/sbus/char/oradax.c
drivers/scsi/Kconfig
drivers/scsi/aacraid/aacraid.h
drivers/scsi/csiostor/csio_hw.c
drivers/scsi/csiostor/csio_hw.h
drivers/scsi/csiostor/csio_mb.c
drivers/scsi/hosts.c
drivers/scsi/hpsa.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/qedi/qedi.h
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/fsl/qbman/qman.c
drivers/soc/fsl/qe/ucc.c
drivers/soundwire/stream.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-gpio.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sh-msiof.c
drivers/spi/spi-tegra20-slink.c
drivers/spi/spi.c
drivers/staging/erofs/Kconfig
drivers/staging/erofs/super.c
drivers/staging/fbtft/TODO
drivers/staging/gasket/TODO
drivers/staging/media/mt9t031/Kconfig
drivers/staging/vboxvideo/vbox_drv.c
drivers/staging/vboxvideo/vbox_mode.c
drivers/staging/wilc1000/Makefile
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_debugfs.c
drivers/staging/wilc1000/wilc_wlan.c
drivers/staging/wilc1000/wilc_wlan_if.h
drivers/target/iscsi/cxgbit/cxgbit_ddp.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_login.h
drivers/thermal/of-thermal.c
drivers/thermal/qoriq_thermal.c
drivers/thermal/rcar_gen3_thermal.c
drivers/thermal/rcar_thermal.c
drivers/thunderbolt/icm.c
drivers/thunderbolt/nhi.c
drivers/tty/hvc/hvc_console.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/cpm_uart/cpm_uart_core.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_io.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/common/common.c
drivers/usb/common/roles.c
drivers/usb/core/devio.c
drivers/usb/core/driver.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/message.c
drivers/usb/core/of.c
drivers/usb/core/quirks.c
drivers/usb/core/usb.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-of-simple.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/udc/fotg210-udc.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci.c
drivers/usb/misc/uss720.c
drivers/usb/misc/yurex.c
drivers/usb/mtu3/mtu3_core.c
drivers/usb/mtu3/mtu3_hw_regs.h
drivers/usb/musb/musb_dsps.c
drivers/usb/serial/io_ti.h
drivers/usb/serial/option.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial-simple.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/storage/unusual_devs.h
drivers/usb/typec/bus.c
drivers/usb/typec/class.c
drivers/usb/typec/mux.c
drivers/vhost/vhost.c
drivers/video/fbdev/aty/atyfb.h
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/mach64_ct.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
drivers/video/fbdev/pxa168fb.c
drivers/video/fbdev/stifb.c
drivers/xen/Kconfig
drivers/xen/cpu_hotplug.c
drivers/xen/events/events_base.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/xen/mem-reservation.c
drivers/xen/xen-balloon.c
drivers/xen/xenbus/xenbus_probe.c
fs/afs/proc.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/volumes.c
fs/buffer.c
fs/ceph/super.c
fs/cifs/Kconfig
fs/cifs/cifs_unicode.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/transport.c
fs/dax.c
fs/ext2/inode.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/inline.c
fs/ext4/inode.c
fs/ext4/mmp.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/gfs2/bmap.c
fs/ioctl.c
fs/isofs/inode.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/nfs4trace.h
fs/nfs/pnfs.c
fs/nfsd/vfs.c
fs/nilfs2/alloc.c
fs/nilfs2/alloc.h
fs/nilfs2/bmap.c
fs/nilfs2/bmap.h
fs/nilfs2/btnode.c
fs/nilfs2/btnode.h
fs/nilfs2/btree.c
fs/nilfs2/btree.h
fs/nilfs2/cpfile.c
fs/nilfs2/cpfile.h
fs/nilfs2/dat.c
fs/nilfs2/dat.h
fs/nilfs2/dir.c
fs/nilfs2/direct.c
fs/nilfs2/direct.h
fs/nilfs2/file.c
fs/nilfs2/gcinode.c
fs/nilfs2/ifile.c
fs/nilfs2/ifile.h
fs/nilfs2/inode.c
fs/nilfs2/ioctl.c
fs/nilfs2/mdt.c
fs/nilfs2/mdt.h
fs/nilfs2/namei.c
fs/nilfs2/nilfs.h
fs/nilfs2/page.c
fs/nilfs2/page.h
fs/nilfs2/recovery.c
fs/nilfs2/segbuf.c
fs/nilfs2/segbuf.h
fs/nilfs2/segment.c
fs/nilfs2/segment.h
fs/nilfs2/sufile.c
fs/nilfs2/sufile.h
fs/nilfs2/super.c
fs/nilfs2/sysfs.c
fs/nilfs2/sysfs.h
fs/nilfs2/the_nilfs.c
fs/nilfs2/the_nilfs.h
fs/notify/fsnotify.c
fs/notify/mark.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/refcounttree.c
fs/overlayfs/copy_up.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/base.c
fs/proc/kcore.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/quota/quota.c
fs/read_write.c
fs/ubifs/super.c
fs/ubifs/xattr.c
fs/udf/super.c
fs/xattr.c
include/asm-generic/io.h
include/drm/drm_client.h
include/drm/drm_drv.h
include/drm/drm_panel.h
include/linux/arm-smccc.h
include/linux/blk-cgroup.h
include/linux/blkdev.h
include/linux/compiler-gcc.h
include/linux/compiler_types.h
include/linux/fpga/fpga-mgr.h
include/linux/fs.h
include/linux/genhd.h
include/linux/hid.h
include/linux/hugetlb.h
include/linux/i2c.h
include/linux/kvm_host.h
include/linux/mfd/da9063/pdata.h
include/linux/mfd/rohm-bd718x7.h
include/linux/mlx5/driver.h
include/linux/mlx5/transobj.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mm_types_task.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netpoll.h
include/linux/of.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/platform_data/ina2xx.h
include/linux/quota.h
include/linux/regulator/machine.h
include/linux/serial_sci.h
include/linux/spi/spi-mem.h
include/linux/stmmac.h
include/linux/timekeeping.h
include/linux/tracepoint.h
include/linux/uio.h
include/linux/vga_switcheroo.h
include/linux/virtio_net.h
include/linux/vm_event_item.h
include/linux/vmacache.h
include/media/v4l2-fh.h
include/net/act_api.h
include/net/bonding.h
include/net/cfg80211.h
include/net/inet_sock.h
include/net/netfilter/nf_conntrack_timeout.h
include/net/netlink.h
include/net/nfc/hci.h
include/net/pkt_cls.h
include/net/regulatory.h
include/net/tls.h
include/sound/hdaudio.h
include/sound/soc-dapm.h
include/trace/events/migrate.h
include/trace/events/rxrpc.h
include/uapi/asm-generic/hugetlb_encode.h
include/uapi/linux/kvm.h
include/uapi/linux/memfd.h
include/uapi/linux/mman.h
include/uapi/linux/perf_event.h
include/uapi/linux/rds.h
include/uapi/linux/shm.h
include/uapi/linux/vhost.h
include/uapi/sound/skl-tplg-interface.h
include/xen/mem-reservation.h
ipc/shm.c
kernel/bpf/btf.c
kernel/bpf/hashtab.c
kernel/bpf/local_storage.c
kernel/bpf/sockmap.c
kernel/bpf/verifier.c
kernel/cpu.c
kernel/dma/Kconfig
kernel/dma/direct.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/fork.c
kernel/jump_label.c
kernel/locking/lockdep.c
kernel/locking/mutex.c
kernel/locking/test-ww_mutex.c
kernel/pid.c
kernel/printk/printk.c
kernel/printk/printk_safe.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/sched/topology.c
kernel/sys.c
kernel/time/clocksource.c
kernel/trace/ring_buffer.c
kernel/watchdog.c
kernel/watchdog_hld.c
kernel/workqueue.c
lib/Kconfig.debug
lib/percpu_counter.c
lib/rhashtable.c
lib/vsprintf.c
lib/xz/xz_crc32.c
lib/xz/xz_private.h
mm/Kconfig
mm/Makefile
mm/backing-dev.c
mm/debug.c
mm/fadvise.c
mm/gup_benchmark.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/madvise.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slub.c
mm/util.c
mm/vmacache.c
mm/vmscan.c
mm/vmstat.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/gateway_client.c
net/batman-adv/main.h
net/batman-adv/network-coding.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/tvlv.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/bluetooth/smp.h
net/bpfilter/bpfilter_kern.c
net/bridge/br_netfilter_hooks.c
net/core/dev.c
net/core/devlink.c
net/core/ethtool.c
net/core/filter.c
net/core/neighbour.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dccp/input.c
net/dccp/ipv4.c
net/dsa/dsa.c
net/dsa/slave.c
net/ipv4/af_inet.c
net/ipv4/igmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/Kconfig
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bbr.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv4/xfrm4_input.c
net/ipv4/xfrm4_mode_transport.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/xfrm6_input.c
net/ipv6/xfrm6_mode_transport.c
net/ipv6/xfrm6_output.c
net/iucv/af_iucv.c
net/iucv/iucv.c
net/mac80211/cfg.c
net/mac80211/ibss.c
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/status.c
net/mac80211/tdls.c
net/mac80211/tx.c
net/mac80211/util.c
net/mpls/af_mpls.c
net/ncsi/ncsi-netlink.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nfnetlink_queue.c
net/netfilter/nft_ct.c
net/netfilter/nft_osf.c
net/netfilter/nft_set_rbtree.c
net/netfilter/xt_CHECKSUM.c
net/netfilter/xt_cluster.c
net/netfilter/xt_hashlimit.c
net/netfilter/xt_socket.c
net/netlabel/netlabel_unlabeled.c
net/nfc/hci/core.c
net/openvswitch/conntrack.c
net/packet/af_packet.c
net/packet/internal.h
net/rds/Kconfig
net/rds/bind.c
net/rds/ib.c
net/rds/ib.h
net/rds/tcp.c
net/rfkill/rfkill-gpio.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/input.c
net/rxrpc/local_object.c
net/rxrpc/output.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/protocol.h
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/cls_u32.c
net/sched/sch_api.c
net/sched/sch_cake.c
net/sctp/outqueue.c
net/sctp/proc.c
net/sctp/socket.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc_clc.c
net/smc/smc_close.c
net/smc/smc_pnet.c
net/socket.c
net/tipc/bcast.c
net/tipc/bearer.c
net/tipc/diag.c
net/tipc/link.c
net/tipc/link.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/netlink.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/tipc/socket.c
net/tipc/socket.h
net/tipc/topsrv.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/wireless/util.c
net/wireless/wext-compat.c
net/xdp/xdp_umem.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/checkpatch.pl
scripts/depmod.sh
scripts/kconfig/Makefile
scripts/kconfig/check-pkgconfig.sh [deleted file]
scripts/kconfig/gconf-cfg.sh
scripts/kconfig/mconf-cfg.sh
scripts/kconfig/mconf.c
scripts/kconfig/nconf-cfg.sh
scripts/kconfig/qconf-cfg.sh
scripts/recordmcount.pl
scripts/setlocalversion
scripts/subarch.include [new file with mode: 0644]
security/Kconfig
security/apparmor/secid.c
sound/core/rawmidi.c
sound/firewire/bebob/bebob.c
sound/firewire/bebob/bebob_maudio.c
sound/firewire/digi00x/digi00x.c
sound/firewire/fireface/ff-protocol-ff400.c
sound/firewire/fireworks/fireworks.c
sound/firewire/oxfw/oxfw.c
sound/firewire/tascam/tascam.c
sound/hda/ext/hdac_ext_stream.c
sound/hda/hdac_controller.c
sound/hda/hdac_i915.c
sound/pci/emu10k1/emufx.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_intel.h
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/codecs/cs4265.c
sound/soc/codecs/max98373.c
sound/soc/codecs/rt5514.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/sigmadsp.c
sound/soc/codecs/tas6424.c
sound/soc/codecs/wm8804-i2c.c
sound/soc/codecs/wm9712.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/skylake/skl.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/dma.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/ssi.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/bpf/bpftool/map.c
tools/bpf/bpftool/map_perf_ring.c
tools/hv/hv_fcopy_daemon.c
tools/hv/hv_kvp_daemon.c
tools/include/linux/lockdep.h
tools/include/linux/nmi.h [new file with mode: 0644]
tools/include/tools/libc_compat.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/vhost.h
tools/kvm/kvm_stat/kvm_stat
tools/lib/bpf/Build
tools/lib/bpf/libbpf.c
tools/lib/bpf/str_error.c [new file with mode: 0644]
tools/lib/bpf/str_error.h [new file with mode: 0644]
tools/perf/Documentation/Makefile
tools/perf/Makefile.perf
tools/perf/arch/arm64/Makefile
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
tools/perf/arch/powerpc/util/sym-handling.c
tools/perf/arch/x86/include/arch-tests.h
tools/perf/arch/x86/tests/Build
tools/perf/arch/x86/tests/arch-tests.c
tools/perf/arch/x86/tests/bp-modify.c [new file with mode: 0644]
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/evsel.c
tools/perf/util/map.c
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-parse.c
tools/testing/selftests/android/Makefile
tools/testing/selftests/android/config [moved from tools/testing/selftests/android/ion/config with 100% similarity]
tools/testing/selftests/android/ion/Makefile
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/cgroup/.gitignore
tools/testing/selftests/cgroup/cgroup_util.c
tools/testing/selftests/cgroup/cgroup_util.h
tools/testing/selftests/cgroup/test_memcontrol.c
tools/testing/selftests/efivarfs/config [new file with mode: 0644]
tools/testing/selftests/futex/functional/Makefile
tools/testing/selftests/gpio/Makefile
tools/testing/selftests/kselftest.h
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/kvm_util.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/platform_info_test.c [new file with mode: 0644]
tools/testing/selftests/lib.mk
tools/testing/selftests/memory-hotplug/config
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/net/tls.c
tools/testing/selftests/networking/timestamping/Makefile
tools/testing/selftests/powerpc/alignment/Makefile
tools/testing/selftests/powerpc/benchmarks/Makefile
tools/testing/selftests/powerpc/cache_shape/Makefile
tools/testing/selftests/powerpc/copyloops/Makefile
tools/testing/selftests/powerpc/dscr/Makefile
tools/testing/selftests/powerpc/math/Makefile
tools/testing/selftests/powerpc/mm/Makefile
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/primitives/Makefile
tools/testing/selftests/powerpc/ptrace/Makefile
tools/testing/selftests/powerpc/signal/Makefile
tools/testing/selftests/powerpc/stringloops/Makefile
tools/testing/selftests/powerpc/switch_endian/Makefile
tools/testing/selftests/powerpc/syscalls/Makefile
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/vphn/Makefile
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/testing/selftests/vm/Makefile
tools/testing/selftests/x86/test_vdso.c
tools/vm/page-types.c
tools/vm/slabinfo.c
virt/kvm/arm/mmu.c
virt/kvm/arm/trace.h

index 3d5951c..e8b60bd 100644 (file)
@@ -73,3 +73,12 @@ KernelVersion:       3.0
 Contact:       Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
                 Number of sectors written by the frontend.
+
+What:          /sys/bus/xen-backend/devices/*/state
+Date:          August 2018
+KernelVersion: 4.19
+Contact:       Joe Jin <joe.jin@oracle.com>
+Description:
+                The state of the device. One of: 'Unknown',
+                'Initialising', 'Initialised', 'Connected', 'Closing',
+                'Closed', 'Reconfiguring', 'Reconfigured'.
index caa311d..6d83f95 100644 (file)
@@ -75,3 +75,12 @@ Contact:     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 Description:
                Amount (in KiB) of low (or normal) memory in the
                balloon.
+
+What:          /sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:          September 2018
+KernelVersion: 4.20
+Contact:       xen-devel@lists.xenproject.org
+Description:
+               Control scrubbing pages before returning them to Xen for other domains'
+               use. Can be set with xen_scrub_pages cmdline
+               parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
index 8bb43b6..4e7babb 100644 (file)
@@ -15,3 +15,13 @@ Description:
                 blkback. If the frontend tries to use more than
                 max_persistent_grants, the LRU kicks in and starts
                 removing 5% of max_persistent_grants every 100ms.
+
+What:           /sys/module/xen_blkback/parameters/persistent_grant_unused_seconds
+Date:           August 2018
+KernelVersion:  4.19
+Contact:        Roger Pau Monné <roger.pau@citrix.com>
+Description:
+                How long a persistent grant is allowed to remain
+                allocated without being in use. The time is in
+                seconds, 0 means indefinitely long.
+                The default is 60 seconds.
index 9871e64..92eb1f4 100644 (file)
        ramdisk_size=   [RAM] Sizes of RAM disks in kilobytes
                        See Documentation/blockdev/ramdisk.txt.
 
+       random.trust_cpu={on,off}
+                       [KNL] Enable or disable trusting the use of the
+                       CPU's random number generator (if available) to
+                       fully seed the kernel's CRNG. Default is controlled
+                       by CONFIG_RANDOM_TRUST_CPU.
+
        ras=option[,option,...] [KNL] RAS-specific options
 
                cec_disable     [X86]
                        Disables the PV optimizations forcing the HVM guest to
                        run as generic HVM guest with no PV drivers.
 
+       xen_scrub_pages=        [XEN]
+                       Boolean option to control scrubbing pages before giving them back
+                       to Xen, for use by other domains. Can also be changed at runtime
+                       with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+                       Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
        xirc2ps_cs=     [NET,PCMCIA]
                        Format:
                        <irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
index f128f73..7169a0e 100644 (file)
@@ -200,7 +200,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
       thread.
 
     * Changing the vector length causes all of P0..P15, FFR and all bits of
-      Z0..V31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
+      Z0..Z31 except for Z0 bits [127:0] .. Z31 bits [127:0] to become
       unspecified.  Calling PR_SVE_SET_VL with vl equal to the thread's current
       vector length, or calling PR_SVE_SET_VL with the PR_SVE_SET_VL_ONEXEC
       flag, does not constitute a change to the vector length for this purpose.
@@ -500,7 +500,7 @@ References
 [2] arch/arm64/include/uapi/asm/ptrace.h
     AArch64 Linux ptrace ABI definitions
 
-[3] linux/Documentation/arm64/cpu-feature-registers.txt
+[3] Documentation/arm64/cpu-feature-registers.txt
 
 [4] ARM IHI0055C
     http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055c/IHI0055C_beta_aapcs64.pdf
index 390c145..52a719b 100644 (file)
@@ -348,3 +348,7 @@ Version History
 1.13.1  Fix deadlock caused by early md_stop_writes().  Also fix size and
        state races.
 1.13.2  Fix raid redundancy validation and avoid keeping raid set frozen
+1.14.0  Fix reshape race on small devices.  Fix stripe adding reshape
+       deadlock/potential data corruption.  Update superblock when
+       specific devices are requested via rebuild.  Fix RAID leg
+       rebuild errors.
index 00e4365..091c8df 100644 (file)
@@ -3,7 +3,6 @@
 Required properties:
 - compatible :
   - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
-  - "fsl,imx8dv-lpi2c" for LPI2C compatible with the one integrated on i.MX8DV soc
 - reg : address and length of the lpi2c master registers
 - interrupts : lpi2c interrupt
 - clocks : lpi2c clock specifier
@@ -11,7 +10,7 @@ Required properties:
 Examples:
 
 lpi2c7: lpi2c7@40a50000 {
-       compatible = "fsl,imx8dv-lpi2c";
+       compatible = "fsl,imx7ulp-lpi2c";
        reg = <0x40A50000 0x10000>;
        interrupt-parent = <&intc>;
        interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
index 996ce84..7cccc49 100644 (file)
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
 
 Required properties:
        - compatible = "gpio-keys";
index b0a8af5..265b223 100644 (file)
@@ -11,7 +11,7 @@ The RISC-V supervisor ISA manual specifies three interrupt sources that are
 attached to every HLIC: software interrupts, the timer interrupt, and external
 interrupts.  Software interrupts are used to send IPIs between cores.  The
 timer interrupt comes from an architecturally mandated real-time timer that is
-controller via Supervisor Binary Interface (SBI) calls and CSR reads.  External
+controlled via Supervisor Binary Interface (SBI) calls and CSR reads.  External
 interrupts connect all other device interrupts to the HLIC, which are routed
 via the platform-level interrupt controller (PLIC).
 
@@ -25,7 +25,15 @@ in the system.
 
 Required properties:
 - compatible : "riscv,cpu-intc"
-- #interrupt-cells : should be <1>
+- #interrupt-cells : should be <1>.  The interrupt sources are defined by the
+  RISC-V supervisor ISA manual, with only the following three interrupts being
+  defined for supervisor mode:
+    - Source 1 is the supervisor software interrupt, which can be sent by an SBI
+      call and is reserved for use by software.
+    - Source 5 is the supervisor timer interrupt, which can be configured by
+      SBI calls and implements a one-shot timer.
+    - Source 9 is the supervisor external interrupt, which chains to all other
+      device interrupts.
 - interrupt-controller : Identifies the node as an interrupt controller
 
 Furthermore, this interrupt-controller MUST be embedded inside the cpu
@@ -38,7 +46,7 @@ An example device tree entry for a HLIC is shown below.
                ...
                cpu1-intc: interrupt-controller {
                        #interrupt-cells = <1>;
-                       compatible = "riscv,cpu-intc", "sifive,fu540-c000-cpu-intc";
+                       compatible = "sifive,fu540-c000-cpu-intc", "riscv,cpu-intc";
                        interrupt-controller;
                };
        };
index 4108936..b3acebe 100644 (file)
@@ -19,6 +19,10 @@ Required properties:
 - slaves               : Specifies number for slaves
 - active_slave         : Specifies the slave to use for time stamping,
                          ethtool and SIOCGMIIPHY
+- cpsw-phy-sel         : Specifies the phandle to the CPSW phy mode selection
+                         device. See also cpsw-phy-sel.txt for its binding.
+                         Note that in legacy cases cpsw-phy-sel may be
+                         a child device instead of a phandle.
 
 Optional properties:
 - ti,hwmods            : Must be "cpgmac0"
@@ -75,6 +79,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
@@ -103,6 +108,7 @@ Examples:
                cpts_clock_mult = <0x80000000>;
                cpts_clock_shift = <29>;
                syscon = <&cm>;
+               cpsw-phy-sel = <&phy_sel>;
                cpsw_emac0: slave@0 {
                        phy_id = <&davinci_mdio>, <0>;
                        phy-mode = "rgmii-txid";
index 457d5ae..3e17ac1 100644 (file)
@@ -10,6 +10,7 @@ Required properties:
   Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
   the Cadence GEM, or the generic form: "cdns,gem".
   Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+  Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
   Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
   Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
index 76db9f1..abc3627 100644 (file)
@@ -16,6 +16,7 @@ Required properties:
              "renesas,ether-r8a7794"  if the device is a part of R8A7794 SoC.
              "renesas,gether-r8a77980" if the device is a part of R8A77980 SoC.
              "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+             "renesas,ether-r7s9210" if the device is a part of R7S9210 SoC.
              "renesas,rcar-gen1-ether" for a generic R-Car Gen1 device.
              "renesas,rcar-gen2-ether" for a generic R-Car Gen2 or RZ/G1
                                        device.
index 5d47a26..9407212 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
               Examples with soctypes are:
                 - "renesas,r8a7743-wdt" (RZ/G1M)
                 - "renesas,r8a7745-wdt" (RZ/G1E)
+                - "renesas,r8a774a1-wdt" (RZ/G2M)
                 - "renesas,r8a7790-wdt" (R-Car H2)
                 - "renesas,r8a7791-wdt" (R-Car M2-W)
                 - "renesas,r8a7792-wdt" (R-Car V2H)
@@ -21,8 +22,8 @@ Required properties:
                 - "renesas,r7s72100-wdt" (RZ/A1)
                The generic compatible string must be:
                 - "renesas,rza-wdt" for RZ/A
-                - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G
-                - "renesas,rcar-gen3-wdt" for R-Car Gen3
+                - "renesas,rcar-gen2-wdt" for R-Car Gen2 and RZ/G1
+                - "renesas,rcar-gen3-wdt" for R-Car Gen3 and RZ/G2
 
 - reg : Should contain WDT registers location and length
 - clocks : the clock feeding the watchdog timer.
index 4b3825d..82b6dbb 100644 (file)
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
 API for programming an FPGA
 ---------------------------
 
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+   :doc: FPGA Manager flags
+
 .. kernel-doc:: include/linux/fpga/fpga-mgr.h
    :functions: fpga_image_info
 
index f6362d8..aa92419 100644 (file)
@@ -15,7 +15,8 @@ than x86.  Check the v86d documentation for a list of currently supported
 arches.
 
 v86d source code can be downloaded from the following website:
-  http://dev.gentoo.org/~spock/projects/uvesafb
+
+  https://github.com/mjanusz/v86d
 
 Please refer to the v86d documentation for detailed configuration and
 installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
 
 --
  Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
 
  Documentation of the uvesafb options is loosely based on vesafb.txt.
 
index 4b2084d..a6c6a8a 100644 (file)
@@ -848,7 +848,7 @@ struct file_operations
 ----------------------
 
 This describes how the VFS can manipulate an open file. As of kernel
-4.1, the following members are defined:
+4.18, the following members are defined:
 
 struct file_operations {
        struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
+       int (*iterate_shared) (struct file *, struct dir_context *);
        __poll_t (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
-       int (*mremap)(struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *, fl_owner_t id);
        int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
 #ifndef CONFIG_MMU
        unsigned (*mmap_capabilities)(struct file *);
 #endif
+       ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+       int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 };
 
 Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
 
   iterate: called when the VFS needs to read the directory contents
 
+  iterate_shared: called when the VFS needs to read the directory contents
+       when filesystem supports concurrent dir iterators
+
   poll: called by the VFS when a process wants to check if there is
        activity on this file and (optionally) go to sleep until there
        is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
 
   fallocate: called by the VFS to preallocate blocks or punch a hole.
 
+  copy_file_range: called by the copy_file_range(2) system call.
+
+  clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
+       FICLONE commands.
+
+  dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
+       command.
+
+  fadvise: possibly called by the fadvise64() system call.
+
 Note that the file operations are implemented by the specific
 filesystem in which the inode resides. When opening a device node
 (character or block special) most filesystems will call special
index 72d16f0..b8df81f 100644 (file)
@@ -32,7 +32,7 @@ Supported chips:
     Datasheet: Publicly available at the Texas Instruments website
                http://www.ti.com/
 
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
 
 Description
 -----------
index 966610a..2030020 100644 (file)
@@ -50,10 +50,14 @@ bounce buffer. But you don't need to care about that detail, just use the
 returned buffer. If NULL is returned, the threshold was not met or a bounce
 buffer could not be allocated. Fall back to PIO in that case.
 
-In any case, a buffer obtained from above needs to be released. It ensures data
-is copied back to the message and a potentially used bounce buffer is freed::
+In any case, a buffer obtained from above needs to be released. Another helper
+function ensures a potentially used bounce buffer is freed::
 
-       i2c_release_dma_safe_msg_buf(msg, dma_buf);
+       i2c_put_dma_safe_msg_buf(dma_buf, msg, xferred);
+
+The last argument 'xferred' controls if the buffer is synced back to the
+message or not. No syncing is needed in cases setting up DMA had an error and
+there was no data transferred.
 
 The bounce buffer handling from the core is generic and simple. It will always
 allocate a new bounce buffer. If you want a more sophisticated handling (e.g.
index 3f4f6c9..a4222b6 100644 (file)
@@ -33,4 +33,3 @@ Video Function Calls
     video-clear-buffer
     video-set-streamtype
     video-set-format
-    video-set-attributes
index 8313a63..960de8f 100644 (file)
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
          1 - Disabled by default, enabled when an ICMP black hole detected
          2 - Always enabled, use initial MSS of tcp_base_mss.
 
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
        Controls how often to start TCP Packetization-Layer Path MTU
        Discovery reprobe. The default is reprobing every 10 minutes as
        per RFC4821.
index 61f918b..d1bf143 100644 (file)
@@ -86,7 +86,7 @@ pkg-config
 
 The build system, as of 4.18, requires pkg-config to check for installed
 kconfig tools and to determine flags settings for use in
-'make {menu,n,g,x}config'.  Previously pkg-config was being used but not
+'make {g,x}config'.  Previously pkg-config was being used but not
 verified or documented.
 
 Flex
diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst
new file mode 100644 (file)
index 0000000..ab7c24b
--- /dev/null
@@ -0,0 +1,81 @@
+Contributor Covenant Code of Conduct
+++++++++++++++++++++++++++++++++++++
+
+Our Pledge
+==========
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and
+expression, level of experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and orientation.
+
+Our Standards
+=============
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others’ private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+
+Our Responsibilities
+====================
+
+Maintainers are responsible for clarifying the standards of acceptable behavior
+and are expected to take appropriate and fair corrective action in response to
+any instances of unacceptable behavior.
+
+Maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+Scope
+=====
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+Enforcement
+===========
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the Technical Advisory Board (TAB) at
+<tab@lists.linux-foundation.org>. All complaints will be reviewed and
+investigated and will result in a response that is deemed necessary and
+appropriate to the circumstances. The TAB is obligated to maintain
+confidentiality with regard to the reporter of an incident.  Further details of
+specific enforcement policies may be posted separately.
+
+Maintainers who do not follow or enforce the Code of Conduct in good faith may
+face temporary or permanent repercussions as determined by other members of the
+project’s leadership.
+
+Attribution
+===========
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
diff --git a/Documentation/process/code-of-conflict.rst b/Documentation/process/code-of-conflict.rst
deleted file mode 100644 (file)
index 47b6de7..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-Code of Conflict
-----------------
-
-The Linux kernel development effort is a very personal process compared
-to "traditional" ways of developing software.  Your code and ideas
-behind it will be carefully reviewed, often resulting in critique and
-criticism.  The review will almost always require improvements to the
-code before it can be included in the kernel.  Know that this happens
-because everyone involved wants to see the best possible solution for
-the overall success of Linux.  This development process has been proven
-to create the most robust operating system kernel ever, and we do not
-want to do anything to cause the quality of submission and eventual
-result to ever decrease.
-
-If however, anyone feels personally abused, threatened, or otherwise
-uncomfortable due to this process, that is not acceptable.  If so,
-please contact the Linux Foundation's Technical Advisory Board at
-<tab@lists.linux-foundation.org>, or the individual members, and they
-will work to resolve the issue to the best of their ability.  For more
-information on who is on the Technical Advisory Board and what their
-role is, please see:
-
-       - http://www.linuxfoundation.org/projects/linux/tab
-
-As a reviewer of code, please strive to keep things civil and focused on
-the technical issues involved.  We are all humans, and frustrations can
-be high on both sides of the process.  Try to keep in mind the immortal
-words of Bill and Ted, "Be excellent to each other."
index 37bd062..9ae3e31 100644 (file)
@@ -20,7 +20,7 @@ Below are the essential guides that every developer should read.
    :maxdepth: 1
 
    howto
-   code-of-conflict
+   code-of-conduct
    development-process
    submitting-patches
    coding-style
index 25a4b4c..92999d4 100644 (file)
@@ -97,6 +97,11 @@ parameters may be changed at runtime by the command
                        allowing boot to proceed.  none ignores them, expecting
                        user space to do the scan.
 
+       scsi_mod.use_blk_mq=
+                       [SCSI] use blk-mq I/O path by default
+                       See SCSI_MQ_DEFAULT in drivers/scsi/Kconfig.
+                       Format: <y/n>
+
        sim710=         [SCSI,HW]
                        See header of drivers/scsi/sim710.c.
 
index c664064..647f941 100644 (file)
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
 Architectures: s390
 Parameters: none
 Returns: 0 on success, -EINVAL if hpage module parameter was not set
-        or cmma is enabled
+        or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+        flag set
 
 With this capability the KVM support for memory backing with 1m pages
 through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned.
 While it is generally possible to create a huge page backed VM without
 this capability, the VM will not be able to run.
 
+7.14 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access. Currently, this
+capability does not enable write permissions of this MSR for the guest.
+
 8. Other capabilities.
 ----------------------
 
index 688e3ee..46933e0 100644 (file)
@@ -35,25 +35,25 @@ and two USB cables, connected like this:
 ( If your system does not list a debug port capability then you probably
   won't be able to use the USB debug key. )
 
- b.) You also need a Netchip USB debug cable/key:
+ b.) You also need a NetChip USB debug cable/key:
 
         http://www.plxtech.com/products/NET2000/NET20DC/default.asp
 
-     This is a small blue plastic connector with two USB connections,
+     This is a small blue plastic connector with two USB connections;
      it draws power from its USB connections.
 
  c.) You need a second client/console system with a high speed USB 2.0
      port.
 
- d.) The Netchip device must be plugged directly into the physical
+ d.) The NetChip device must be plugged directly into the physical
      debug port on the "host/target" system.  You cannot use a USB hub in
      between the physical debug port and the "host/target" system.
 
      The EHCI debug controller is bound to a specific physical USB
-     port and the Netchip device will only work as an early printk
+     port and the NetChip device will only work as an early printk
      device in this port.  The EHCI host controllers are electrically
      wired such that the EHCI debug controller is hooked up to the
-     first physical and there is no way to change this via software.
+     first physical port and there is no way to change this via software.
      You can find the physical port through experimentation by trying
      each physical port on the system and rebooting.  Or you can try
      and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
      to the hardware vendor, because there is no reason not to wire
      this port into one of the physically accessible ports.
 
- e.) It is also important to note, that many versions of the Netchip
+ e.) It is also important to note, that many versions of the NetChip
      device require the "client/console" system to be plugged into the
-     right and side of the device (with the product logo facing up and
+     right hand side of the device (with the product logo facing up and
      readable left to right).  The reason being is that the 5 volt
      power supply is taken from only one side of the device and it
      must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
       CONFIG_EARLY_PRINTK_DBGP=y
 
     And you need to add the boot command line: "earlyprintk=dbgp".
+
     (If you are using Grub, append it to the 'kernel' line in
-     /etc/grub.conf)
+     /etc/grub.conf.  If you are using Grub2 on a BIOS firmware system,
+     append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
+     using Grub2 on an EFI firmware system, append it to the 'linux'
+     or 'linuxefi' line in /boot/grub2/grub.cfg or
+     /boot/efi/EFI/<distro>/grub.cfg.)
 
     On systems with more than one EHCI debug controller you must
     specify the correct EHCI debug controller number.  The ordering
     comes from the PCI bus enumeration of the EHCI controllers.  The
-    default with no number argument is "0" the first EHCI debug
+    default with no number argument is "0" or the first EHCI debug
     controller.  To use the second EHCI debug controller, you would
     use the command line: "earlyprintk=dbgp1"
 
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
     see the raw output.
 
  c.) On Nvidia Southbridge based systems: the kernel will try to probe
-     and find out which port has debug device connected.
+     and find out which port has debug device connected.
 
 3. Testing that it works fine:
 
index a5b256b..40082e4 100644 (file)
@@ -324,7 +324,6 @@ F:  Documentation/ABI/testing/sysfs-bus-acpi
 F:     Documentation/ABI/testing/configfs-acpi
 F:     drivers/pci/*acpi*
 F:     drivers/pci/*/*acpi*
-F:     drivers/pci/*/*/*acpi*
 F:     tools/power/acpi/
 
 ACPI APEI
@@ -1251,7 +1250,7 @@ N:        meson
 
 ARM/Annapurna Labs ALPINE ARCHITECTURE
 M:     Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M:     Antoine Tenart <antoine.tenart@free-electrons.com>
+M:     Antoine Tenart <antoine.tenart@bootlin.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-alpine/
@@ -2311,6 +2310,7 @@ F:        drivers/clocksource/cadence_ttc_timer.c
 F:     drivers/i2c/busses/i2c-cadence.c
 F:     drivers/mmc/host/sdhci-of-arasan.c
 F:     drivers/edac/synopsys_edac.c
+F:     drivers/i2c/busses/i2c-xiic.c
 
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
@@ -2955,7 +2955,6 @@ F:        include/linux/bcm963xx_tag.h
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
 M:     Rasesh Mody <rasesh.mody@cavium.com>
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -2976,6 +2975,7 @@ F:        drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
 M:     Ariel Elior <ariel.elior@cavium.com>
+M:     Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
 M:     everest-linux-l2@cavium.com
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -5469,7 +5469,8 @@ S:        Odd Fixes
 F:     drivers/net/ethernet/agere/
 
 ETHERNET BRIDGE
-M:     Stephen Hemminger <stephen@networkplumber.org>
+M:     Roopa Prabhu <roopa@cumulusnetworks.com>
+M:     Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
 L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
 L:     netdev@vger.kernel.org
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -5624,6 +5625,8 @@ F:        lib/fault-inject.c
 
 FBTFT Framebuffer drivers
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/staging/fbtft/
 
@@ -6059,7 +6062,7 @@ F:        Documentation/gcc-plugins.txt
 
 GASKET DRIVER FRAMEWORK
 M:     Rob Springer <rspringer@google.com>
-M:     John Joseph <jnjoseph@google.com>
+M:     Todd Poynor <toddpoynor@google.com>
 M:     Ben Chan <benchan@chromium.org>
 S:     Maintained
 F:     drivers/staging/gasket/
@@ -7015,6 +7018,20 @@ F:       drivers/crypto/vmx/aes*
 F:     drivers/crypto/vmx/ghash*
 F:     drivers/crypto/vmx/ppc-xlate.pl
 
+IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpaphp*
+
+IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
+M:     Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L:     linux-pci@vger.kernel.org
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Supported
+F:     drivers/pci/hotplug/rpadlpar*
+
 IBM ServeRAID RAID DRIVER
 S:     Orphan
 F:     drivers/scsi/ips.*
@@ -8255,9 +8272,9 @@ F:        drivers/ata/pata_arasan_cf.c
 
 LIBATA PATA DRIVERS
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
-M:     Jens Axboe <kernel.dk>
+M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/pata_*.c
 F:     drivers/ata/ata_generic.c
@@ -8275,7 +8292,7 @@ LIBATA SATA AHCI PLATFORM devices support
 M:     Hans de Goede <hdegoede@redhat.com>
 M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/ahci_platform.c
 F:     drivers/ata/libahci_platform.c
@@ -8291,7 +8308,7 @@ F:        drivers/ata/sata_promise.*
 LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
 M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/
 F:     include/linux/ata.h
@@ -8299,7 +8316,7 @@ F:        include/linux/libata.h
 F:     Documentation/devicetree/bindings/ata/
 
 LIBLOCKDEP
-M:     Sasha Levin <alexander.levin@verizon.com>
+M:     Sasha Levin <alexander.levin@microsoft.com>
 S:     Maintained
 F:     tools/lib/lockdep/
 
@@ -8581,7 +8598,6 @@ F:        include/linux/spinlock*.h
 F:     arch/*/include/asm/spinlock*.h
 F:     include/linux/rwlock*.h
 F:     include/linux/mutex*.h
-F:     arch/*/include/asm/mutex*.h
 F:     include/linux/rwsem*.h
 F:     arch/*/include/asm/rwsem.h
 F:     include/linux/seqlock.h
@@ -9641,7 +9657,8 @@ MIPS/LOONGSON2 ARCHITECTURE
 M:     Jiaxun Yang <jiaxun.yang@flygoat.com>
 L:     linux-mips@linux-mips.org
 S:     Maintained
-F:     arch/mips/loongson64/*{2e/2f}*
+F:     arch/mips/loongson64/fuloong-2e/
+F:     arch/mips/loongson64/lemote-2f/
 F:     arch/mips/include/asm/mach-loongson64/
 F:     drivers/*/*loongson2*
 F:     drivers/*/*/*loongson2*
@@ -9699,13 +9716,6 @@ Q:       http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/dvb-frontends/mn88473*
 
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L:     linux-pci@vger.kernel.org
-S:     Supported
-F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F:     drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M:     Jessica Yu <jeyu@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10932,7 +10942,7 @@ M:      Willy Tarreau <willy@haproxy.com>
 M:     Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S:     Odd Fixes
 F:     Documentation/auxdisplay/lcd-panel-cgram.txt
-F:     drivers/misc/panel.c
+F:     drivers/auxdisplay/panel.c
 
 PARALLEL PORT SUBSYSTEM
 M:     Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11120,6 +11130,13 @@ F:     include/uapi/linux/switchtec_ioctl.h
 F:     include/linux/switchtec.h
 F:     drivers/ntb/hw/mscc/
 
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M:     Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L:     linux-pci@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F:     drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:     Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:     Jason Cooper <jason@lakedaemon.net>
@@ -11153,7 +11170,7 @@ F:      drivers/pci/controller/dwc/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Jingoo Han <jingoohan1@gmail.com>
-M:     Joao Pinto <Joao.Pinto@synopsys.com>
+M:     Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11186,8 +11203,14 @@ F:     tools/pci/
 
 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M:     Russell Currey <ruscur@russell.cc>
+M:     Sam Bobroff <sbobroff@linux.ibm.com>
+M:     Oliver O'Halloran <oohall@gmail.com>
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Supported
+F:     Documentation/PCI/pci-error-recovery.txt
+F:     drivers/pci/pcie/aer.c
+F:     drivers/pci/pcie/dpc.c
+F:     drivers/pci/pcie/err.c
 F:     Documentation/powerpc/eeh-pci-error-recovery.txt
 F:     arch/powerpc/kernel/eeh*.c
 F:     arch/powerpc/platforms/*/eeh*.c
@@ -11345,10 +11368,10 @@ S:    Maintained
 F:     drivers/platform/x86/peaq-wmi.c
 
 PER-CPU MEMORY ALLOCATOR
+M:     Dennis Zhou <dennis@kernel.org>
 M:     Tejun Heo <tj@kernel.org>
 M:     Christoph Lameter <cl@linux.com>
-M:     Dennis Zhou <dennisszhou@gmail.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
 S:     Maintained
 F:     include/linux/percpu*.h
 F:     mm/percpu*.c
@@ -11956,7 +11979,7 @@ F:      Documentation/scsi/LICENSE.qla4xxx
 F:     drivers/scsi/qla4xxx/
 
 QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
+M:     Shahed Shaikh <Shahed.Shaikh@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -11964,7 +11987,6 @@ S:      Supported
 F:     drivers/net/ethernet/qlogic/qlcnic/
 
 QLOGIC QLGE 10Gb ETHERNET DRIVER
-M:     Harish Patil <harish.patil@cavium.com>
 M:     Manish Chopra <manish.chopra@cavium.com>
 M:     Dept-GELinuxNICDev@cavium.com
 L:     netdev@vger.kernel.org
@@ -12243,6 +12265,7 @@ F:      Documentation/networking/rds.txt
 
 RDT - RESOURCE ALLOCATION
 M:     Fenghua Yu <fenghua.yu@intel.com>
+M:     Reinette Chatre <reinette.chatre@intel.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 F:     arch/x86/kernel/cpu/intel_rdt*
@@ -13432,9 +13455,8 @@ F:      drivers/i2c/busses/i2c-synquacer.c
 F:     Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
 
 SOCIONEXT UNIPHIER SOUND DRIVER
-M:     Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Maintained
+S:     Orphan
 F:     sound/soc/uniphier/
 
 SOEKRIS NET48XX LED SUPPORT
@@ -15372,7 +15394,7 @@ S:      Maintained
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
 L:     linux-fbdev@vger.kernel.org
-W:     http://dev.gentoo.org/~spock/projects/uvesafb/
+W:     https://github.com/mjanusz/v86d
 S:     Maintained
 F:     Documentation/fb/uvesafb.txt
 F:     drivers/video/fbdev/uvesafb.*
@@ -15896,6 +15918,7 @@ F:      net/x25/
 X86 ARCHITECTURE (32-BIT AND 64-BIT)
 M:     Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
+M:     Borislav Petkov <bp@alien8.de>
 R:     "H. Peter Anvin" <hpa@zytor.com>
 M:     x86@kernel.org
 L:     linux-kernel@vger.kernel.org
@@ -15924,6 +15947,15 @@ M:     Borislav Petkov <bp@alien8.de>
 S:     Maintained
 F:     arch/x86/kernel/cpu/microcode/*
 
+X86 MM
+M:     Dave Hansen <dave.hansen@linux.intel.com>
+M:     Andy Lutomirski <luto@kernel.org>
+M:     Peter Zijlstra <peterz@infradead.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+S:     Maintained
+F:     arch/x86/mm/
+
 X86 PLATFORM DRIVERS
 M:     Darren Hart <dvhart@infradead.org>
 M:     Andy Shevchenko <andy@infradead.org>
index 2b45880..9b2df07 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
 NAME = Merciless Moray
 
 # *DOCUMENTATION*
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
 KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
 export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
 
-# SUBARCH tells the usermode build what the underlying arch is.  That is set
-# first, and if a usermode build is happening, the "ARCH=um" on the command
-# line overrides the setting of ARCH below.  If a native build is happening,
-# then ARCH is assigned, getting whatever value it gets normally, and
-# SUBARCH is subsequently ignored.
-
-SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-                                 -e s/sun4u/sparc64/ \
-                                 -e s/arm.*/arm/ -e s/sa110/arm/ \
-                                 -e s/s390x/s390/ -e s/parisc64/parisc/ \
-                                 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-                                 -e s/riscv.*/riscv/)
+include scripts/subarch.include
 
 # Cross compiling and selecting different set of gcc/bin-utils
 # ---------------------------------------------------------------------------
@@ -616,6 +604,11 @@ CFLAGS_GCOV        := -fprofile-arcs -ftest-coverage \
        $(call cc-disable-warning,maybe-uninitialized,)
 export CFLAGS_GCOV
 
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER
+  CC_FLAGS_FTRACE := -pg
+endif
+
 # The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
 # values of the respective KBUILD_* variables
 ARCH_CPPFLAGS :=
@@ -755,9 +748,6 @@ KBUILD_CFLAGS       += $(call cc-option, -femit-struct-debug-baseonly) \
 endif
 
 ifdef CONFIG_FUNCTION_TRACER
-ifndef CC_FLAGS_FTRACE
-CC_FLAGS_FTRACE := -pg
-endif
 ifdef CONFIG_FTRACE_MCOUNT_RECORD
   # gcc 5 supports generating the mcount tables directly
   ifeq ($(call cc-option-yn,-mrecord-mcount),y)
@@ -807,6 +797,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
 # disable pointer signed / unsigned warnings in gcc 4.0
 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += $(call cc-option,-fno-strict-overflow)
 
index 6d5eb82..a045f30 100644 (file)
@@ -9,6 +9,7 @@
 config ARC
        def_bool y
        select ARC_TIMERS
+       select ARCH_HAS_PTE_SPECIAL
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_HAS_SG_CHAIN
@@ -28,8 +29,12 @@ config ARC
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_FUTEX_CMPXCHG if FUTEX
+       select HAVE_GENERIC_DMA_COHERENT
        select HAVE_IOREMAP_PROT
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZMA
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
@@ -44,11 +49,6 @@ config ARC
        select OF_EARLY_FLATTREE
        select OF_RESERVED_MEM
        select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_GENERIC_DMA_COHERENT
-       select HAVE_KERNEL_GZIP
-       select HAVE_KERNEL_LZMA
-       select ARCH_HAS_PTE_SPECIAL
 
 config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
@@ -149,7 +149,7 @@ config ARC_CPU_770
          Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
          This core has a bunch of cool new features:
          -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
-                   Shared Address Spaces (for sharing TLB entires in MMU)
+                   Shared Address Spaces (for sharing TLB entries in MMU)
          -Caches: New Prog Model, Region Flush
          -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
 
index fb02619..644815c 100644 (file)
@@ -6,33 +6,11 @@
 # published by the Free Software Foundation.
 #
 
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=archs
-
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
-    $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
-    $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
+cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=hs38
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
@@ -43,10 +21,7 @@ ifdef CONFIG_ARC_CURR_IN_REG
 LINUXINCLUDE   +=  -include ${src}/arch/arc/include/asm/current.h
 endif
 
-upto_gcc44    :=  $(call cc-ifversion, -le, 0404, y)
-atleast_gcc44 :=  $(call cc-ifversion, -ge, 0404, y)
-
-cflags-$(atleast_gcc44)                        += -fsection-anchors
+cflags-y                               += -fsection-anchors
 
 cflags-$(CONFIG_ARC_HAS_LLSC)          += -mlock
 cflags-$(CONFIG_ARC_HAS_SWAPE)         += -mswape
@@ -82,12 +57,7 @@ cflags-$(disable_small_data)         += -mno-sdata -fcall-used-gp
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -mbig-endian
 ldflags-$(CONFIG_CPU_BIG_ENDIAN)       += -EB
 
-# STAR 9000518362: (fixed with binutils shipping with gcc 4.8)
-# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
-# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
-ldflags-$(upto_gcc44)                  += -marclinux
-
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
 KBUILD_CFLAGS_MODULE   += -mlong-calls -mno-millicode
index dc91c66..d75d65d 100644 (file)
        };
 
        /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
+       /*
         * The DW APB ICTL intc on MB is connected to CPU intc via a
         * DT "invisible" DW APB GPIO block, configured to simply pass thru
         * interrupts - setup accordinly in platform init (plat-axs10x/ax10x.c)
index 69ff489..a05bb73 100644 (file)
        };
 
        /*
+        * Mark DMA peripherals connected via IOC port as dma-coherent. We do
+        * it via overlay because peripherals defined in axs10x_mb.dtsi are
+        * used for both AXS101 and AXS103 boards and only AXS103 has IOC (so
+        * only AXS103 board has HW-coherent DMA peripherals)
+        * We don't need to mark pgu@17000 as dma-coherent because it uses
+        * external DMA buffer located outside of IOC aperture.
+        */
+       axs10x_mb {
+               ethernet@0x18000 {
+                       dma-coherent;
+               };
+
+               ehci@0x40000 {
+                       dma-coherent;
+               };
+
+               ohci@0x60000 {
+                       dma-coherent;
+               };
+
+               mmc@0x15000 {
+                       dma-coherent;
+               };
+       };
+
+       /*
         * This INTC is actually connected to DW APB GPIO
         * which acts as a wire between MB INTC and CPU INTC.
         * GPIO INTC is configured in platform init code
index 47b74fb..37bafd4 100644 (file)
@@ -9,6 +9,10 @@
  */
 
 / {
+       aliases {
+               ethernet = &gmac;
+       };
+
        axs10x_mb {
                compatible = "simple-bus";
                #address-cells = <1>;
@@ -68,7 +72,7 @@
                        };
                };
 
-               ethernet@0x18000 {
+               gmac: ethernet@0x18000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = < 0x18000 0x2000 >;
@@ -81,6 +85,7 @@
                        max-speed = <100>;
                        resets = <&creg_rst 5>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
                };
 
                ehci@0x40000 {
index 006aa3d..ef149f5 100644 (file)
                bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
        };
 
+       aliases {
+               ethernet = &gmac;
+       };
+
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
                        #clock-cells = <0>;
                };
 
-               ethernet@8000 {
+               gmac: ethernet@8000 {
                        #interrupt-cells = <1>;
                        compatible = "snps,dwmac";
                        reg = <0x8000 0x2000>;
                        phy-handle = <&phy0>;
                        resets = <&cgu_rst HSDK_ETH_RESET>;
                        reset-names = "stmmaceth";
+                       mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
+                       dma-coherent;
 
                        mdio {
                                #address-cells = <1>;
                        compatible = "snps,hsdk-v1.0-ohci", "generic-ohci";
                        reg = <0x60000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                ehci@40000 {
                        compatible = "snps,hsdk-v1.0-ehci", "generic-ehci";
                        reg = <0x40000 0x100>;
                        interrupts = <15>;
+                       dma-coherent;
                };
 
                mmc@a000 {
                        clock-names = "biu", "ciu";
                        interrupts = <12>;
                        bus-width = <4>;
+                       dma-coherent;
                };
        };
 
index a635ea9..41bc08b 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -63,7 +61,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index aa507e4..1e1c4a8 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -64,7 +62,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index eba07f4..6b0c0cf 100644 (file)
@@ -1,5 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -65,7 +63,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_MOUSE_SERIAL=y
 CONFIG_MOUSE_SYNAPTICS_USB=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 098b19f..240dd2c 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -57,7 +56,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 0104c40..14ae7e5 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -60,7 +59,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 6491be0..1dec2b4 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_NO_HZ_IDLE=y
index 7c9c706..31ba224 100644 (file)
@@ -59,7 +59,6 @@ CONFIG_NETCONSOLE=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 99e05cf..8e0b8b1 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -44,7 +43,6 @@ CONFIG_LXT_PHY=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 0dc4f9b..739b90e 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -45,7 +44,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index be3c30a..b5895bd 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
@@ -44,7 +43,6 @@ CONFIG_DEVTMPFS=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_ARC=y
 CONFIG_SERIAL_ARC_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
index 3a74b9b..f14eeff 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -48,7 +47,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index ea2834b..025298a 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -47,7 +46,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 80a5a1b..df7b77b 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 # CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -58,7 +57,6 @@ CONFIG_MOUSE_PS2_TOUCHKIT=y
 # CONFIG_SERIO_SERPORT is not set
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index 2cc87f9..a7f6531 100644 (file)
@@ -57,7 +57,6 @@ CONFIG_STMMAC_ETH=y
 # CONFIG_SERIO is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=1
index f629493..db47c35 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
@@ -53,7 +52,6 @@ CONFIG_NATIONAL_PHY=y
 CONFIG_MOUSE_PS2_TOUCHKIT=y
 CONFIG_SERIO_ARC_PS2=y
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
index 21f0ca2..a8ac5e9 100644 (file)
@@ -1,5 +1,4 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
index 4e00727..158af07 100644 (file)
@@ -84,7 +84,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v)                       \
        "1:     llock   %[orig], [%[ctr]]               \n"             \
        "       " #asm_op " %[val], %[orig], %[i]       \n"             \
        "       scond   %[val], [%[ctr]]                \n"             \
-       "                                               \n"             \
+       "       bnz     1b                              \n"             \
        : [val] "=&r"   (val),                                          \
          [orig] "=&r" (orig)                                           \
        : [ctr] "r"     (&v->counter),                                  \
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644 (file)
index 0000000..c946c0a
--- /dev/null
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier:  GPL-2.0
+// (C) 2018 Synopsys, Inc. (www.synopsys.com)
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-mapping.h>
+
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent);
+#define arch_setup_dma_ops arch_setup_dma_ops
+
+#endif
index 4674541..8ce6e72 100644 (file)
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
                task_thread_info(current)->thr_ptr;
        }
 
+
+       /*
+        * setup usermode thread pointer #1:
+        * when child is picked by scheduler, __switch_to() uses @c_callee to
+        * populate usermode callee regs: this works (despite being in a kernel
+        * function) since special return path for child @ret_from_fork()
+        * ensures those regs are not clobbered all the way to RTIE to usermode
+        */
+       c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+       /*
+        * setup usermode thread pointer #2:
+        * however for this special use of r25 in kernel, __switch_to() sets
+        * r25 for kernel needs and only in the final return path is usermode
+        * r25 setup, from pt_regs->user_r25. So set that up as well
+        */
+       c_regs->user_r25 = c_callee->r25;
+#endif
+
        return 0;
 }
 
index 783b203..e8d9fb4 100644 (file)
@@ -83,9 +83,6 @@ done:
 static void show_faulting_vma(unsigned long address, char *buf)
 {
        struct vm_area_struct *vma;
-       struct inode *inode;
-       unsigned long ino = 0;
-       dev_t dev = 0;
        char *nm = buf;
        struct mm_struct *active_mm = current->active_mm;
 
@@ -99,12 +96,10 @@ static void show_faulting_vma(unsigned long address, char *buf)
         * if the container VMA is not found
         */
        if (vma && (vma->vm_start <= address)) {
-               struct file *file = vma->vm_file;
-               if (file) {
-                       nm = file_path(file, buf, PAGE_SIZE - 1);
-                       inode = file_inode(vma->vm_file);
-                       dev = inode->i_sb->s_dev;
-                       ino = inode->i_ino;
+               if (vma->vm_file) {
+                       nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+                       if (IS_ERR(nm))
+                               nm = "?";
                }
                pr_info("    @off 0x%lx in [%s]\n"
                        "    VMA: 0x%08lx to 0x%08lx\n",
index 25c6319..f2701c1 100644 (file)
@@ -65,7 +65,7 @@ char *arc_cache_mumbojumbo(int c, char *buf, int len)
 
        n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
                       perip_base,
-                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
+                      IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));
 
        return buf;
 }
@@ -897,15 +897,6 @@ static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
 }
 
 /*
- * DMA ops for systems with IOC
- * IOC hardware snoops all DMA traffic keeping the caches consistent with
- * memory - eliding need for any explicit cache maintenance of DMA buffers
- */
-static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
-static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
-
-/*
  * Exported DMA API
  */
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
@@ -1153,6 +1144,19 @@ noinline void __init arc_ioc_setup(void)
 {
        unsigned int ioc_base, mem_sz;
 
+       /*
+        * As for today we don't support both IOC and ZONE_HIGHMEM enabled
+        * simultaneously. This happens because as of today IOC aperture covers
+        * only ZONE_NORMAL (low mem) and any dma transactions outside this
+        * region won't be HW coherent.
+        * If we want to use both IOC and ZONE_HIGHMEM we can use
+        * bounce_buffer to handle dma transactions to HIGHMEM.
+        * Also it is possible to modify dma_direct cache ops or increase IOC
+        * aperture size if we are planning to use HIGHMEM without PAE.
+        */
+       if (IS_ENABLED(CONFIG_HIGHMEM))
+               panic("IOC and HIGHMEM can't be used simultaneously");
+
        /* Flush + invalidate + disable L1 dcache */
        __dc_disable();
 
@@ -1264,11 +1268,7 @@ void __init arc_cache_init_master(void)
        if (is_isa_arcv2() && ioc_enable)
                arc_ioc_setup();
 
-       if (is_isa_arcv2() && ioc_enable) {
-               __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
-               __dma_cache_inv = __dma_cache_inv_ioc;
-               __dma_cache_wback = __dma_cache_wback_ioc;
-       } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
+       if (is_isa_arcv2() && l2_line_sz && slc_enable) {
                __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
                __dma_cache_inv = __dma_cache_inv_slc;
                __dma_cache_wback = __dma_cache_wback_slc;
@@ -1277,6 +1277,12 @@ void __init arc_cache_init_master(void)
                __dma_cache_inv = __dma_cache_inv_l1;
                __dma_cache_wback = __dma_cache_wback_l1;
        }
+       /*
+        * In case of IOC (say IOC+SLC case), pointers above could still be set
+        * but end up not being relevant as the first function in chain is not
+        * called at all for @dma_direct_ops
+        *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
+        */
 }
 
 void __ref arc_cache_init(void)
index ec47e60..c75d5c3 100644 (file)
@@ -6,20 +6,17 @@
  * published by the Free Software Foundation.
  */
 
-/*
- * DMA Coherent API Notes
- *
- * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
- * implemented by accessing it using a kernel virtual address, with
- * Cache bit off in the TLB entry.
- *
- * The default DMA address == Phy address which is 0x8000_0000 based.
- */
-
 #include <linux/dma-noncoherent.h>
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 
+/*
+ * ARCH specific callbacks for generic noncoherent DMA ops (dma/noncoherent.c)
+ *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
+ *  - But still handle both coherent and non-coherent requests from caller
+ *
+ * For DMA coherent hardware (IOC) generic code suffices
+ */
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
 {
@@ -27,42 +24,29 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
        struct page *page;
        phys_addr_t paddr;
        void *kvaddr;
-       int need_coh = 1, need_kvaddr = 0;
-
-       page = alloc_pages(gfp, order);
-       if (!page)
-               return NULL;
+       bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
        /*
-        * IOC relies on all data (even coherent DMA data) being in cache
-        * Thus allocate normal cached memory
-        *
-        * The gains with IOC are two pronged:
-        *   -For streaming data, elides need for cache maintenance, saving
-        *    cycles in flush code, and bus bandwidth as all the lines of a
-        *    buffer need to be flushed out to memory
-        *   -For coherent data, Read/Write to buffers terminate early in cache
-        *   (vs. always going to memory - thus are faster)
+        * __GFP_HIGHMEM flag is cleared by upper layer functions
+        * (in include/linux/dma-mapping.h) so we should never get a
+        * __GFP_HIGHMEM here.
         */
-       if ((is_isa_arcv2() && ioc_enable) ||
-           (attrs & DMA_ATTR_NON_CONSISTENT))
-               need_coh = 0;
+       BUG_ON(gfp & __GFP_HIGHMEM);
 
-       /*
-        * - A coherent buffer needs MMU mapping to enforce non-cachability
-        * - A highmem page needs a virtual handle (hence MMU mapping)
-        *   independent of cachability
-        */
-       if (PageHighMem(page) || need_coh)
-               need_kvaddr = 1;
+       page = alloc_pages(gfp, order);
+       if (!page)
+               return NULL;
 
        /* This is linear addr (0x8000_0000 based) */
        paddr = page_to_phys(page);
 
        *dma_handle = paddr;
 
-       /* This is kernel Virtual address (0x7000_0000 based) */
-       if (need_kvaddr) {
+       /*
+        * A coherent buffer needs MMU mapping to enforce non-cachability.
+        * kvaddr is kernel Virtual address (0x7000_0000 based).
+        */
+       if (need_coh) {
                kvaddr = ioremap_nocache(paddr, size);
                if (kvaddr == NULL) {
                        __free_pages(page, order);
@@ -93,12 +77,8 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 {
        phys_addr_t paddr = dma_handle;
        struct page *page = virt_to_page(paddr);
-       int is_non_coh = 1;
-
-       is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
-                       (is_isa_arcv2() && ioc_enable);
 
-       if (PageHighMem(page) || !is_non_coh)
+       if (!(attrs & DMA_ATTR_NON_CONSISTENT))
                iounmap((void __force __iomem *)vaddr);
 
        __free_pages(page, get_order(size));
@@ -185,3 +165,23 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                break;
        }
 }
+
+/*
+ * Plug in coherent or noncoherent dma ops
+ */
+void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                       const struct iommu_ops *iommu, bool coherent)
+{
+       /*
+        * IOC hardware snoops all DMA traffic keeping the caches consistent
+        * with memory - eliding need for any explicit cache maintenance of
+        * DMA buffers - so we can use dma_direct cache ops.
+        */
+       if (is_isa_arcv2() && ioc_enable && coherent) {
+               set_dma_ops(dev, &dma_direct_ops);
+               dev_info(dev, "use dma_direct_ops cache ops\n");
+       } else {
+               set_dma_ops(dev, &dma_noncoherent_ops);
+               dev_info(dev, "use dma_noncoherent_ops cache ops\n");
+       }
+}
old mode 100755 (executable)
new mode 100644 (file)
index f0cbd86..d4b7c59 100644 (file)
                        ti,hwmods = "rtc";
                        clocks = <&clk_32768_ck>;
                        clock-names = "int-clk";
+                       system-power-controller;
                        status = "disabled";
                };
 
index b10dccd..3b1baa8 100644 (file)
@@ -11,6 +11,7 @@
 #include "sama5d2-pinfunc.h"
 #include <dt-bindings/mfd/atmel-flexcom.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
 
 / {
        model = "Atmel SAMA5D2 PTC EK";
                                                         <PIN_PA30__NWE_NANDWE>,
                                                         <PIN_PB2__NRD_NANDOE>;
                                                bias-pull-up;
+                                               atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
                                        };
 
                                        ale_cle_rdy_cs {
index 43ee992..6df6151 100644 (file)
                global_timer: timer@1e200 {
                        compatible = "arm,cortex-a9-global-timer";
                        reg = <0x1e200 0x20>;
-                       interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                        clocks = <&axi_clk>;
                };
 
                local_timer: local-timer@1e600 {
                        compatible = "arm,cortex-a9-twd-timer";
                        reg = <0x1e600 0x20>;
-                       interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_EDGE_RISING)>;
                        clocks = <&axi_clk>;
                };
 
                twd_watchdog: watchdog@1e620 {
                        compatible = "arm,cortex-a9-twd-wdt";
                        reg = <0x1e620 0x20>;
-                       interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+                                                 IRQ_TYPE_LEVEL_HIGH)>;
                };
 
                armpll: armpll {
                serial0: serial@600 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x600 0x1b>;
-                       interrupts = <GIC_SPI 32 0>;
+                       interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
                serial1: serial@620 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x620 0x1b>;
-                       interrupts = <GIC_SPI 33 0>;
+                       interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&periph_clk>;
                        clock-names = "periph";
                        status = "disabled";
                        reg = <0x2000 0x600>, <0xf0 0x10>;
                        reg-names = "nand", "nand-int-base";
                        status = "disabled";
-                       interrupts = <GIC_SPI 38 0>;
+                       interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "nand";
                };
 
index 9fb4772..ad2ae25 100644 (file)
                reg = <0x40000000 0x08000000>;
        };
 
+       reg_vddio_sd0: regulator-vddio-sd0 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddio-sd0";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio1 29 0>;
+       };
+
+       reg_lcd_3v3: regulator-lcd-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio1 18 0>;
+               enable-active-high;
+       };
+
+       reg_lcd_5v: regulator-lcd-5v {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-5v";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       panel {
+               compatible = "sii,43wvf1g";
+               backlight = <&backlight_display>;
+               dvdd-supply = <&reg_lcd_3v3>;
+               avdd-supply = <&reg_lcd_5v>;
+
+               port {
+                       panel_in: endpoint {
+                               remote-endpoint = <&display_out>;
+                       };
+               };
+       };
+
        apb@80000000 {
                apbh@80000000 {
                        gpmi-nand@8000c000 {
                        lcdif@80030000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&lcdif_24bit_pins_a>;
-                               lcd-supply = <&reg_lcd_3v3>;
-                               display = <&display0>;
                                status = "okay";
 
-                               display0: display0 {
-                                       bits-per-pixel = <32>;
-                                       bus-width = <24>;
-
-                                       display-timings {
-                                               native-mode = <&timing0>;
-                                               timing0: timing0 {
-                                                       clock-frequency = <9200000>;
-                                                       hactive = <480>;
-                                                       vactive = <272>;
-                                                       hback-porch = <15>;
-                                                       hfront-porch = <8>;
-                                                       vback-porch = <12>;
-                                                       vfront-porch = <4>;
-                                                       hsync-len = <1>;
-                                                       vsync-len = <1>;
-                                                       hsync-active = <0>;
-                                                       vsync-active = <0>;
-                                                       de-active = <1>;
-                                                       pixelclk-active = <0>;
-                                               };
+                               port {
+                                       display_out: endpoint {
+                                               remote-endpoint = <&panel_in>;
                                        };
                                };
                        };
                };
        };
 
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               reg_vddio_sd0: regulator@0 {
-                       compatible = "regulator-fixed";
-                       reg = <0>;
-                       regulator-name = "vddio-sd0";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio1 29 0>;
-               };
-
-               reg_lcd_3v3: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "lcd-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio1 18 0>;
-                       enable-active-high;
-               };
-       };
-
-       backlight {
+       backlight_display: backlight {
                compatible = "pwm-backlight";
                pwms = <&pwm 2 5000000>;
                brightness-levels = <0 4 8 16 32 64 128 255>;
index 6b0ae66..93ab5bd 100644 (file)
                reg = <0x40000000 0x08000000>;
        };
 
+
+       reg_3p3v: regulator-3p3v {
+               compatible = "regulator-fixed";
+               regulator-name = "3P3V";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-always-on;
+       };
+
+       reg_vddio_sd0: regulator-vddio-sd0 {
+               compatible = "regulator-fixed";
+               regulator-name = "vddio-sd0";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio3 28 0>;
+       };
+
+       reg_fec_3v3: regulator-fec-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "fec-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio2 15 0>;
+       };
+
+       reg_usb0_vbus: regulator-usb0-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb0_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               gpio = <&gpio3 9 0>;
+               enable-active-high;
+       };
+
+       reg_usb1_vbus: regulator-usb1-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb1_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               gpio = <&gpio3 8 0>;
+               enable-active-high;
+       };
+
+       reg_lcd_3v3: regulator-lcd-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio3 30 0>;
+               enable-active-high;
+       };
+
+       reg_can_3v3: regulator-can-3v3 {
+               compatible = "regulator-fixed";
+               regulator-name = "can-3v3";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio2 13 0>;
+               enable-active-high;
+       };
+
+       reg_lcd_5v: regulator-lcd-5v {
+               compatible = "regulator-fixed";
+               regulator-name = "lcd-5v";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       panel {
+               compatible = "sii,43wvf1g";
+               backlight = <&backlight_display>;
+               dvdd-supply = <&reg_lcd_3v3>;
+               avdd-supply = <&reg_lcd_5v>;
+
+               port {
+                       panel_in: endpoint {
+                               remote-endpoint = <&display_out>;
+                       };
+               };
+       };
+
        apb@80000000 {
                apbh@80000000 {
                        gpmi-nand@8000c000 {
                                pinctrl-names = "default";
                                pinctrl-0 = <&lcdif_24bit_pins_a
                                             &lcdif_pins_evk>;
-                               lcd-supply = <&reg_lcd_3v3>;
-                               display = <&display0>;
                                status = "okay";
 
-                               display0: display0 {
-                                       bits-per-pixel = <32>;
-                                       bus-width = <24>;
-
-                                       display-timings {
-                                               native-mode = <&timing0>;
-                                               timing0: timing0 {
-                                                       clock-frequency = <33500000>;
-                                                       hactive = <800>;
-                                                       vactive = <480>;
-                                                       hback-porch = <89>;
-                                                       hfront-porch = <164>;
-                                                       vback-porch = <23>;
-                                                       vfront-porch = <10>;
-                                                       hsync-len = <10>;
-                                                       vsync-len = <10>;
-                                                       hsync-active = <0>;
-                                                       vsync-active = <0>;
-                                                       de-active = <1>;
-                                                       pixelclk-active = <0>;
-                                               };
+                               port {
+                                       display_out: endpoint {
+                                               remote-endpoint = <&panel_in>;
                                        };
                                };
                        };
                };
        };
 
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               reg_3p3v: regulator@0 {
-                       compatible = "regulator-fixed";
-                       reg = <0>;
-                       regulator-name = "3P3V";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       regulator-always-on;
-               };
-
-               reg_vddio_sd0: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "vddio-sd0";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio3 28 0>;
-               };
-
-               reg_fec_3v3: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "fec-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio2 15 0>;
-               };
-
-               reg_usb0_vbus: regulator@3 {
-                       compatible = "regulator-fixed";
-                       reg = <3>;
-                       regulator-name = "usb0_vbus";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       gpio = <&gpio3 9 0>;
-                       enable-active-high;
-               };
-
-               reg_usb1_vbus: regulator@4 {
-                       compatible = "regulator-fixed";
-                       reg = <4>;
-                       regulator-name = "usb1_vbus";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       gpio = <&gpio3 8 0>;
-                       enable-active-high;
-               };
-
-               reg_lcd_3v3: regulator@5 {
-                       compatible = "regulator-fixed";
-                       reg = <5>;
-                       regulator-name = "lcd-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio3 30 0>;
-                       enable-active-high;
-               };
-
-               reg_can_3v3: regulator@6 {
-                       compatible = "regulator-fixed";
-                       reg = <6>;
-                       regulator-name = "can-3v3";
-                       regulator-min-microvolt = <3300000>;
-                       regulator-max-microvolt = <3300000>;
-                       gpio = <&gpio2 13 0>;
-                       enable-active-high;
-               };
-
-       };
-
        sound {
                compatible = "fsl,imx28-evk-sgtl5000",
                             "fsl,mxs-audio-sgtl5000";
                };
        };
 
-       backlight {
+       backlight_display: backlight {
                compatible = "pwm-backlight";
                pwms = <&pwm 2 5000000>;
                brightness-levels = <0 4 8 16 32 64 128 255>;
index 7cbc2ff..7234e83 100644 (file)
                interrupt-names = "msi";
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0x7>;
-               interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
-                               <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
+               /*
+                * Reference manual lists pci irqs incorrectly
+                * Real hardware ordering is same as imx6: D+MSI, C, B, A
+                */
+               interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                               <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
                         <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
                         <&clks IMX7D_PCIE_PHY_ROOT_CLK>;
index 12d6822..04758a2 100644 (file)
 &mmc2 {
        vmmc-supply = <&vsdio>;
        bus-width = <8>;
-       non-removable;
+       ti,non-removable;
 };
 
 &mmc3 {
                OMAP4_IOPAD(0x10c, PIN_INPUT | MUX_MODE1)       /* abe_mcbsp3_fsx */
                >;
        };
-};
-
-&omap4_pmx_wkup {
-       usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
-               /* gpio_wk0 */
-               pinctrl-single,pins = <
-               OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
-               >;
-       };
 
        vibrator_direction_pin: pinmux_vibrator_direction_pin {
                pinctrl-single,pins = <
        };
 };
 
+&omap4_pmx_wkup {
+       usb_gpio_mux_sel2: pinmux_usb_gpio_mux_sel2_pins {
+               /* gpio_wk0 */
+               pinctrl-single,pins = <
+               OMAP4_IOPAD(0x040, PIN_OUTPUT_PULLDOWN | MUX_MODE3)
+               >;
+       };
+};
+
 /*
  * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for
  * uart1 wakeirq.
index 7cb235e..6e9e1c2 100644 (file)
@@ -41,7 +41,7 @@
                        };
 
                        macb1: ethernet@f802c000 {
-                               compatible = "cdns,at91sam9260-macb", "cdns,macb";
+                               compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
                                reg = <0xf802c000 0x100>;
                                interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 661be94..185541a 100644 (file)
                        interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&rcc SPI6_K>;
                        resets = <&rcc SPI6_R>;
-                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
-                              <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+                       dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+                              <&mdma1 35 0x0 0x40002 0x0 0x0>;
                        dma-names = "rx", "tx";
                        status = "disabled";
                };
index ffd9f00..5f547c1 100644 (file)
                };
 
                hdmi_phy: hdmi-phy@1ef0000 {
-                       compatible = "allwinner,sun8i-r40-hdmi-phy",
-                                    "allwinner,sun50i-a64-hdmi-phy";
+                       compatible = "allwinner,sun8i-r40-hdmi-phy";
                        reg = <0x01ef0000 0x10000>;
                        clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
                                 <&ccu 7>, <&ccu 16>;
index e2c1276..7eca43f 100644 (file)
@@ -257,6 +257,7 @@ CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
 CONFIG_DRM_DW_HDMI_CEC=y
 CONFIG_DRM_IMX=y
index 148226e..7b82128 100644 (file)
@@ -95,6 +95,7 @@ CONFIG_MFD_MXS_LRADC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
+CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
 CONFIG_DRM_MXSFB=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
index df68dc4..5282324 100644 (file)
@@ -5,19 +5,19 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
 # CONFIG_ARCH_MULTI_V7 is not set
 CONFIG_ARCH_VERSATILE=y
 CONFIG_AEABI=y
 CONFIG_OABI_COMPAT=y
-CONFIG_CMA=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=1f03 mem=32M"
 CONFIG_FPE_NWFPE=y
 CONFIG_VFP=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -59,6 +59,7 @@ CONFIG_GPIO_PL061=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_ARM_VERSATILE=y
 CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=y
 CONFIG_DRM_PL111=y
 CONFIG_FB_MODE_HELPERS=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
@@ -89,9 +90,10 @@ CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 CONFIG_NLS_CODEPAGE_850=m
 CONFIG_NLS_ISO8859_1=m
+CONFIG_FONTS=y
+CONFIG_FONT_ACORN_8x8=y
+CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_DEBUG_USER=y
 CONFIG_DEBUG_LL=y
-CONFIG_FONTS=y
-CONFIG_FONT_ACORN_8x8=y
index 79906ce..3ad482d 100644 (file)
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 2ceffd8..cd65ea4 100644 (file)
@@ -2161,6 +2161,37 @@ static int of_dev_hwmod_lookup(struct device_node *np,
 }
 
 /**
+ * omap_hwmod_fix_mpu_rt_idx - fix up mpu_rt_idx register offsets
+ *
+ * @oh: struct omap_hwmod *
+ * @np: struct device_node *
+ *
+ * Fix up module register offsets for modules with mpu_rt_idx.
+ * Only needed for cpsw with interconnect target module defined
+ * in device tree while still using legacy hwmod platform data
+ * for rev, sysc and syss registers.
+ *
+ * Can be removed when all cpsw hwmod platform data has been
+ * dropped.
+ */
+static void omap_hwmod_fix_mpu_rt_idx(struct omap_hwmod *oh,
+                                     struct device_node *np,
+                                     struct resource *res)
+{
+       struct device_node *child = NULL;
+       int error;
+
+       child = of_get_next_child(np, child);
+       if (!child)
+               return;
+
+       error = of_address_to_resource(child, oh->mpu_rt_idx, res);
+       if (error)
+               pr_err("%s: error mapping mpu_rt_idx: %i\n",
+                      __func__, error);
+}
+
+/**
  * omap_hwmod_parse_module_range - map module IO range from device tree
  * @oh: struct omap_hwmod *
  * @np: struct device_node *
@@ -2220,7 +2251,13 @@ int omap_hwmod_parse_module_range(struct omap_hwmod *oh,
        size = be32_to_cpup(ranges);
 
        pr_debug("omap_hwmod: %s %s at 0x%llx size 0x%llx\n",
-                oh->name, np->name, base, size);
+                oh ? oh->name : "", np->name, base, size);
+
+       if (oh && oh->mpu_rt_idx) {
+               omap_hwmod_fix_mpu_rt_idx(oh, np, res);
+
+               return 0;
+       }
 
        res->start = base;
        res->end = base + size - 1;
index fc91205..5bf9443 100644 (file)
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
 
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
-       BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+       BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
 
        return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
                                  PCI_IO_VIRT_BASE + offset + SZ_64K,
index fbc74b5..8edf93b 100644 (file)
 396    common  pkey_free               sys_pkey_free
 397    common  statx                   sys_statx
 398    common  rseq                    sys_rseq
+399    common  io_pgetevents           sys_io_pgetevents
index 29e75b4..1b1a0e9 100644 (file)
@@ -763,7 +763,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
 
 config HOLES_IN_ZONE
        def_bool y
-       depends on NUMA
 
 source kernel/Kconfig.hz
 
index ceffc40..48daec7 100644 (file)
@@ -46,6 +46,7 @@
        pinctrl-0 = <&mmc0_pins>;
        vmmc-supply = <&reg_cldo1>;
        cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
        status = "okay";
 };
 
@@ -56,6 +57,7 @@
        vqmmc-supply = <&reg_bldo2>;
        non-removable;
        cap-mmc-hw-reset;
+       bus-width = <8>;
        status = "okay";
 };
 
index f67e8d5..db8d364 100644 (file)
@@ -38,6 +38,7 @@ CONFIG_ARCH_BCM_IPROC=y
 CONFIG_ARCH_BERLIN=y
 CONFIG_ARCH_BRCMSTB=y
 CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_K3=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_LG1K=y
 CONFIG_ARCH_HISI=y
@@ -605,6 +606,8 @@ CONFIG_ARCH_TEGRA_132_SOC=y
 CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_ARCH_K3_AM6_SOC=y
+CONFIG_SOC_TI=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_EXTCON_USBC_CROS_EC=y
index 6e9f33d..067d893 100644 (file)
@@ -417,7 +417,7 @@ static int gcm_encrypt(struct aead_request *req)
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-               while (walk.nbytes >= AES_BLOCK_SIZE) {
+               while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;
@@ -437,11 +437,18 @@ static int gcm_encrypt(struct aead_request *req)
                                        NULL);
 
                        err = skcipher_walk_done(&walk,
-                                                walk.nbytes % AES_BLOCK_SIZE);
+                                                walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
-               if (walk.nbytes)
+               if (walk.nbytes) {
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, ks, iv,
                                            nrounds);
+                       if (walk.nbytes > AES_BLOCK_SIZE) {
+                               crypto_inc(iv, AES_BLOCK_SIZE);
+                               __aes_arm64_encrypt(ctx->aes_key.key_enc,
+                                                   ks + AES_BLOCK_SIZE, iv,
+                                                   nrounds);
+                       }
+               }
        }
 
        /* handle the tail */
@@ -545,7 +552,7 @@ static int gcm_decrypt(struct aead_request *req)
                __aes_arm64_encrypt(ctx->aes_key.key_enc, tag, iv, nrounds);
                put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
-               while (walk.nbytes >= AES_BLOCK_SIZE) {
+               while (walk.nbytes >= (2 * AES_BLOCK_SIZE)) {
                        int blocks = walk.nbytes / AES_BLOCK_SIZE;
                        u8 *dst = walk.dst.virt.addr;
                        u8 *src = walk.src.virt.addr;
@@ -564,11 +571,21 @@ static int gcm_decrypt(struct aead_request *req)
                        } while (--blocks > 0);
 
                        err = skcipher_walk_done(&walk,
-                                                walk.nbytes % AES_BLOCK_SIZE);
+                                                walk.nbytes % (2 * AES_BLOCK_SIZE));
                }
-               if (walk.nbytes)
+               if (walk.nbytes) {
+                       if (walk.nbytes > AES_BLOCK_SIZE) {
+                               u8 *iv2 = iv + AES_BLOCK_SIZE;
+
+                               memcpy(iv2, iv, AES_BLOCK_SIZE);
+                               crypto_inc(iv2, AES_BLOCK_SIZE);
+
+                               __aes_arm64_encrypt(ctx->aes_key.key_enc, iv2,
+                                                   iv2, nrounds);
+                       }
                        __aes_arm64_encrypt(ctx->aes_key.key_enc, iv, iv,
                                            nrounds);
+               }
        }
 
        /* handle the tail */
index b7fb527..0c4fc22 100644 (file)
@@ -69,5 +69,5 @@ static void __exit sm4_ce_mod_fini(void)
        crypto_unregister_alg(&sm4_ce_alg);
 }
 
-module_cpu_feature_match(SM3, sm4_ce_mod_init);
+module_cpu_feature_match(SM4, sm4_ce_mod_init);
 module_exit(sm4_ce_mod_fini);
index 1b5e0e8..7e2b3e3 100644 (file)
@@ -28,7 +28,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
-       asm goto("1: nop\n\t"
+       asm_volatile_goto("1: nop\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
 
 static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
-       asm goto("1: b %l[l_yes]\n\t"
+       asm_volatile_goto("1: b %l[l_yes]\n\t"
                 ".pushsection __jump_table,  \"aw\"\n\t"
                 ".align 3\n\t"
                 ".quad 1b, %l[l_yes], %c0\n\t"
index f26055f..3d6d733 100644 (file)
@@ -61,8 +61,7 @@ struct kvm_arch {
        u64    vmid_gen;
        u32    vmid;
 
-       /* 1-level 2nd stage table and lock */
-       spinlock_t pgd_lock;
+       /* 1-level 2nd stage table, protected by kvm->mmu_lock */
        pgd_t *pgd;
 
        /* VTTBR value associated with above pgd and vmid */
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                              struct kvm_vcpu_events *events);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 95ac737..4c8b13b 100644 (file)
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC)             += machine_kexec.o relocate_kernel.o    \
 arm64-obj-$(CONFIG_ARM64_RELOC_TEST)   += arm64-reloc-test.o
 arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
 arm64-obj-$(CONFIG_CRASH_DUMP)         += crash_dump.o
+arm64-obj-$(CONFIG_CRASH_CORE)         += crash_core.o
 arm64-obj-$(CONFIG_ARM_SDE_INTERFACE)  += sdei.o
 arm64-obj-$(CONFIG_ARM64_SSBD)         += ssbd.o
 
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644 (file)
index 0000000..ca4c3e1
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/memory.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+       VMCOREINFO_NUMBER(VA_BITS);
+       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+                                               kimage_voffset);
+       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+                                               PHYS_OFFSET);
+       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
index f6a5c6b..922add8 100644 (file)
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
        }
 }
 #endif /* CONFIG_HIBERNATION */
-
-void arch_crash_save_vmcoreinfo(void)
-{
-       VMCOREINFO_NUMBER(VA_BITS);
-       /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
-       vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
-                                               kimage_voffset);
-       vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
-                                               PHYS_OFFSET);
-       vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
index 07256b0..a6c9fba 100644 (file)
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
        return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
 }
 
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+       u64 off = core_reg_offset_from_id(reg->id);
+       int size;
+
+       switch (off) {
+       case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+            KVM_REG_ARM_CORE_REG(regs.regs[30]):
+       case KVM_REG_ARM_CORE_REG(regs.sp):
+       case KVM_REG_ARM_CORE_REG(regs.pc):
+       case KVM_REG_ARM_CORE_REG(regs.pstate):
+       case KVM_REG_ARM_CORE_REG(sp_el1):
+       case KVM_REG_ARM_CORE_REG(elr_el1):
+       case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+            KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+               size = sizeof(__u64);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+            KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+               size = sizeof(__uint128_t);
+               break;
+
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+       case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+               size = sizeof(__u32);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if (KVM_REG_SIZE(reg->id) == size &&
+           IS_ALIGNED(off, size / sizeof(__u32)))
+               return 0;
+
+       return -EINVAL;
+}
+
 static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 {
        /*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
                return -EFAULT;
 
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
            (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
                return -ENOENT;
 
+       if (validate_core_offset(reg))
+               return -EINVAL;
+
        if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
                return -EINVAL;
 
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
        }
 
        if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
-               u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+               u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
                switch (mode) {
                case PSR_AA32_MODE_USR:
+                       if (!system_supports_32bit_el0())
+                               return -EINVAL;
+                       break;
                case PSR_AA32_MODE_FIQ:
                case PSR_AA32_MODE_IRQ:
                case PSR_AA32_MODE_SVC:
                case PSR_AA32_MODE_ABT:
                case PSR_AA32_MODE_UND:
+                       if (!vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
+                       break;
                case PSR_MODE_EL0t:
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
+                       if (vcpu_el1_is_32bit(vcpu))
+                               return -EINVAL;
                        break;
                default:
                        err = -EINVAL;
index d496ef5..ca46153 100644 (file)
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_ZEN;
-       if (!update_fp_enabled(vcpu))
+       if (!update_fp_enabled(vcpu)) {
                val &= ~CPACR_EL1_FPEN;
+               __activate_traps_fpsimd32(vcpu);
+       }
 
        write_sysreg(val, cpacr_el1);
 
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
 
        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
-       if (!update_fp_enabled(vcpu))
+       if (!update_fp_enabled(vcpu)) {
                val |= CPTR_EL2_TFP;
+               __activate_traps_fpsimd32(vcpu);
+       }
 
        write_sysreg(val, cptr_el2);
 }
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
                write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
 
-       __activate_traps_fpsimd32(vcpu);
        if (has_vhe())
                activate_traps_vhe(vcpu);
        else
index 192b3ba..f58ea50 100644 (file)
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
 
                /*
                 * If HW_AFDBM is enabled, then the HW could turn on
-                * the dirty bit for any page in the set, so check
-                * them all.  All hugetlb entries are already young.
+                * the dirty or accessed bit for any page in the set,
+                * so check them all.
                 */
                if (pte_dirty(pte))
                        orig_pte = pte_mkdirty(orig_pte);
+
+               if (pte_young(pte))
+                       orig_pte = pte_mkyoung(orig_pte);
        }
 
        if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
        return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
 }
 
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range we need to check whether or not write
+ * permission has to change only on the first pte in the set. Then for
+ * all the contiguous ptes we need to check whether or not there is a
+ * discrepancy between dirty or young.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+       int i;
+
+       if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+               return 1;
+
+       for (i = 0; i < ncontig; i++) {
+               pte_t orig_pte = huge_ptep_get(ptep + i);
+
+               if (pte_dirty(pte) != pte_dirty(orig_pte))
+                       return 1;
+
+               if (pte_young(pte) != pte_young(orig_pte))
+                       return 1;
+       }
+
+       return 0;
+}
+
 int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty)
 {
-       int ncontig, i, changed = 0;
+       int ncontig, i;
        size_t pgsize = 0;
        unsigned long pfn = pte_pfn(pte), dpfn;
        pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
        ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
        dpfn = pgsize >> PAGE_SHIFT;
 
+       if (!__cont_access_flags_changed(ptep, pte, ncontig))
+               return 0;
+
        orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
-       if (!pte_same(orig_pte, pte))
-               changed = 1;
 
-       /* Make sure we don't lose the dirty state */
+       /* Make sure we don't lose the dirty or young state */
        if (pte_dirty(orig_pte))
                pte = pte_mkdirty(pte);
 
+       if (pte_young(orig_pte))
+               pte = pte_mkyoung(pte);
+
        hugeprot = pte_pgprot(pte);
        for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
                set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
 
-       return changed;
+       return 1;
 }
 
 void huge_ptep_set_wrprotect(struct mm_struct *mm,
index 65f8627..8080c9f 100644 (file)
@@ -985,8 +985,9 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
 
        pmd = READ_ONCE(*pmdp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pmd_present(pmd) || !pmd_table(pmd)) {
+       if (!pmd_present(pmd))
+               return 1;
+       if (!pmd_table(pmd)) {
                VM_WARN_ON(!pmd_table(pmd));
                return 1;
        }
@@ -1007,8 +1008,9 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
 
        pud = READ_ONCE(*pudp);
 
-       /* No-op for empty entry and WARN_ON for valid entry */
-       if (!pud_present(pud) || !pud_table(pud)) {
+       if (!pud_present(pud))
+               return 1;
+       if (!pud_table(pud)) {
                VM_WARN_ON(!pud_table(pud));
                return 1;
        }
index 5e4a59b..2691a18 100644 (file)
@@ -211,7 +211,7 @@ static inline long ffz(int x)
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static inline long fls(int x)
+static inline int fls(int x)
 {
        int r;
 
@@ -232,7 +232,7 @@ static inline long fls(int x)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static inline long ffs(int x)
+static inline int ffs(int x)
 {
        int r;
 
index 77459df..7ebe7ad 100644 (file)
@@ -60,7 +60,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
                        panic("Can't create %s() memory pool!", __func__);
                else
                        gen_pool_add(coherent_pool,
-                               pfn_to_virt(max_low_pfn),
+                               (unsigned long)pfn_to_virt(max_low_pfn),
                                hexagon_coherent_pool_size, -1);
        }
 
index 3534aa6..1b083c5 100644 (file)
@@ -98,11 +98,10 @@ static time64_t pmu_read_time(void)
 
        if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
                return 0;
-       while (!req.complete)
-               pmu_poll();
+       pmu_wait_complete(&req);
 
-       time = (u32)((req.reply[1] << 24) | (req.reply[2] << 16) |
-                    (req.reply[3] << 8) | req.reply[4]);
+       time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
+                    (req.reply[2] << 8) | req.reply[3]);
 
        return time - RTC_OFFSET;
 }
@@ -116,8 +115,7 @@ static void pmu_write_time(time64_t time)
                        (data >> 24) & 0xFF, (data >> 16) & 0xFF,
                        (data >> 8) & 0xFF, data & 0xFF) < 0)
                return;
-       while (!req.complete)
-               pmu_poll();
+       pmu_wait_complete(&req);
 }
 
 static __u8 pmu_read_pram(int offset)
index 70dde04..f5453d9 100644 (file)
@@ -172,7 +172,7 @@ void __init cf_bootmem_alloc(void)
        high_memory = (void *)_ramend;
 
        /* Reserve kernel text/data/bss */
-       memblock_reserve(memstart, memstart - _rambase);
+       memblock_reserve(_rambase, memstart - _rambase);
 
        m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
        module_fixup(NULL, __start_fixup, __stop_fixup);
index a9af1d2..2c1c53d 100644 (file)
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
                                                   bool write);
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end);
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
index 4901833..8441b26 100644 (file)
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
        int desc;                       /* the current descriptor */
        struct ltq_dma_desc *desc_base; /* the descriptor base */
        int phys;                       /* physical addr */
+       struct device *dev;
 };
 
 enum {
index b2fa629..49d6046 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/atomic.h>
 #include <linux/cpumask.h>
+#include <linux/sizes.h>
 #include <linux/threads.h>
 
 #include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
 
 #endif
 
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP      ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE    (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP              mips_stack_top()
 
 /*
  * This decides where the kernel will search for a free chunk of vm
index 8fc6989..d4f7fd4 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/nmi.h>
 #include <linux/cpu.h>
 
+#include <asm/abi.h>
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
 #include <asm/cpu.h>
@@ -39,6 +40,7 @@
 #include <asm/dsp.h>
 #include <asm/fpu.h>
 #include <asm/irq.h>
+#include <asm/mips-cps.h>
 #include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
        return pc;
 }
 
+unsigned long mips_stack_top(void)
+{
+       unsigned long top = TASK_SIZE & PAGE_MASK;
+
+       /* One page for branch delay slot "emulation" */
+       top -= PAGE_SIZE;
+
+       /* Space for the VDSO, data page & GIC user page */
+       top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+       top -= PAGE_SIZE;
+       top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+       /* Space for cache colour alignment */
+       if (cpu_has_dc_aliases)
+               top -= shm_align_mask + 1;
+
+       /* Space to randomize the VDSO base */
+       if (current->flags & PF_RANDOMIZE)
+               top -= VDSO_RANDOMIZE_SIZE;
+
+       return top;
+}
+
 /*
  * Don't forget that the stack pointer must be aligned on a 8 bytes
  * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
index c71d1eb..8aaaa42 100644 (file)
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
        struct memblock_region *reg;
        extern void plat_mem_setup(void);
 
+       /*
+        * Initialize boot_command_line to an innocuous but non-empty string in
+        * order to prevent early_init_dt_scan_chosen() from copying
+        * CONFIG_CMDLINE into it without our knowledge. We handle
+        * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+        * content because repeating arguments can be problematic.
+        */
+       strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+       /* call board setup routine */
+       plat_mem_setup();
+
+       /*
+        * Make sure all kernel memory is in the maps.  The "UP" and
+        * "DOWN" are opposite for initdata since if it crosses over
+        * into another memory section you don't want that to be
+        * freed when the initdata is freed.
+        */
+       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+                        BOOT_MEM_RAM);
+       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+                        BOOT_MEM_INIT_RAM);
+
+       pr_info("Determined physical RAM map:\n");
+       print_memory_map();
+
 #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
 #else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
        }
 #endif
 #endif
-
-       /* call board setup routine */
-       plat_mem_setup();
-
-       /*
-        * Make sure all kernel memory is in the maps.  The "UP" and
-        * "DOWN" are opposite for initdata since if it crosses over
-        * into another memory section you don't want that to be
-        * freed when the initdata is freed.
-        */
-       arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
-                        PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
-                        BOOT_MEM_RAM);
-       arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
-                        PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
-                        BOOT_MEM_INIT_RAM);
-
-       pr_info("Determined physical RAM map:\n");
-       print_memory_map();
-
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
 
        *cmdline_p = command_line;
index 019035d..48a9c6b 100644 (file)
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/random.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/timekeeper_internal.h>
 
 #include <asm/abi.h>
 #include <asm/mips-cps.h>
+#include <asm/page.h>
 #include <asm/vdso.h>
 
 /* Kernel-provided data used by the VDSO. */
@@ -95,6 +98,21 @@ void update_vsyscall_tz(void)
        }
 }
 
+static unsigned long vdso_base(void)
+{
+       unsigned long base;
+
+       /* Skip the delay slot emulation page */
+       base = STACK_TOP + PAGE_SIZE;
+
+       if (current->flags & PF_RANDOMIZE) {
+               base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+               base = PAGE_ALIGN(base);
+       }
+
+       return base;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
        struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -128,12 +146,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
        vvar_size = gic_size + PAGE_SIZE;
        size = vvar_size + image->size;
 
-       base = get_unmapped_area(NULL, 0, size, 0, 0);
+       /*
+        * Find a region that's large enough for us to perform the
+        * colour-matching alignment below.
+        */
+       if (cpu_has_dc_aliases)
+               size += shm_align_mask + 1;
+
+       base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
        if (IS_ERR_VALUE(base)) {
                ret = base;
                goto out;
        }
 
+       /*
+        * If we suffer from dcache aliasing, ensure that the VDSO data page
+        * mapping is coloured the same as the kernel's mapping of that memory.
+        * This ensures that when the kernel updates the VDSO data userland
+        * will observe it without requiring cache invalidations.
+        */
+       if (cpu_has_dc_aliases) {
+               base = __ALIGN_MASK(base, shm_align_mask);
+               base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+       }
+
        data_addr = base + gic_size;
        vdso_addr = data_addr + PAGE_SIZE;
 
index ee64db0..d8dcdb3 100644 (file)
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
        return 1;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
-       kvm_mips_callbacks->flush_shadow_all(kvm);
-       return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
index 4b9fbb6..664f2f7 100644 (file)
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
        unsigned long flags;
 
        ch->desc = 0;
-       ch->desc_base = dma_zalloc_coherent(NULL,
+       ch->desc_base = dma_zalloc_coherent(ch->dev,
                                LTQ_DESC_NUM * LTQ_DESC_SIZE,
                                &ch->phys, GFP_ATOMIC);
 
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
        if (!ch->desc_base)
                return;
        ltq_dma_close(ch);
-       dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+       dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
                ch->desc_base, ch->phys);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_free);
index 3a6f34e..069acec 100644 (file)
         * unset_bytes = end_addr - current_addr + 1
         *      a2     =    t1    -      a0      + 1
         */
+       .set            reorder
        PTR_SUBU        a2, t1, a0
+       PTR_ADDIU       a2, 1
        jr              ra
-        PTR_ADDIU      a2, 1
+       .set            noreorder
 
        .endm
 
index 1d4248f..7068f34 100644 (file)
@@ -40,6 +40,10 @@ config NDS32
        select NO_IOPORT_MAP
        select RTC_LIB
        select THREAD_INFO_IN_TASK
+       select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_DYNAMIC_FTRACE
        help
          Andes(nds32) Linux support.
 
index 63f4f17..3509fac 100644 (file)
@@ -5,6 +5,10 @@ KBUILD_DEFCONFIG := defconfig
 
 comma = ,
 
+ifdef CONFIG_FUNCTION_TRACER
+arch-y += -malways-save-lp -mno-relax
+endif
+
 KBUILD_CFLAGS  += $(call cc-option, -mno-sched-prolog-epilog)
 KBUILD_CFLAGS  += -mcmodel=large
 
index 56c4790..f5f9cf7 100644 (file)
@@ -121,9 +121,9 @@ struct elf32_hdr;
  */
 #define ELF_CLASS      ELFCLASS32
 #ifdef __NDS32_EB__
-#define ELF_DATA       ELFDATA2MSB;
+#define ELF_DATA       ELFDATA2MSB
 #else
-#define ELF_DATA       ELFDATA2LSB;
+#define ELF_DATA       ELFDATA2LSB
 #endif
 #define ELF_ARCH       EM_NDS32
 #define USE_ELF_CORE_DUMP
diff --git a/arch/nds32/include/asm/ftrace.h b/arch/nds32/include/asm/ftrace.h
new file mode 100644 (file)
index 0000000..2f96cc9
--- /dev/null
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_NDS32_FTRACE_H
+#define __ASM_NDS32_FTRACE_H
+
+#ifdef CONFIG_FUNCTION_TRACER
+
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+
+#define MCOUNT_ADDR ((unsigned long)(_mcount))
+/* mcount call is composed of three instructions:
+ * sethi + ori + jral
+ */
+#define MCOUNT_INSN_SIZE 12
+
+extern void _mcount(unsigned long parent_ip);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+#define FTRACE_ADDR ((unsigned long)_ftrace_caller)
+
+#ifdef __NDS32_EL__
+#define INSN_NOP               0x09000040
+#define INSN_SIZE(insn)                (((insn & 0x00000080) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0x000000fe) == 0x00000046)
+#define ENDIAN_CONVERT(insn)   be32_to_cpu(insn)
+#else /* __NDS32_EB__ */
+#define INSN_NOP               0x40000009
+#define INSN_SIZE(insn)                (((insn & 0x80000000) == 0) ? 4 : 2)
+#define IS_SETHI(insn)         ((insn & 0xfe000000) == 0x46000000)
+#define ENDIAN_CONVERT(insn)   (insn)
+#endif
+
+extern void _ftrace_caller(unsigned long parent_ip);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+       return addr;
+}
+struct dyn_arch_ftrace {
+};
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#endif /* __ASM_NDS32_FTRACE_H */
index 19b1939..68c3815 100644 (file)
@@ -17,6 +17,7 @@
 #else
 #define FP_OFFSET (-2)
 #endif
+#define LP_OFFSET (-1)
 
 extern void __init early_trap_init(void);
 static inline void GIE_ENABLE(void)
index 18a009f..362a32d 100644 (file)
@@ -38,7 +38,7 @@ struct exception_table_entry {
 extern int fixup_exception(struct pt_regs *regs);
 
 #define KERNEL_DS      ((mm_segment_t) { ~0UL })
-#define USER_DS        ((mm_segment_t) {TASK_SIZE - 1})
+#define USER_DS                ((mm_segment_t) {TASK_SIZE - 1})
 
 #define get_ds()       (KERNEL_DS)
 #define get_fs()       (current_thread_info()->addr_limit)
@@ -49,11 +49,11 @@ static inline void set_fs(mm_segment_t fs)
        current_thread_info()->addr_limit = fs;
 }
 
-#define segment_eq(a, b)    ((a) == (b))
+#define segment_eq(a, b)       ((a) == (b))
 
 #define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() -size))
 
-#define access_ok(type, addr, size)                 \
+#define access_ok(type, addr, size)    \
        __range_ok((unsigned long)addr, (unsigned long)size)
 /*
  * Single-value transfer routines.  They automatically use the right
@@ -75,70 +75,73 @@ static inline void set_fs(mm_segment_t fs)
  * versions are void (ie, don't return a value as such).
  */
 
-#define get_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_READ,  p, sizeof(*p)))) {            \
-               __e = __get_user(x,p);                                  \
-       } else                                                          \
-               x = 0;                                                  \
-       __e;                                                            \
-})
-#define __get_user(x,ptr)                                              \
+#define get_user       __get_user                                      \
+
+#define __get_user(x, ptr)                                             \
 ({                                                                     \
        long __gu_err = 0;                                              \
-       __get_user_err((x),(ptr),__gu_err);                             \
+       __get_user_check((x), (ptr), __gu_err);                         \
        __gu_err;                                                       \
 })
 
-#define __get_user_error(x,ptr,err)                                    \
+#define __get_user_error(x, ptr, err)                                  \
 ({                                                                     \
-       __get_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __get_user_check((x), (ptr), (err));                            \
+       (void)0;                                                        \
 })
 
-#define __get_user_err(x,ptr,err)                                      \
+#define __get_user_check(x, ptr, err)                                  \
+({                                                                     \
+       const __typeof__(*(ptr)) __user *__p = (ptr);                   \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_READ, __p, sizeof(*__p))) {                \
+               __get_user_err((x), __p, (err));                        \
+       } else {                                                        \
+               (x) = 0; (err) = -EFAULT;                               \
+       }                                                               \
+})
+
+#define __get_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __get_user_asm("lbi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lbi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm("lhi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lhi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm("lwi",__gu_val,__gu_addr,err);           \
+               __get_user_asm("lwi", __gu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __get_user_asm_dword(__gu_val,__gu_addr,err);           \
+               __get_user_asm_dword(__gu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
                break;                                                  \
        }                                                               \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
 } while (0)
 
-#define __get_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move %0, %3\n"                                          \
-       "       move %1, #0\n"                                          \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err), "=&r" (x)                                         \
-       : "r" (addr), "i" (-EFAULT)                                     \
-       : "cc")
+#define __get_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move %0, %3\n"                                  \
+               "       move %1, #0\n"                                  \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err), "=&r" (x)                                 \
+               : "r" (addr), "i" (-EFAULT)                             \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __gu_reg_oper0 "%H1"
@@ -149,61 +152,66 @@ do {                                                                      \
 #endif
 
 #define __get_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                           \
-       "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err), "=&r"(x)                                           \
-       : "r"(addr), "i"(-EFAULT)                                       \
-       : "cc")
-#define put_user(x,p)                                                  \
-({                                                                     \
-       long __e = -EFAULT;                                             \
-       if(likely(access_ok(VERIFY_WRITE,  p, sizeof(*p)))) {           \
-               __e = __put_user(x,p);                                  \
-       }                                                               \
-       __e;                                                            \
-})
-#define __put_user(x,ptr)                                              \
+       __asm__ __volatile__ (                                          \
+               "\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"                   \
+               "\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err), "=&r"(x)                                   \
+               : "r"(addr), "i"(-EFAULT)                               \
+               : "cc")
+
+#define put_user       __put_user                                      \
+
+#define __put_user(x, ptr)                                             \
 ({                                                                     \
        long __pu_err = 0;                                              \
-       __put_user_err((x),(ptr),__pu_err);                             \
+       __put_user_err((x), (ptr), __pu_err);                           \
        __pu_err;                                                       \
 })
 
-#define __put_user_error(x,ptr,err)                                    \
+#define __put_user_error(x, ptr, err)                                  \
+({                                                                     \
+       __put_user_err((x), (ptr), (err));                              \
+       (void)0;                                                        \
+})
+
+#define __put_user_check(x, ptr, err)                                  \
 ({                                                                     \
-       __put_user_err((x),(ptr),err);                                  \
-       (void) 0;                                                       \
+       __typeof__(*(ptr)) __user *__p = (ptr);                         \
+       might_fault();                                                  \
+       if (access_ok(VERIFY_WRITE, __p, sizeof(*__p))) {               \
+               __put_user_err((x), __p, (err));                        \
+       } else  {                                                       \
+               (err) = -EFAULT;                                        \
+       }                                                               \
 })
 
-#define __put_user_err(x,ptr,err)                                      \
+#define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
-       unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
-               __put_user_asm("sbi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("sbi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 2:                                                         \
-               __put_user_asm("shi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("shi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm("swi",__pu_val,__pu_addr,err);           \
+               __put_user_asm("swi", __pu_val, (ptr), (err));          \
                break;                                                  \
        case 8:                                                         \
-               __put_user_asm_dword(__pu_val,__pu_addr,err);           \
+               __put_user_asm_dword(__pu_val, (ptr), (err));           \
                break;                                                  \
        default:                                                        \
                BUILD_BUG();                                            \
@@ -211,22 +219,22 @@ do {                                                                      \
        }                                                               \
 } while (0)
 
-#define __put_user_asm(inst,x,addr,err)                                        \
-       asm volatile(                                                   \
-       "1:     "inst"  %1,[%2]\n"                                      \
-       "2:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "3:     move    %0, %3\n"                                       \
-       "       b       2b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 3b\n"                                       \
-       "       .previous"                                              \
-       : "+r" (err)                                                    \
-       : "r" (x), "r" (addr), "i" (-EFAULT)                            \
-       : "cc")
+#define __put_user_asm(inst, x, addr, err)                             \
+       __asm__ __volatile__ (                                          \
+               "1:     "inst"  %1,[%2]\n"                              \
+               "2:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "3:     move    %0, %3\n"                               \
+               "       b       2b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 3b\n"                               \
+               "       .previous"                                      \
+               : "+r" (err)                                            \
+               : "r" (x), "r" (addr), "i" (-EFAULT)                    \
+               : "cc")
 
 #ifdef __NDS32_EB__
 #define __pu_reg_oper0 "%H2"
@@ -237,23 +245,24 @@ do {                                                                      \
 #endif
 
 #define __put_user_asm_dword(x, addr, err)                             \
-       asm volatile(                                                   \
-       "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                           \
-       "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                         \
-       "3:\n"                                                          \
-       "       .section .fixup,\"ax\"\n"                               \
-       "       .align  2\n"                                            \
-       "4:     move    %0, %3\n"                                       \
-       "       b       3b\n"                                           \
-       "       .previous\n"                                            \
-       "       .section __ex_table,\"a\"\n"                            \
-       "       .align  3\n"                                            \
-       "       .long   1b, 4b\n"                                       \
-       "       .long   2b, 4b\n"                                       \
-       "       .previous"                                              \
-       : "+r"(err)                                                     \
-       : "r"(addr), "r"(x), "i"(-EFAULT)                               \
-       : "cc")
+       __asm__ __volatile__ (                                          \
+               "\n1:\tswi " __pu_reg_oper0 ",[%1]\n"                   \
+               "\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"                 \
+               "3:\n"                                                  \
+               "       .section .fixup,\"ax\"\n"                       \
+               "       .align  2\n"                                    \
+               "4:     move    %0, %3\n"                               \
+               "       b       3b\n"                                   \
+               "       .previous\n"                                    \
+               "       .section __ex_table,\"a\"\n"                    \
+               "       .align  3\n"                                    \
+               "       .long   1b, 4b\n"                               \
+               "       .long   2b, 4b\n"                               \
+               "       .previous"                                      \
+               : "+r"(err)                                             \
+               : "r"(addr), "r"(x), "i"(-EFAULT)                       \
+               : "cc")
+
 extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
 extern long strncpy_from_user(char *dest, const char __user * src, long count);
 extern __must_check long strlen_user(const char __user * str);
index 4279274..27cded3 100644 (file)
@@ -21,3 +21,9 @@ extra-y := head.o vmlinux.lds
 
 
 obj-y                          += vdso/
+
+obj-$(CONFIG_FUNCTION_TRACER)   += ftrace.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
index 0c6d031..0c5386e 100644 (file)
@@ -9,7 +9,8 @@
 
 void __iomem *atl2c_base;
 static const struct of_device_id atl2c_ids[] __initconst = {
-       {.compatible = "andestech,atl2c",}
+       {.compatible = "andestech,atl2c",},
+       {}
 };
 
 static int __init atl2c_of_init(void)
index b8ae4e9..21a1440 100644 (file)
@@ -118,7 +118,7 @@ common_exception_handler:
        /* interrupt */
 2:
 #ifdef CONFIG_TRACE_IRQFLAGS
-       jal     trace_hardirqs_off
+       jal     __trace_hardirqs_off
 #endif
        move    $r0, $sp
        sethi   $lp, hi20(ret_from_intr)
index 03e4f77..f00af92 100644 (file)
@@ -138,8 +138,8 @@ no_work_pending:
 #ifdef CONFIG_TRACE_IRQFLAGS
        lwi     $p0, [$sp+(#IPSW_OFFSET)]
        andi    $p0, $p0, #0x1
-       la      $r10, trace_hardirqs_off
-       la      $r9, trace_hardirqs_on
+       la      $r10, __trace_hardirqs_off
+       la      $r9, __trace_hardirqs_on
        cmovz   $r9, $p0, $r10
        jral    $r9
 #endif
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..a0a9679
--- /dev/null
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+                                    struct ftrace_ops*, struct pt_regs*);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern void ftrace_graph_caller(void);
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* avoid to optimize as pure function */
+}
+
+noinline void _mcount(unsigned long parent_ip)
+{
+       /* save all state by the compiler prologue */
+
+       unsigned long ip = (unsigned long)__builtin_return_address(0);
+
+       if (ftrace_trace_function != ftrace_stub)
+               ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
+                                     NULL, NULL);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
+           || ftrace_graph_entry != ftrace_graph_entry_stub)
+               ftrace_graph_caller();
+#endif
+
+       /* restore all state by the compiler epilogue */
+}
+EXPORT_SYMBOL(_mcount);
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+                                 struct ftrace_ops *op, struct pt_regs *regs)
+{
+       __asm__ ("");  /* avoid to optimize as pure function */
+}
+
+noinline void __naked _mcount(unsigned long parent_ip)
+{
+       __asm__ ("");  /* avoid to optimize as pure function */
+}
+EXPORT_SYMBOL(_mcount);
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+void _ftrace_caller(unsigned long parent_ip)
+{
+       /* save all state needed by the compiler prologue */
+
+       /*
+        * prepare arguments for real tracing function
+        * first  arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
+        * second arg : parent_ip
+        */
+       __asm__ __volatile__ (
+               "move $r1, %0                              \n\t"
+               "addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
+               :
+               : "r" (parent_ip), "r" (__builtin_return_address(0)));
+
+       /* a placeholder for the call to a real tracing function */
+       __asm__ __volatile__ (
+               "ftrace_call:           \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       /* a placeholder for the call to ftrace_graph_caller */
+       __asm__ __volatile__ (
+               "ftrace_graph_call:     \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t"
+               "nop                    \n\t");
+#endif
+       /* restore all state needed by the compiler epilogue */
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+       return 0;
+}
+
+int ftrace_arch_code_modify_prepare(void)
+{
+       set_all_modules_text_rw();
+       return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+       set_all_modules_text_ro();
+       return 0;
+}
+
+static unsigned long gen_sethi_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x46000000;
+       unsigned long imm = addr >> 12;
+       unsigned long rt_num = 0xf << 20;
+
+       return ENDIAN_CONVERT(opcode | rt_num | imm);
+}
+
+static unsigned long gen_ori_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x58000000;
+       unsigned long imm = addr & 0x0000fff;
+       unsigned long rt_num = 0xf << 20;
+       unsigned long ra_num = 0xf << 15;
+
+       return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
+}
+
+static unsigned long gen_jral_insn(unsigned long addr)
+{
+       unsigned long opcode = 0x4a000001;
+       unsigned long rt_num = 0x1e << 20;
+       unsigned long rb_num = 0xf << 10;
+
+       return ENDIAN_CONVERT(opcode | rt_num | rb_num);
+}
+
+static void ftrace_gen_call_insn(unsigned long *call_insns,
+                                unsigned long addr)
+{
+       call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u       */
+       call_insns[1] = gen_ori_insn(addr);   /* ori   $r15, $r15, imm15u */
+       call_insns[2] = gen_jral_insn(addr);  /* jral  $lp,  $r15         */
+}
+
+static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                               unsigned long *new_insn, bool validate)
+{
+       unsigned long orig_insn[3];
+
+       if (validate) {
+               if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+                       return -EFAULT;
+               if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
+                       return -EINVAL;
+       }
+
+       if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+               return -EPERM;
+
+       return 0;
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+                             unsigned long *new_insn, bool validate)
+{
+       int ret;
+
+       ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
+       if (ret)
+               return ret;
+
+       flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+       return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long pc = (unsigned long)&ftrace_call;
+       unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       if (func != ftrace_stub)
+               ftrace_gen_call_insn(new_insn, (unsigned long)func);
+
+       return ftrace_modify_code(pc, old_insn, new_insn, false);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, nop_insn, call_insn, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+                   unsigned long addr)
+{
+       unsigned long pc = rec->ip;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, addr);
+
+       return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+                          unsigned long frame_pointer)
+{
+       unsigned long return_hooker = (unsigned long)&return_to_handler;
+       struct ftrace_graph_ent trace;
+       unsigned long old;
+       int err;
+
+       if (unlikely(atomic_read(&current->tracing_graph_pause)))
+               return;
+
+       old = *parent;
+
+       trace.func = self_addr;
+       trace.depth = current->curr_ret_stack + 1;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
+               return;
+
+       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
+                                      frame_pointer, NULL);
+
+       if (err == -EBUSY)
+               return;
+
+       *parent = return_hooker;
+}
+
+noinline void ftrace_graph_caller(void)
+{
+       unsigned long *parent_ip =
+               (unsigned long *)(__builtin_frame_address(2) - 4);
+
+       unsigned long selfpc =
+               (unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);
+
+       unsigned long frame_pointer =
+               (unsigned long)__builtin_frame_address(3);
+
+       prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
+}
+
+extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+void __naked return_to_handler(void)
+{
+       __asm__ __volatile__ (
+               /* save state needed by the ABI     */
+               "smw.adm $r0,[$sp],$r1,#0x0  \n\t"
+
+               /* get original return address      */
+               "move $r0, $fp               \n\t"
+               "bal ftrace_return_to_handler\n\t"
+               "move $lp, $r0               \n\t"
+
+               /* restore state nedded by the ABI  */
+               "lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+       unsigned long pc = (unsigned long)&ftrace_graph_call;
+       unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+       unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+       ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
+
+       if (enable)
+               return ftrace_modify_code(pc, nop_insn, call_insn, true);
+       else
+               return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+       return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+noinline void __trace_hardirqs_off(void)
+{
+       trace_hardirqs_off();
+}
+noinline void __trace_hardirqs_on(void)
+{
+       trace_hardirqs_on();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
index 4167283..1e31829 100644 (file)
@@ -40,7 +40,7 @@ void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
@@ -70,7 +70,7 @@ void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
 
        tmp2 = tmp & loc_mask;
        if (partial_in_place) {
-               tmp &= (!loc_mask);
+               tmp &= (~loc_mask);
                tmp =
                    tmp2 | ((tmp + ((val & val_mask) >> val_shift)) & val_mask);
        } else {
index 8b231e9..d974c0c 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
+#include <linux/ftrace.h>
 
 void save_stack_trace(struct stack_trace *trace)
 {
@@ -16,6 +17,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        unsigned long *fpn;
        int skip = trace->skip;
        int savesched;
+       int graph_idx = 0;
 
        if (tsk == current) {
                __asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
@@ -29,10 +31,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
               && (fpn >= (unsigned long *)TASK_SIZE)) {
                unsigned long lpp, fpp;
 
-               lpp = fpn[-1];
+               lpp = fpn[LP_OFFSET];
                fpp = fpn[FP_OFFSET];
                if (!__kernel_text_address(lpp))
                        break;
+               else
+                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
 
                if (savesched || !in_sched_functions(lpp)) {
                        if (skip) {
index a6205fd..1496aab 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kdebug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/uaccess.h>
+#include <linux/ftrace.h>
 
 #include <asm/proc-fns.h>
 #include <asm/unistd.h>
@@ -94,28 +95,6 @@ static void dump_instr(struct pt_regs *regs)
        set_fs(fs);
 }
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-#include <linux/ftrace.h>
-static void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-       if (*addr == (unsigned long)return_to_handler) {
-               int index = tsk->curr_ret_stack;
-
-               if (tsk->ret_stack && index >= *graph) {
-                       index -= *graph;
-                       *addr = tsk->ret_stack[index].ret;
-                       (*graph)++;
-               }
-       }
-}
-#else
-static inline void
-get_real_ret_addr(unsigned long *addr, struct task_struct *tsk, int *graph)
-{
-}
-#endif
-
 #define LOOP_TIMES (100)
 static void __dump(struct task_struct *tsk, unsigned long *base_reg)
 {
@@ -126,7 +105,8 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                while (!kstack_end(base_reg)) {
                        ret_addr = *base_reg++;
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -137,15 +117,12 @@ static void __dump(struct task_struct *tsk, unsigned long *base_reg)
                       !((unsigned long)base_reg & 0x3) &&
                       ((unsigned long)base_reg >= TASK_SIZE)) {
                        unsigned long next_fp;
-#if !defined(NDS32_ABI_2)
-                       ret_addr = base_reg[0];
-                       next_fp = base_reg[1];
-#else
-                       ret_addr = base_reg[-1];
+                       ret_addr = base_reg[LP_OFFSET];
                        next_fp = base_reg[FP_OFFSET];
-#endif
                        if (__kernel_text_address(ret_addr)) {
-                               get_real_ret_addr(&ret_addr, tsk, &graph);
+
+                               ret_addr = ftrace_graph_ret_addr(
+                                               tsk, &graph, ret_addr, NULL);
                                print_ip_sym(ret_addr);
                        }
                        if (--cnt < 0)
@@ -196,11 +173,10 @@ void die(const char *str, struct pt_regs *regs, int err)
        pr_emerg("CPU: %i\n", smp_processor_id());
        show_regs(regs);
        pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
-                tsk->comm, tsk->pid, task_thread_info(tsk) + 1);
+                tsk->comm, tsk->pid, end_of_stack(tsk));
 
        if (!user_mode(regs) || in_interrupt()) {
-               dump_mem("Stack: ", regs->sp,
-                        THREAD_SIZE + (unsigned long)task_thread_info(tsk));
+               dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
                dump_instr(regs);
                dump_stack();
        }
index 288313b..9e90f30 100644 (file)
@@ -13,14 +13,26 @@ OUTPUT_ARCH(nds32)
 ENTRY(_stext_lma)
 jiffies = jiffies_64;
 
+#if defined(CONFIG_GCOV_KERNEL)
+#define NDS32_EXIT_KEEP(x)     x
+#else
+#define NDS32_EXIT_KEEP(x)
+#endif
+
 SECTIONS
 {
        _stext_lma = TEXTADDR - LOAD_OFFSET;
        . = TEXTADDR;
        __init_begin = .;
        HEAD_TEXT_SECTION
+       .exit.text : {
+               NDS32_EXIT_KEEP(EXIT_TEXT)
+       }
        INIT_TEXT_SECTION(PAGE_SIZE)
        INIT_DATA_SECTION(16)
+       .exit.data : {
+               NDS32_EXIT_KEEP(EXIT_DATA)
+       }
        PERCPU_SECTION(L1_CACHE_BYTES)
        __init_end = .;
 
index 7a49f0d..f1da8a7 100644 (file)
@@ -3,15 +3,6 @@
 config TRACE_IRQFLAGS_SUPPORT
        def_bool y
 
-config DEBUG_STACK_USAGE
-       bool "Enable stack utilization instrumentation"
-       depends on DEBUG_KERNEL
-       help
-         Enables the display of the minimum amount of free stack which each
-         task has ever had available in the sysrq-T and sysrq-P debug output.
-
-         This option will slow down process creation somewhat.
-
 config EARLY_PRINTK
        bool "Activate early kernel debugging"
        default y
index db0b6ee..a806692 100644 (file)
@@ -177,7 +177,6 @@ config PPC
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_MMAP_RND_BITS
        select HAVE_ARCH_MMAP_RND_COMPAT_BITS   if COMPAT
-       select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_CBPF_JIT                    if !PPC64
index 13a688f..2fdc865 100644 (file)
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
        return hash__vmemmap_remove_mapping(start, page_size);
 }
 #endif
-struct page *realmode_pfn_to_page(unsigned long pfn);
 
 static inline pte_t pmd_pte(pmd_t pmd)
 {
index ab3a4fb..3d4b88c 100644 (file)
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
 extern int __init tce_iommu_bus_notifier_init(void);
 extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
                unsigned long *hpa, enum dma_data_direction *direction);
-extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
-               unsigned long *hpa, enum dma_data_direction *direction);
 #else
 static inline void iommu_register_group(struct iommu_table_group *table_group,
                                        int pci_domain_number,
index b2f89b6..b694d6a 100644 (file)
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned int pageshift, unsigned long *hpa);
 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
 #endif
index 1a951b0..1fffbba 100644 (file)
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
 
 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
 
index ea04dfb..2d8fc8c 100644 (file)
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
 
 #ifdef CONFIG_PPC_DENORMALISATION
        mfspr   r10,SPRN_HSRR1
-       mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
        andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
-       addi    r11,r11,-4              /* HSRR0 is next instruction */
        bne+    denorm_assist
 #endif
 
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
        XVCPSGNDP32(32)
 denorm_done:
+       mfspr   r11,SPRN_HSRR0
+       subi    r11,r11,4
        mtspr   SPRN_HSRR0,r11
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
index af7a20d..19b4c62 100644 (file)
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 }
 EXPORT_SYMBOL_GPL(iommu_tce_xchg);
 
-#ifdef CONFIG_PPC_BOOK3S_64
-long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
-               unsigned long *hpa, enum dma_data_direction *direction)
-{
-       long ret;
-
-       ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-
-       if (!ret && ((*direction == DMA_FROM_DEVICE) ||
-                       (*direction == DMA_BIDIRECTIONAL))) {
-               struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
-
-               if (likely(pg)) {
-                       SetPageDirty(pg);
-               } else {
-                       tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-                       ret = -EFAULT;
-               }
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
-#endif
-
 int iommu_take_ownership(struct iommu_table *tbl)
 {
        unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
index 913c572..bb6ac47 100644 (file)
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
 
        pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
 
+       /*
+        * Make sure the NIP points at userspace, not kernel text/data or
+        * elsewhere.
+        */
+       if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+               pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+                       current->comm, current->pid);
+               return;
+       }
+
        pr_info("%s[%d]: code: ", current->comm, current->pid);
 
        for (i = 0; i < instructions_to_print; i++) {
index 6bffbc5..7716374 100644 (file)
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
        std     r1, PACATMSCRATCH(r13)
        ld      r1, PACAR1(r13)
 
-       /* Store the PPR in r11 and reset to decent value */
        std     r11, GPR11(r1)                  /* Temporary stash */
 
+       /*
+        * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+        * clobbered by an exception once we turn on MSR_RI below.
+        */
+       ld      r11, PACATMSCRATCH(r13)
+       std     r11, GPR1(r1)
+
+       /*
+        * Store r13 away so we can free up the scratch SPR for the SLB fault
+        * handler (needed once we start accessing the thread_struct).
+        */
+       GET_SCRATCH0(r11)
+       std     r11, GPR13(r1)
+
        /* Reset MSR RI so we can take SLB faults again */
        li      r11, MSR_RI
        mtmsrd  r11, 1
 
+       /* Store the PPR in r11 and reset to decent value */
        mfspr   r11, SPRN_PPR
        HMT_MEDIUM
 
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
        SAVE_GPR(8, r7)                         /* user r8 */
        SAVE_GPR(9, r7)                         /* user r9 */
        SAVE_GPR(10, r7)                        /* user r10 */
-       ld      r3, PACATMSCRATCH(r13)          /* user r1 */
+       ld      r3, GPR1(r1)                    /* user r1 */
        ld      r4, GPR7(r1)                    /* user r7 */
        ld      r5, GPR11(r1)                   /* user r11 */
        ld      r6, GPR12(r1)                   /* user r12 */
-       GET_SCRATCH0(8)                         /* user r13 */
+       ld      r8, GPR13(r1)                   /* user r13 */
        std     r3, GPR1(r7)
        std     r4, GPR7(r7)
        std     r5, GPR11(r7)
index 3c0e8fb..68e14af 100644 (file)
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
        unsigned long pp, key;
        unsigned long v, orig_v, gr;
        __be64 *hptep;
-       int index;
+       long int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
 
        if (kvm_is_radix(vcpu->kvm))
index 0af1c0a..998f8d0 100644 (file)
@@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned long ea, unsigned long dsisr)
 {
        struct kvm *kvm = vcpu->kvm;
-       unsigned long mmu_seq, pte_size;
-       unsigned long gpa, gfn, hva, pfn;
+       unsigned long mmu_seq;
+       unsigned long gpa, gfn, hva;
        struct kvm_memory_slot *memslot;
        struct page *page = NULL;
        long ret;
@@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         */
        hva = gfn_to_hva_memslot(memslot, gfn);
        if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
-               pfn = page_to_pfn(page);
                upgrade_write = true;
        } else {
+               unsigned long pfn;
+
                /* Call KVM generic code to do the slow-path check */
                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                           writing, upgrade_p);
@@ -639,63 +640,55 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                }
        }
 
-       /* See if we can insert a 1GB or 2MB large PTE here */
-       level = 0;
-       if (page && PageCompound(page)) {
-               pte_size = PAGE_SIZE << compound_order(compound_head(page));
-               if (pte_size >= PUD_SIZE &&
-                   (gpa & (PUD_SIZE - PAGE_SIZE)) ==
-                   (hva & (PUD_SIZE - PAGE_SIZE))) {
-                       level = 2;
-                       pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
-               } else if (pte_size >= PMD_SIZE &&
-                          (gpa & (PMD_SIZE - PAGE_SIZE)) ==
-                          (hva & (PMD_SIZE - PAGE_SIZE))) {
-                       level = 1;
-                       pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
-               }
-       }
-
        /*
-        * Compute the PTE value that we need to insert.
+        * Read the PTE from the process' radix tree and use that
+        * so we get the shift and attribute bits.
         */
-       if (page) {
-               pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
-                       _PAGE_ACCESSED;
-               if (writing || upgrade_write)
-                       pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
-               pte = pfn_pte(pfn, __pgprot(pgflags));
-       } else {
-               /*
-                * Read the PTE from the process' radix tree and use that
-                * so we get the attribute bits.
-                */
-               local_irq_disable();
-               ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
-               pte = *ptep;
+       local_irq_disable();
+       ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+       /*
+        * If the PTE disappeared temporarily due to a THP
+        * collapse, just return and let the guest try again.
+        */
+       if (!ptep) {
                local_irq_enable();
-               if (shift == PUD_SHIFT &&
-                   (gpa & (PUD_SIZE - PAGE_SIZE)) ==
-                   (hva & (PUD_SIZE - PAGE_SIZE))) {
-                       level = 2;
-               } else if (shift == PMD_SHIFT &&
-                          (gpa & (PMD_SIZE - PAGE_SIZE)) ==
-                          (hva & (PMD_SIZE - PAGE_SIZE))) {
-                       level = 1;
-               } else if (shift && shift != PAGE_SHIFT) {
-                       /* Adjust PFN */
-                       unsigned long mask = (1ul << shift) - PAGE_SIZE;
-                       pte = __pte(pte_val(pte) | (hva & mask));
-               }
-               pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
-               if (writing || upgrade_write) {
-                       if (pte_val(pte) & _PAGE_WRITE)
-                               pte = __pte(pte_val(pte) | _PAGE_DIRTY);
-               } else {
-                       pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+               if (page)
+                       put_page(page);
+               return RESUME_GUEST;
+       }
+       pte = *ptep;
+       local_irq_enable();
+
+       /* Get pte level from shift/size */
+       if (shift == PUD_SHIFT &&
+           (gpa & (PUD_SIZE - PAGE_SIZE)) ==
+           (hva & (PUD_SIZE - PAGE_SIZE))) {
+               level = 2;
+       } else if (shift == PMD_SHIFT &&
+                  (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+                  (hva & (PMD_SIZE - PAGE_SIZE))) {
+               level = 1;
+       } else {
+               level = 0;
+               if (shift > PAGE_SHIFT) {
+                       /*
+                        * If the pte maps more than one page, bring over
+                        * bits from the virtual address to get the real
+                        * address of the specific single page we want.
+                        */
+                       unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+                       pte = __pte(pte_val(pte) | (hva & rpnmask));
                }
        }
 
+       pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+       if (writing || upgrade_write) {
+               if (pte_val(pte) & _PAGE_WRITE)
+                       pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+       } else {
+               pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+       }
+
        /* Allocate space in the tree and write the PTE */
        ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
 
@@ -725,10 +718,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
-                       unsigned long npages = 1;
+                       unsigned long psize = PAGE_SIZE;
                        if (shift)
-                               npages = 1ul << (shift - PAGE_SHIFT);
-                       kvmppc_update_dirty_map(memslot, gfn, npages);
+                               psize = 1ul << shift;
+                       kvmppc_update_dirty_map(memslot, gfn, psize);
                }
        }
        return 0;                               
index 506a4d4..6821ead 100644 (file)
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+               unsigned long entry, unsigned long *hpa,
+               enum dma_data_direction *direction)
+{
+       long ret;
+
+       ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+       if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+                               (*direction == DMA_BIDIRECTIONAL))) {
+               __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+               /*
+                * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+                * calling this so we still get here a valid UA.
+                */
+               if (pua && *pua)
+                       mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+       }
+
+       return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+               unsigned long entry)
 {
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;
 
-       iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+       iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
        unsigned long hpa = 0;
        long ret;
 
-       if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+       if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 
        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
-               iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+               iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 
        return ret;
 }
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_CLOSED;
 
-       ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+       ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);
                /*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                        return ret;
 
                WARN_ON_ONCE_RM(1);
-               kvmppc_rm_clear_tce(stit->tbl, entry);
+               kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
        }
 
        kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                goto unlock_exit;
 
                        WARN_ON_ONCE_RM(1);
-                       kvmppc_rm_clear_tce(stit->tbl, entry);
+                       kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                }
 
                kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                                return ret;
 
                        WARN_ON_ONCE_RM(1);
-                       kvmppc_rm_clear_tce(stit->tbl, entry);
+                       kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                }
        }
 
index 886ed94..d05c8af 100644 (file)
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
        addc    r0, r8, r9
        ld      r10, 0(r4)
        ld      r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+       rotldi  r5, r5, 8
+#endif
        adde    r0, r0, r10
        add     r5, r5, r7
        adde    r0, r0, r11
index 850f3b8..5ffee29 100644 (file)
@@ -142,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
        return 0;
 }
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
        int err;
        unsigned int *patch_addr = NULL;
@@ -182,12 +182,22 @@ out:
 }
 #else /* !CONFIG_STRICT_KERNEL_RWX */
 
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
 {
        return raw_patch_instruction(addr, instr);
 }
 
 #endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+       /* Make sure we aren't patching a freed init section */
+       if (init_mem_is_free && init_section_contains(addr, 4)) {
+               pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+               return 0;
+       }
+       return do_patch_instruction(addr, instr);
+}
 NOKPROBE_SYMBOL(patch_instruction);
 
 int patch_branch(unsigned int *addr, unsigned long target, int flags)
index 51ce091..7a9886f 100644 (file)
@@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 {
 }
 
-/*
- * We do not have access to the sparsemem vmemmap, so we fallback to
- * walking the list of sparsemem blocks which we already maintain for
- * the sake of crashdump. In the long run, we might want to maintain
- * a tree if performance of that linear walk becomes a problem.
- *
- * realmode_pfn_to_page functions can fail due to:
- * 1) As real sparsemem blocks do not lay in RAM continously (they
- * are in virtual address space which is not available in the real mode),
- * the requested page struct can be split between blocks so get_page/put_page
- * may fail.
- * 2) When huge pages are used, the get_page/put_page API will fail
- * in real mode as the linked addresses in the page struct are virtual
- * too.
- */
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
-       struct vmemmap_backing *vmem_back;
-       struct page *page;
-       unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
-       unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
-
-       for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
-               if (pg_va < vmem_back->virt_addr)
-                       continue;
-
-               /* After vmemmap_list entry free is possible, need check all */
-               if ((pg_va + sizeof(struct page)) <=
-                               (vmem_back->virt_addr + page_size)) {
-                       page = (struct page *) (vmem_back->phys + pg_va -
-                               vmem_back->virt_addr);
-                       return page;
-               }
-       }
-
-       /* Probably that page struct is split between real pages */
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#else
-
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
-       struct page *page = pfn_to_page(pfn);
-       return page;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 #ifdef CONFIG_PPC_BOOK3S_64
index 5c8530d..04ccb27 100644 (file)
@@ -63,6 +63,7 @@
 #endif
 
 unsigned long long memory_limit;
+bool init_mem_is_free;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
 {
        ppc_md.progress = ppc_printk_progress;
        mark_initmem_nx();
+       init_mem_is_free = true;
        free_initmem_default(POISON_FREE_INITMEM);
 }
 
index c9ee9e2..56c2234 100644 (file)
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
 #include <linux/swap.h>
+#include <linux/sizes.h>
 #include <asm/mmu_context.h>
 #include <asm/pte-walk.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
+#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY        0x1
+#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1)
+
 struct mm_iommu_table_group_mem_t {
        struct list_head next;
        struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
                if (!page)
                        continue;
 
+               if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
+                       SetPageDirty(page);
+
                put_page(page);
                mem->hpas[i] = 0;
        }
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
 
 struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
                unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
        if (pageshift > mem->pageshift)
                return -EFAULT;
 
-       *hpa = *va | (ua & ~PAGE_MASK);
+       *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
 
        return 0;
 }
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
        if (!pa)
                return -EFAULT;
 
-       *hpa = *pa | (ua & ~PAGE_MASK);
+       *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
+{
+       struct mm_iommu_table_group_mem_t *mem;
+       long entry;
+       void *va;
+       unsigned long *pa;
+
+       mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
+       if (!mem)
+               return;
+
+       entry = (ua - mem->ua) >> PAGE_SHIFT;
+       va = &mem->hpas[entry];
+
+       pa = (void *) vmalloc_to_phys(va);
+       if (!pa)
+               return;
+
+       *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
+}
 
 long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
 {
index 35ac542..055b211 100644 (file)
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
        int new_nid;
 
        /* Use associativity from first thread for all siblings */
-       vphn_get_associativity(cpu, associativity);
+       if (vphn_get_associativity(cpu, associativity))
+               return cpu_to_node(cpu);
+
        new_nid = associativity_to_nid(associativity);
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;
@@ -1215,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
                 * Need to ensure that NODE_DATA is initialized for a node from
                 * available memory (see memblock_alloc_try_nid). If unable to
                 * init the node, then default to nearest node that has memory
-                * installed.
+                * installed. Skip onlining a node if the subsystems are not
+                * yet initialized.
                 */
-               if (try_online_node(new_nid))
+               if (!topology_inited || try_online_node(new_nid))
                        new_nid = first_online_node;
 #else
                /*
@@ -1452,7 +1455,8 @@ static struct timer_list topology_timer;
 
 static void reset_topology_timer(void)
 {
-       mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+       if (vphn_enabled)
+               mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }
 
 #ifdef CONFIG_SMP
index 333b1f8..b271b28 100644 (file)
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
         * Since any pkey can be used for data or execute, we will just treat
         * all keys as equal and track them as one entity.
         */
-       pkeys_total = be32_to_cpu(vals[0]);
+       pkeys_total = vals[0];
        pkeys_devtree_defined = true;
 }
 
index 6c5db1a..fe96910 100644 (file)
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
        level_shift = entries_shift + 3;
        level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
 
-       if ((level_shift - 3) * levels + page_shift >= 60)
+       if ((level_shift - 3) * levels + page_shift >= 55)
                return -EINVAL;
 
        /* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644 (file)
index 0000000..c9fecd1
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
index c229509..439dc70 100644 (file)
 #ifndef _ASM_RISCV_TLB_H
 #define _ASM_RISCV_TLB_H
 
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
 #include <asm-generic/tlb.h>
 
 static inline void tlb_flush(struct mmu_gather *tlb)
index db20dc6..b2d26d9 100644 (file)
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
 #ifdef CONFIG_BLK_DEV_INITRD
 static void __init setup_initrd(void)
 {
-       extern char __initramfs_start[];
-       extern unsigned long __initramfs_size;
        unsigned long size;
 
-       if (__initramfs_size > 0) {
-               initrd_start = (unsigned long)(&__initramfs_start);
-               initrd_end = initrd_start + __initramfs_size;
-       }
-
        if (initrd_start >= initrd_end) {
                printk(KERN_INFO "initrd not found or empty");
                goto disable;
@@ -193,7 +186,7 @@ static void __init setup_bootmem(void)
        BUG_ON(mem_size == 0);
 
        set_max_mapnr(PFN_DOWN(mem_size));
-       max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+       max_low_pfn = memblock_end_of_DRAM();
 
 #ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
index 568026c..fb03a44 100644 (file)
@@ -65,24 +65,11 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
 SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
        uintptr_t, flags)
 {
-#ifdef CONFIG_SMP
-       struct mm_struct *mm = current->mm;
-       bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0;
-#endif
-
        /* Check the reserved flags. */
        if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
                return -EINVAL;
 
-       /*
-        * Without CONFIG_SMP flush_icache_mm is a just a flush_icache_all(),
-        * which generates unused variable warnings all over this function.
-        */
-#ifdef CONFIG_SMP
-       flush_icache_mm(mm, local);
-#else
-       flush_icache_all();
-#endif
+       flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
 
        return 0;
 }
index 80b2729..ab9a0eb 100644 (file)
@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                              walk->dst.virt.addr, walk->src.virt.addr, n);
                if (k)
                        ret = blkcipher_walk_done(desc, walk, nbytes - k);
-               if (n < k) {
+               if (k < n) {
                        if (__cbc_paes_set_key(ctx) != 0)
                                return blkcipher_walk_done(desc, walk, -EIO);
                        memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
index f31a150..a8418e1 100644 (file)
@@ -16,7 +16,13 @@ typedef struct {
        unsigned long asce;
        unsigned long asce_limit;
        unsigned long vdso_base;
-       /* The mmu context allocates 4K page tables. */
+       /*
+        * The following bitfields need a down_write on the mm
+        * semaphore when they are written to. As they are only
+        * written once, they can be read without a lock.
+        *
+        * The mmu context allocates 4K page tables.
+        */
        unsigned int alloc_pgste:1;
        /* The mmu context uses extended page tables. */
        unsigned int has_pgste:1;
index 3cae916..e44a8d7 100644 (file)
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
 void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
 void sclp_early_detect(void);
 void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
 
 int _sclp_get_core_info(struct sclp_core_info *info);
 int sclp_core_configure(u8 core);
index 9431784..40c1dfe 100644 (file)
@@ -10,7 +10,7 @@
 
 static void sclp_early_write(struct console *con, const char *s, unsigned int len)
 {
-       __sclp_early_printk(s, len);
+       __sclp_early_printk(s, len, 0);
 }
 
 static struct console sclp_early_console = {
index a049a7b..c1a080b 100644 (file)
@@ -198,12 +198,10 @@ pgm_check_entry:
 
        /* Suspend CPU not available -> panic */
        larl    %r15,init_thread_union
-       ahi     %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+       aghi    %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+       aghi    %r15,-STACK_FRAME_OVERHEAD
        larl    %r2,.Lpanic_string
-       lghi    %r1,0
-       sam31
-       sigp    %r1,%r0,SIGP_SET_ARCHITECTURE
-       brasl   %r14,sclp_early_printk
+       brasl   %r14,sclp_early_printk_force
        larl    %r3,.Ldisabled_wait_31
        lpsw    0(%r3)
 4:
index 91ad4a9..ac5da6b 100644 (file)
@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                break;
        case KVM_CAP_S390_HPAGE_1M:
                r = 0;
-               if (hpage)
+               if (hpage && !kvm_is_ucontrol(kvm))
                        r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
@@ -691,11 +691,13 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
                mutex_lock(&kvm->lock);
                if (kvm->created_vcpus)
                        r = -EBUSY;
-               else if (!hpage || kvm->arch.use_cmma)
+               else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
                        r = -EINVAL;
                else {
                        r = 0;
+                       down_write(&kvm->mm->mmap_sem);
                        kvm->mm->context.allow_gmap_hpage_1m = 1;
+                       up_write(&kvm->mm->mmap_sem);
                        /*
                         * We might have to create fake 4k page
                         * tables. To avoid that the hardware works on
index d68f104..8679bd7 100644 (file)
@@ -280,9 +280,11 @@ retry:
                        goto retry;
                }
        }
-       if (rc)
-               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        up_read(&current->mm->mmap_sem);
+       if (rc == -EFAULT)
+               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       if (rc < 0)
+               return rc;
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
@@ -324,9 +326,11 @@ retry:
                        goto retry;
                }
        }
-       if (rc < 0)
-               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        up_read(&current->mm->mmap_sem);
+       if (rc == -EFAULT)
+               return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       if (rc < 0)
+               return rc;
        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
 }
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu)
                                              FAULT_FLAG_WRITE, &unlocked);
                        rc = !rc ? -EAGAIN : rc;
                }
+               up_read(&current->mm->mmap_sem);
                if (rc == -EFAULT)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
-               up_read(&current->mm->mmap_sem);
-               if (rc >= 0)
-                       start += PAGE_SIZE;
+               if (rc < 0)
+                       return rc;
+               start += PAGE_SIZE;
        }
 
        if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
                                                      FAULT_FLAG_WRITE, &unlocked);
                                rc = !rc ? -EAGAIN : rc;
                        }
+                       up_read(&current->mm->mmap_sem);
                        if (rc == -EFAULT)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
-                       up_read(&current->mm->mmap_sem);
-                       if (rc >= 0)
-                               start += PAGE_SIZE;
+                       if (rc == -EAGAIN)
+                               continue;
+                       if (rc < 0)
+                               return rc;
                }
+               start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
index 63844b9..a2b28cd 100644 (file)
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
                return set_validity_icpt(scb_s, 0x0039U);
 
        /* copy only the wrapping keys */
-       if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+       if (read_guest_real(vcpu, crycb_addr + 72,
+                           vsie_page->crycb.dea_wrapping_key_mask, 56))
                return set_validity_icpt(scb_s, 0x0035U);
 
        scb_s->ecb3 |= ecb3_flags;
index bb44990..911c7de 100644 (file)
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
+               if (!vma)
+                       continue;
                /*
                 * We do not discard pages that are backed by
                 * hugetlbfs, so we don't have to refault them.
                 */
-               if (vma && is_vm_hugetlb_page(vma))
+               if (is_vm_hugetlb_page(vma))
                        continue;
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size);
index 4e8f56c..cc42225 100644 (file)
@@ -115,8 +115,8 @@ static int auxio_probe(struct platform_device *dev)
                auxio_devtype = AUXIO_TYPE_SBUS;
                size = 1;
        } else {
-               printk("auxio: Unknown parent bus type [%s]\n",
-                      dp->parent->name);
+               printk("auxio: Unknown parent bus type [%pOFn]\n",
+                      dp->parent);
                return -ENODEV;
        }
        auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
index 5868fc3..639c8e5 100644 (file)
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                        linux_regs->pc = addr;
                        linux_regs->npc = addr + 4;
                }
-               /* fallthru */
+               /* fall through */
 
        case 'D':
        case 'k':
index d5f7dc6..a68bbdd 100644 (file)
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                        linux_regs->tpc = addr;
                        linux_regs->tnpc = addr + 4;
                }
-               /* fallthru */
+               /* fall through */
 
        case 'D':
        case 'k':
index 3641a29..e4abe9b 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irq.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/dma-mapping.h>
 #include <asm/leon.h>
 #include <asm/leon_amba.h>
 
@@ -381,6 +382,9 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
 
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
+
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
                       dp->full_name);
index 44e4d44..6df6086 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
+#include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/export.h>
 #include <linux/mod_devicetable.h>
@@ -675,6 +676,8 @@ static struct platform_device * __init scan_one_device(struct device_node *dp,
                dev_set_name(&op->dev, "root");
        else
                dev_set_name(&op->dev, "%08x", dp->phandle);
+       op->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       op->dev.dma_mask = &op->dev.coherent_dma_mask;
 
        if (of_device_register(op)) {
                printk("%s: Could not register of device.\n",
index 92627ab..d941875 100644 (file)
@@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op)
 
        power_reg = of_ioremap(res, 0, 0x4, "power");
 
-       printk(KERN_INFO "%s: Control reg at %llx\n",
-              op->dev.of_node->name, res->start);
+       printk(KERN_INFO "%pOFn: Control reg at %llx\n",
+              op->dev.of_node, res->start);
 
        if (has_button_interrupt(irq, op->dev.of_node)) {
                if (request_irq(irq,
index b51cbb9..17c87d2 100644 (file)
@@ -68,8 +68,8 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf)
                return;
 
        regs = rprop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io, regs->phys_addr);
 }
 
@@ -84,8 +84,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
                return;
 
        regs = prop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io,
                regs->phys_addr);
 }
@@ -104,13 +104,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
        regs = prop->value;
        devfn = (regs->phys_hi >> 8) & 0xff;
        if (devfn & 0x07) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        devfn >> 3,
                        devfn & 0x07);
        } else {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp,
                        devfn >> 3);
        }
 }
@@ -127,8 +127,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
 
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io, regs->phys_addr);
 }
 
@@ -167,8 +167,8 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf)
                return;
        device = prop->value;
 
-       sprintf(tmp_buf, "%s:%d:%d@%x,%x",
-               dp->name, *vendor, *device,
+       sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x",
+               dp, *vendor, *device,
                *intr, reg0);
 }
 
@@ -201,7 +201,7 @@ char * __init build_path_component(struct device_node *dp)
        tmp_buf[0] = '\0';
        __build_path_component(dp, tmp_buf);
        if (tmp_buf[0] == '\0')
-               strcpy(tmp_buf, dp->name);
+               snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
 
        n = prom_early_alloc(strlen(tmp_buf) + 1);
        strcpy(n, tmp_buf);
index baeaeed..6220411 100644 (file)
@@ -82,8 +82,8 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = rprop->value;
        if (!of_node_is_root(dp->parent)) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        (unsigned int) (regs->phys_addr >> 32UL),
                        (unsigned int) (regs->phys_addr & 0xffffffffUL));
                return;
@@ -97,17 +97,17 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
                const char *prefix = (type == 0) ? "m" : "i";
 
                if (low_bits)
-                       sprintf(tmp_buf, "%s@%s%x,%x",
-                               dp->name, prefix,
+                       sprintf(tmp_buf, "%pOFn@%s%x,%x",
+                               dp, prefix,
                                high_bits, low_bits);
                else
-                       sprintf(tmp_buf, "%s@%s%x",
-                               dp->name,
+                       sprintf(tmp_buf, "%pOFn@%s%x",
+                               dp,
                                prefix,
                                high_bits);
        } else if (type == 12) {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name, high_bits);
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp, high_bits);
        }
 }
 
@@ -122,8 +122,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
        if (!of_node_is_root(dp->parent)) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        (unsigned int) (regs->phys_addr >> 32UL),
                        (unsigned int) (regs->phys_addr & 0xffffffffUL));
                return;
@@ -138,8 +138,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
                if (tlb_type >= cheetah)
                        mask = 0x7fffff;
 
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        *(u32 *)prop->value,
                        (unsigned int) (regs->phys_addr & mask));
        }
@@ -156,8 +156,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
                return;
 
        regs = prop->value;
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                regs->which_io,
                regs->phys_addr);
 }
@@ -176,13 +176,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
        regs = prop->value;
        devfn = (regs->phys_hi >> 8) & 0xff;
        if (devfn & 0x07) {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp,
                        devfn >> 3,
                        devfn & 0x07);
        } else {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name,
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp,
                        devfn >> 3);
        }
 }
@@ -203,8 +203,8 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
        if (!prop)
                return;
 
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                *(u32 *) prop->value,
                (unsigned int) (regs->phys_addr & 0xffffffffUL));
 }
@@ -221,7 +221,7 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
 
-       sprintf(tmp_buf, "%s@%x", dp->name, *regs);
+       sprintf(tmp_buf, "%pOFn@%x", dp, *regs);
 }
 
 /* "name@addrhi,addrlo" */
@@ -236,8 +236,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
 
        regs = prop->value;
 
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name,
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp,
                (unsigned int) (regs->phys_addr >> 32UL),
                (unsigned int) (regs->phys_addr & 0xffffffffUL));
 }
@@ -257,8 +257,8 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
        /* This actually isn't right... should look at the #address-cells
         * property of the i2c bus node etc. etc.
         */
-       sprintf(tmp_buf, "%s@%x,%x",
-               dp->name, regs[0], regs[1]);
+       sprintf(tmp_buf, "%pOFn@%x,%x",
+               dp, regs[0], regs[1]);
 }
 
 /* "name@reg0[,reg1]" */
@@ -274,11 +274,11 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
        regs = prop->value;
 
        if (prop->length == sizeof(u32) || regs[1] == 1) {
-               sprintf(tmp_buf, "%s@%x",
-                       dp->name, regs[0]);
+               sprintf(tmp_buf, "%pOFn@%x",
+                       dp, regs[0]);
        } else {
-               sprintf(tmp_buf, "%s@%x,%x",
-                       dp->name, regs[0], regs[1]);
+               sprintf(tmp_buf, "%pOFn@%x,%x",
+                       dp, regs[0], regs[1]);
        }
 }
 
@@ -295,11 +295,11 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf
        regs = prop->value;
 
        if (regs[2] || regs[3]) {
-               sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
-                       dp->name, regs[0], regs[1], regs[2], regs[3]);
+               sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x",
+                       dp, regs[0], regs[1], regs[2], regs[3]);
        } else {
-               sprintf(tmp_buf, "%s@%08x%08x",
-                       dp->name, regs[0], regs[1]);
+               sprintf(tmp_buf, "%pOFn@%08x%08x",
+                       dp, regs[0], regs[1]);
        }
 }
 
@@ -361,7 +361,7 @@ char * __init build_path_component(struct device_node *dp)
        tmp_buf[0] = '\0';
        __build_path_component(dp, tmp_buf);
        if (tmp_buf[0] == '\0')
-               strcpy(tmp_buf, dp->name);
+               snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp);
 
        n = prom_early_alloc(strlen(tmp_buf) + 1);
        strcpy(n, tmp_buf);
index 635d67f..7db5aab 100644 (file)
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
                struct vio_dring_register pkt;
                char all[sizeof(struct vio_dring_register) +
                         (sizeof(struct ldc_trans_cookie) *
-                         dr->ncookies)];
+                         VIO_MAX_RING_COOKIES)];
        } u;
+       size_t bytes = sizeof(struct vio_dring_register) +
+                      (sizeof(struct ldc_trans_cookie) *
+                       dr->ncookies);
        int i;
 
-       memset(&u, 0, sizeof(u));
+       if (WARN_ON(bytes > sizeof(u)))
+               return -EINVAL;
+
+       memset(&u, 0, bytes);
        init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
        u.pkt.dring_ident = 0;
        u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
                       (unsigned long long) u.pkt.cookies[i].cookie_size);
        }
 
-       return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+       return send_ctrl(vio, &u.pkt.tag, bytes);
 }
 
 static int send_rdx(struct vio_driver_state *vio)
index dd0b5a9..dc85570 100644 (file)
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
 targets += $(vdso_img_cfiles)
 targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 
-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
                        -Wl,--no-undefined \
                        -Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
                        $(DISABLE_LTO)
 
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
 
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 hostprogs-y                    += vdso2c
 
 quiet_cmd_vdso2c = VDSO2C  $@
-define cmd_vdso2c
-       $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+      cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
 
 $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
        $(call if_changed,vdso2c)
index c5ff296..1a0be02 100644 (file)
@@ -2843,7 +2843,7 @@ config X86_SYSFB
          This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
          framebuffers so the new generic system-framebuffer drivers can be
          used on x86. If the framebuffer is not compatible with the generic
-         modes, it is adverticed as fallback platform framebuffer so legacy
+         modes, it is advertised as fallback platform framebuffer so legacy
          drivers like efifb, vesafb and uvesafb can pick it up.
          If this option is not selected, all system framebuffers are always
          marked as fallback platform framebuffers as usual.
index 9485924..8f6e7eb 100644 (file)
@@ -175,22 +175,6 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER
   endif
 endif
 
-ifndef CC_HAVE_ASM_GOTO
-  $(error Compiler lacks asm-goto support.)
-endif
-
-#
-# Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a
-# GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226).  There's no way
-# to test for this bug at compile-time because the test case needs to execute,
-# which is a no-go for cross compilers.  So check the GCC version instead.
-#
-ifdef CONFIG_JUMP_LABEL
-  ifneq ($(ACCUMULATE_OUTGOING_ARGS), 1)
-       ACCUMULATE_OUTGOING_ARGS = $(call cc-if-fullversion, -lt, 040502, 1)
-  endif
-endif
-
 ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
        # This compiler flag is not supported by Clang:
        KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
@@ -312,6 +296,13 @@ PHONY += vdso_install
 vdso_install:
        $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
+archprepare: checkbin
+checkbin:
+ifndef CC_HAVE_ASM_GOTO
+       @echo Compiler lacks asm-goto support.
+       @exit 1
+endif
+
 archclean:
        $(Q)rm -rf $(objtree)/arch/i386
        $(Q)rm -rf $(objtree)/arch/x86_64
index eaa843a..a480356 100644 (file)
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
        push    %ebx
        push    %ecx
        push    %edx
-       push    %edi
-
-       /*
-        * RIP-relative addressing is needed to access the encryption bit
-        * variable. Since we are running in 32-bit mode we need this call/pop
-        * sequence to get the proper relative addressing.
-        */
-       call    1f
-1:     popl    %edi
-       subl    $1b, %edi
-
-       movl    enc_bit(%edi), %eax
-       cmpl    $0, %eax
-       jge     .Lsev_exit
 
        /* Check if running under a hypervisor */
        movl    $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
 
        movl    %ebx, %eax
        andl    $0x3f, %eax             /* Return the encryption bit location */
-       movl    %eax, enc_bit(%edi)
        jmp     .Lsev_exit
 
 .Lno_sev:
        xor     %eax, %eax
-       movl    %eax, enc_bit(%edi)
 
 .Lsev_exit:
-       pop     %edi
        pop     %edx
        pop     %ecx
        pop     %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)
 
        .data
-enc_bit:
-       .int    0xffffffff
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT
        .balign 8
index acd11b3..2a356b9 100644 (file)
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
            !boot_cpu_has(X86_FEATURE_AES) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 2071c3d..dbe8bb9 100644 (file)
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
            !boot_cpu_has(X86_FEATURE_AES) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index b5f2a8f..8bebda2 100644 (file)
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
            !boot_cpu_has(X86_FEATURE_AES) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 9bd1395..cb2deb6 100644 (file)
@@ -223,34 +223,34 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
        pcmpeqd TWOONE(%rip), \TMP2
        pand    POLY(%rip), \TMP2
        pxor    \TMP2, \TMP3
-       movdqa  \TMP3, HashKey(%arg2)
+       movdqu  \TMP3, HashKey(%arg2)
 
        movdqa     \TMP3, \TMP5
        pshufd     $78, \TMP3, \TMP1
        pxor       \TMP3, \TMP1
-       movdqa     \TMP1, HashKey_k(%arg2)
+       movdqu     \TMP1, HashKey_k(%arg2)
 
        GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^2<<1 (mod poly)
-       movdqa     \TMP5, HashKey_2(%arg2)
+       movdqu     \TMP5, HashKey_2(%arg2)
 # HashKey_2 = HashKey^2<<1 (mod poly)
        pshufd     $78, \TMP5, \TMP1
        pxor       \TMP5, \TMP1
-       movdqa     \TMP1, HashKey_2_k(%arg2)
+       movdqu     \TMP1, HashKey_2_k(%arg2)
 
        GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^3<<1 (mod poly)
-       movdqa     \TMP5, HashKey_3(%arg2)
+       movdqu     \TMP5, HashKey_3(%arg2)
        pshufd     $78, \TMP5, \TMP1
        pxor       \TMP5, \TMP1
-       movdqa     \TMP1, HashKey_3_k(%arg2)
+       movdqu     \TMP1, HashKey_3_k(%arg2)
 
        GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
 # TMP5 = HashKey^3<<1 (mod poly)
-       movdqa     \TMP5, HashKey_4(%arg2)
+       movdqu     \TMP5, HashKey_4(%arg2)
        pshufd     $78, \TMP5, \TMP1
        pxor       \TMP5, \TMP1
-       movdqa     \TMP1, HashKey_4_k(%arg2)
+       movdqu     \TMP1, HashKey_4_k(%arg2)
 .endm
 
 # GCM_INIT initializes a gcm_context struct to prepare for encoding/decoding.
@@ -271,7 +271,7 @@ ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
        movdqu %xmm0, CurCount(%arg2) # ctx_data.current_counter = iv
 
        PRECOMPUTE \SUBKEY, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
-       movdqa HashKey(%arg2), %xmm13
+       movdqu HashKey(%arg2), %xmm13
 
        CALC_AAD_HASH %xmm13, \AAD, \AADLEN, %xmm0, %xmm1, %xmm2, %xmm3, \
        %xmm4, %xmm5, %xmm6
@@ -997,7 +997,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pshufd    $78, \XMM5, \TMP6
        pxor      \XMM5, \TMP6
        paddd     ONE(%rip), \XMM0              # INCR CNT
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
        movdqa    \XMM0, \XMM1
        paddd     ONE(%rip), \XMM0              # INCR CNT
@@ -1016,7 +1016,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pxor      (%arg1), \XMM2
        pxor      (%arg1), \XMM3
        pxor      (%arg1), \XMM4
-       movdqa    HashKey_4_k(%arg2), \TMP5
+       movdqu    HashKey_4_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
        movaps 0x10(%arg1), \TMP1
        AESENC    \TMP1, \XMM1              # Round 1
@@ -1031,7 +1031,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM6, \TMP1
        pshufd    $78, \XMM6, \TMP2
        pxor      \XMM6, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
        movaps 0x30(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 3
@@ -1044,7 +1044,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_3_k(%arg2), \TMP5
+       movdqu    HashKey_3_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x50(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 5
@@ -1058,7 +1058,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM7, \TMP1
        pshufd    $78, \XMM7, \TMP2
        pxor      \XMM7, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
 
         # Multiply TMP5 * HashKey using karatsuba
 
@@ -1074,7 +1074,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_2_k(%arg2), \TMP5
+       movdqu    HashKey_2_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x80(%arg1), \TMP3
        AESENC    \TMP3, \XMM1             # Round 8
@@ -1092,7 +1092,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM8, \TMP1
        pshufd    $78, \XMM8, \TMP2
        pxor      \XMM8, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
        movaps 0x90(%arg1), \TMP3
        AESENC    \TMP3, \XMM1            # Round 9
@@ -1121,7 +1121,7 @@ aes_loop_par_enc_done\@:
        AESENCLAST \TMP3, \XMM2
        AESENCLAST \TMP3, \XMM3
        AESENCLAST \TMP3, \XMM4
-       movdqa    HashKey_k(%arg2), \TMP5
+       movdqu    HashKey_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
        movdqu    (%arg4,%r11,1), \TMP3
        pxor      \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
@@ -1205,7 +1205,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pshufd    $78, \XMM5, \TMP6
        pxor      \XMM5, \TMP6
        paddd     ONE(%rip), \XMM0              # INCR CNT
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
        movdqa    \XMM0, \XMM1
        paddd     ONE(%rip), \XMM0              # INCR CNT
@@ -1224,7 +1224,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        pxor      (%arg1), \XMM2
        pxor      (%arg1), \XMM3
        pxor      (%arg1), \XMM4
-       movdqa    HashKey_4_k(%arg2), \TMP5
+       movdqu    HashKey_4_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
        movaps 0x10(%arg1), \TMP1
        AESENC    \TMP1, \XMM1              # Round 1
@@ -1239,7 +1239,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM6, \TMP1
        pshufd    $78, \XMM6, \TMP2
        pxor      \XMM6, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
        movaps 0x30(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 3
@@ -1252,7 +1252,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_3_k(%arg2), \TMP5
+       movdqu    HashKey_3_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x50(%arg1), \TMP3
        AESENC    \TMP3, \XMM1              # Round 5
@@ -1266,7 +1266,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM7, \TMP1
        pshufd    $78, \XMM7, \TMP2
        pxor      \XMM7, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
 
         # Multiply TMP5 * HashKey using karatsuba
 
@@ -1282,7 +1282,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        AESENC    \TMP3, \XMM2
        AESENC    \TMP3, \XMM3
        AESENC    \TMP3, \XMM4
-       movdqa    HashKey_2_k(%arg2), \TMP5
+       movdqu    HashKey_2_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
        movaps 0x80(%arg1), \TMP3
        AESENC    \TMP3, \XMM1             # Round 8
@@ -1300,7 +1300,7 @@ TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
        movdqa    \XMM8, \TMP1
        pshufd    $78, \XMM8, \TMP2
        pxor      \XMM8, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
        movaps 0x90(%arg1), \TMP3
        AESENC    \TMP3, \XMM1            # Round 9
@@ -1329,7 +1329,7 @@ aes_loop_par_dec_done\@:
        AESENCLAST \TMP3, \XMM2
        AESENCLAST \TMP3, \XMM3
        AESENCLAST \TMP3, \XMM4
-       movdqa    HashKey_k(%arg2), \TMP5
+       movdqu    HashKey_k(%arg2), \TMP5
        PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
        movdqu    (%arg4,%r11,1), \TMP3
        pxor      \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
@@ -1405,10 +1405,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM1, \TMP6
        pshufd    $78, \XMM1, \TMP2
        pxor      \XMM1, \TMP2
-       movdqa    HashKey_4(%arg2), \TMP5
+       movdqu    HashKey_4(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP6       # TMP6 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM1       # XMM1 = a0*b0
-       movdqa    HashKey_4_k(%arg2), \TMP4
+       movdqu    HashKey_4_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        movdqa    \XMM1, \XMMDst
        movdqa    \TMP2, \XMM1              # result in TMP6, XMMDst, XMM1
@@ -1418,10 +1418,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM2, \TMP1
        pshufd    $78, \XMM2, \TMP2
        pxor      \XMM2, \TMP2
-       movdqa    HashKey_3(%arg2), \TMP5
+       movdqu    HashKey_3(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM2       # XMM2 = a0*b0
-       movdqa    HashKey_3_k(%arg2), \TMP4
+       movdqu    HashKey_3_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM2, \XMMDst
@@ -1433,10 +1433,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM3, \TMP1
        pshufd    $78, \XMM3, \TMP2
        pxor      \XMM3, \TMP2
-       movdqa    HashKey_2(%arg2), \TMP5
+       movdqu    HashKey_2(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM3       # XMM3 = a0*b0
-       movdqa    HashKey_2_k(%arg2), \TMP4
+       movdqu    HashKey_2_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM3, \XMMDst
@@ -1446,10 +1446,10 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
        movdqa    \XMM4, \TMP1
        pshufd    $78, \XMM4, \TMP2
        pxor      \XMM4, \TMP2
-       movdqa    HashKey(%arg2), \TMP5
+       movdqu    HashKey(%arg2), \TMP5
        PCLMULQDQ 0x11, \TMP5, \TMP1        # TMP1 = a1*b1
        PCLMULQDQ 0x00, \TMP5, \XMM4       # XMM4 = a0*b0
-       movdqa    HashKey_k(%arg2), \TMP4
+       movdqu    HashKey_k(%arg2), \TMP4
        PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
        pxor      \TMP1, \TMP6
        pxor      \XMM4, \XMMDst
index 95cf857..f40244e 100644 (file)
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
 static int __init crypto_morus1280_sse2_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index 615fb7b..9afaf8f 100644 (file)
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
 static int __init crypto_morus640_sse2_module_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XMM2) ||
-           !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
            !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
                return -ENODEV;
 
index fa3f439..141d415 100644 (file)
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
        -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
 
 $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
 
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
 KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
 $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(obj)/vdso32.so.dbg: FORCE \
index f19856d..e48ca3a 100644 (file)
@@ -43,8 +43,9 @@ extern u8 hvclock_page
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*ts) :
+            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+       asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+            "memory", "rcx", "r11");
        return ret;
 }
 
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[clock], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+               : "=a" (ret), "=m" (*ts)
+               : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
 }
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
 {
        long ret;
 
-       asm(
+       asm (
                "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
+               "mov %[tv], %%ebx \n"
                "call __kernel_vsyscall \n"
                "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+               : "=a" (ret), "=m" (*tv), "=m" (*tz)
+               : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
 }
index 981ba5e..8671de1 100644 (file)
@@ -36,6 +36,7 @@
 
 static int num_counters_llc;
 static int num_counters_nb;
+static bool l3_mask;
 
 static HLIST_HEAD(uncore_unused_list);
 
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
+       /*
+        * SliceMask and ThreadMask need to be set for certain L3 events in
+        * Family 17h. For other events, the two fields do not affect the count.
+        */
+       if (l3_mask)
+               hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
        if (event->cpu < 0)
                return -EINVAL;
 
@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l3";
                format_attr_event_df.show = &event_show_df;
                format_attr_event_l3.show = &event_show_l3;
+               l3_mask                   = true;
        } else {
                num_counters_nb           = NUM_COUNTERS_NB;
                num_counters_llc          = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
                amd_llc_pmu.name          = "amd_l2";
                format_attr_event_df      = format_attr_event;
                format_attr_event_l3      = format_attr_event;
+               l3_mask                   = false;
        }
 
        amd_nb_pmu.attr_groups  = amd_uncore_attr_groups_df;
index 5f4829f..dfb2f7c 100644 (file)
@@ -2465,7 +2465,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
        perf_callchain_store(entry, regs->ip);
 
-       if (!current->mm)
+       if (!nmi_uaccess_okay())
                return;
 
        if (perf_callchain_user32(regs, entry))
index f3e006b..c88ed39 100644 (file)
@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
 
        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
+
+       /* Knights Landing does have MISPREDICT bit */
+       if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+               x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
 }
index 51d7c11..c07bee3 100644 (file)
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
 
 void bdx_uncore_cpu_init(void)
 {
-       int pkg = topology_phys_to_logical_pkg(0);
+       int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
 
        if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
                .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
        },
        { /* M3UPI0 Link 0 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
        },
        { /* M3UPI0 Link 1 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
        },
        { /* M3UPI1 Link 2 */
-               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
-               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
        },
        { /* end: all zeroes */ }
 };
index 5b0f613..2c43e30 100644 (file)
@@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
  */
 static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
 {
-       struct ipi_arg_ex **arg;
-       struct ipi_arg_ex *ipi_arg;
+       struct hv_send_ipi_ex **arg;
+       struct hv_send_ipi_ex *ipi_arg;
        unsigned long flags;
        int nr_bank = 0;
        int ret = 1;
@@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
                return false;
 
        local_irq_save(flags);
-       arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+       arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
 
        ipi_arg = *arg;
        if (unlikely(!ipi_arg))
@@ -135,7 +135,7 @@ ipi_mask_ex_done:
 static bool __send_ipi_mask(const struct cpumask *mask, int vector)
 {
        int cur_cpu, vcpu;
-       struct ipi_arg_non_ex ipi_arg;
+       struct hv_send_ipi ipi_arg;
        int ret = 1;
 
        trace_hyperv_send_ipi_mask(mask, vector);
index b143717..ce84388 100644 (file)
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
 
 /**
  * arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic_inc arch_atomic_inc
 static __always_inline void arch_atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_inc arch_atomic_inc
 
 /**
  * arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic_dec arch_atomic_dec
 static __always_inline void arch_atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter));
 }
+#define arch_atomic_dec arch_atomic_dec
 
 /**
  * arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
 
 /**
  * arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
 
 /**
  * arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic_add_negative arch_atomic_add_negative
 static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic_add_negative arch_atomic_add_negative
 
 /**
  * arch_atomic_add_return - add integer and return
index ef959f0..6a5b0ec 100644 (file)
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static inline void arch_atomic64_inc(atomic64_t *v)
 {
        __alternative_atomic64(inc, inc_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static inline void arch_atomic64_dec(atomic64_t *v)
 {
        __alternative_atomic64(dec, dec_return, /* no output */,
                               "S" (v) : "memory", "eax", "ecx", "edx");
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
        return (int)a;
 }
 
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
 {
        int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
                             "S" (v) : "ecx", "edx", "memory");
        return r;
 }
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
 
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
        long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
                             "S" (v) : "ecx", "memory");
        return r;
 }
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
 
 #undef alternative_atomic64
 #undef __alternative_atomic64
index 4343d9b..5f851d9 100644 (file)
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
 
 /**
  * arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-#define arch_atomic64_inc arch_atomic64_inc
 static __always_inline void arch_atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_inc arch_atomic64_inc
 
 /**
  * arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-#define arch_atomic64_dec arch_atomic64_dec
 static __always_inline void arch_atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
                     : "m" (v->counter));
 }
+#define arch_atomic64_dec arch_atomic64_dec
 
 /**
  * arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
 
 /**
  * arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
 
 /**
  * arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-#define arch_atomic64_add_negative arch_atomic64_add_negative
 static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
+#define arch_atomic64_add_negative arch_atomic64_add_negative
 
 /**
  * arch_atomic64_add_return - add and return
index e203169..6390bd8 100644 (file)
 #ifndef _ASM_X86_FIXMAP_H
 #define _ASM_X86_FIXMAP_H
 
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM 2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP 507
+
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
 #include <asm/acpi.h>
index e977b6b..00e01d2 100644 (file)
@@ -726,19 +726,21 @@ struct hv_enlightened_vmcs {
 #define HV_STIMER_AUTOENABLE           (1ULL << 3)
 #define HV_STIMER_SINT(config)         (__u8)(((config) >> 16) & 0x0F)
 
-struct ipi_arg_non_ex {
-       u32 vector;
-       u32 reserved;
-       u64 cpu_mask;
-};
-
 struct hv_vpset {
        u64 format;
        u64 valid_bank_mask;
        u64 bank_contents[];
 };
 
-struct ipi_arg_ex {
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+       u32 vector;
+       u32 reserved;
+       u64 cpu_mask;
+};
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
        u32 vector;
        u32 reserved;
        struct hv_vpset vp_set;
index c14f2a7..15450a6 100644 (file)
@@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
        return flags;
 }
 
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
 {
        asm volatile("push %0 ; popf"
                     : /* no output */
index 395c963..75f1e35 100644 (file)
@@ -22,10 +22,20 @@ enum die_val {
        DIE_NMIUNKNOWN,
 };
 
+enum show_regs_mode {
+       SHOW_REGS_SHORT,
+       /*
+        * For when userspace crashed, but we don't think it's our fault, and
+        * therefore don't print kernel registers.
+        */
+       SHOW_REGS_USER,
+       SHOW_REGS_ALL
+};
+
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
 extern void show_iret_regs(struct pt_regs *regs);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
index 00ddb0c..09b2e3e 100644 (file)
@@ -869,6 +869,8 @@ struct kvm_arch {
 
        bool x2apic_format;
        bool x2apic_broadcast_quirk_disabled;
+
+       bool guest_can_read_msr_platform_info;
 };
 
 struct kvm_vm_stat {
@@ -1022,6 +1024,7 @@ struct kvm_x86_ops {
        void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
        void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+       bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
        void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -1055,6 +1058,7 @@ struct kvm_x86_ops {
        bool (*umip_emulated)(void);
 
        int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+       void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
 
        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
 
@@ -1237,19 +1241,12 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE         (1 << 0)
 #define EMULTYPE_TRAP_UD           (1 << 1)
 #define EMULTYPE_SKIP              (1 << 2)
-#define EMULTYPE_RETRY             (1 << 3)
-#define EMULTYPE_NO_REEXECUTE      (1 << 4)
-#define EMULTYPE_NO_UD_ON_FAIL     (1 << 5)
-#define EMULTYPE_VMWARE                    (1 << 6)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
-                           int emulation_type, void *insn, int insn_len);
-
-static inline int emulate_instruction(struct kvm_vcpu *vcpu,
-                       int emulation_type)
-{
-       return x86_emulate_instruction(vcpu, 0,
-                       emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
-}
+#define EMULTYPE_ALLOW_RETRY       (1 << 3)
+#define EMULTYPE_NO_UD_ON_FAIL     (1 << 4)
+#define EMULTYPE_VMWARE                    (1 << 5)
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+                                       void *insn, int insn_len);
 
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -1450,7 +1447,6 @@ asmlinkage void kvm_spurious_fault(void);
        ____kvm_handle_fault_on_reboot(insn, "")
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -1463,7 +1459,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-                   unsigned long ipi_bitmap_high, int min,
+                   unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit);
 
 u64 kvm_get_arch_capabilities(void);
@@ -1490,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
 
 int kvm_is_in_guest(void);
 
index c064383..616f8e6 100644 (file)
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
 
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void);
+void __init mem_encrypt_free_decrypted_mem(void);
 
 bool sme_active(void);
 bool sev_active(void);
 
+#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
+
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
 #define sme_me_mask    0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
 static inline int __init
 early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
 
+#define __bss_decrypted
+
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
 /*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 #define __sme_pa(x)            (__pa(x) | sme_me_mask)
 #define __sme_pa_nodebug(x)    (__pa_nodebug(x) | sme_me_mask)
 
+extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
index 12f5408..78241b7 100644 (file)
 #define INTEL_ARCH_EVENT_MASK  \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
 
+#define AMD64_L3_SLICE_SHIFT                           48
+#define AMD64_L3_SLICE_MASK                            \
+       ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT                          56
+#define AMD64_L3_THREAD_MASK                           \
+       ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
 #define X86_RAW_EVENT_MASK             \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
index 24c6cf5..60d0f90 100644 (file)
@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
-#endif
        *pmdp = pmd;
 }
 
@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #ifdef CONFIG_SMP
 static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
-#endif
        return __pmd(xchg((pmdval_t *)xp, 0));
 }
 #else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #ifdef CONFIG_SMP
 static inline pud_t native_pudp_get_and_clear(pud_t *xp)
 {
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
-       pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
-#endif
        return __pud(xchg((pudval_t *)xp, 0));
 }
 #else
index a564084..f8b1ad2 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _ASM_X86_PGTABLE_3LEVEL_H
 #define _ASM_X86_PGTABLE_3LEVEL_H
 
+#include <asm/atomic64_32.h>
+
 /*
  * Intel Physical Address Extension (PAE) Mode - three-level page
  * tables on PPro+ CPUs.
@@ -150,10 +152,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
        pte_t res;
 
-       /* xchg acts as a barrier before the setting of the high bits */
-       res.pte_low = xchg(&ptep->pte_low, 0);
-       res.pte_high = ptep->pte_high;
-       ptep->pte_high = 0;
+       res.pte = (pteval_t)arch_atomic64_xchg((atomic64_t *)ptep, 0);
 
        return res;
 }
index e4ffa56..690c030 100644 (file)
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                return xchg(pmdp, pmd);
        } else {
                pmd_t old = *pmdp;
-               *pmdp = pmd;
+               WRITE_ONCE(*pmdp, pmd);
                return old;
        }
 }
index f773d5e..9c85b54 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
+#include <asm/fixmap.h>
 
 extern p4d_t level4_kernel_pgt[512];
 extern p4d_t level4_ident_pgt[512];
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pmd_t level2_fixmap_pgt[512];
 extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
 extern pgd_t init_top_pgt[];
 
 #define swapper_pg_dir init_top_pgt
@@ -55,15 +56,15 @@ struct mm_struct;
 void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
 
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
-                                   pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-       *ptep = native_make_pte(0);
+       WRITE_ONCE(*ptep, pte);
 }
 
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+                                   pte_t *ptep)
 {
-       *ptep = pte;
+       native_set_pte(ptep, native_make_pte(0));
 }
 
 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +74,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 
 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-       *pmdp = pmd;
+       WRITE_ONCE(*pmdp, pmd);
 }
 
 static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +110,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-       *pudp = pud;
+       WRITE_ONCE(*pudp, pud);
 }
 
 static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +138,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
        pgd_t pgd;
 
        if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
-               *p4dp = p4d;
+               WRITE_ONCE(*p4dp, p4d);
                return;
        }
 
        pgd = native_make_pgd(native_p4d_val(p4d));
        pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
-       *p4dp = native_make_p4d(native_pgd_val(pgd));
+       WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
 }
 
 static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +154,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
 
 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-       *pgdp = pti_set_user_pgtbl(pgdp, pgd);
+       WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 }
 
 static inline void native_pgd_clear(pgd_t *pgd)
index c242972..d53c54b 100644 (file)
@@ -132,6 +132,8 @@ struct cpuinfo_x86 {
        /* Index into per_cpu list: */
        u16                     cpu_index;
        u32                     microcode;
+       /* Address space bits used by the cache internally */
+       u8                      x86_cache_bits;
        unsigned                initialized : 1;
 } __randomize_layout;
 
@@ -183,7 +185,7 @@ extern void cpu_detect(struct cpuinfo_x86 *c);
 
 static inline unsigned long long l1tf_pfn_limit(void)
 {
-       return BIT_ULL(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT);
+       return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
 extern void early_cpu_init(void);
index 5f9012f..33d3c88 100644 (file)
@@ -39,6 +39,7 @@ extern void do_signal(struct pt_regs *regs);
 
 #define __ARCH_HAS_SA_RESTORER
 
+#include <asm/asm.h>
 #include <uapi/asm/sigcontext.h>
 
 #ifdef __i386__
@@ -86,9 +87,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
 
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
-       unsigned char ret;
-       asm("btl %2,%1\n\tsetc %0"
-           : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+       bool ret;
+       asm("btl %2,%1" CC_SET(c)
+           : CC_OUT(c) (ret) : "m"(*set), "Ir"(_sig-1));
        return ret;
 }
 
index b6dc698..f335aad 100644 (file)
@@ -111,6 +111,6 @@ static inline unsigned long caller_frame_pointer(void)
        return (unsigned long)frame;
 }
 
-void show_opcodes(u8 *rip, const char *loglvl);
+void show_opcodes(struct pt_regs *regs, const char *loglvl);
 void show_ip(struct pt_regs *regs, const char *loglvl);
 #endif /* _ASM_X86_STACKTRACE_H */
index 29c9da6..58ce528 100644 (file)
@@ -175,8 +175,16 @@ struct tlb_state {
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
+        *
+        * During switch_mm_irqs_off(), loaded_mm will be set to
+        * LOADED_MM_SWITCHING during the brief interrupts-off window
+        * when CR3 and loaded_mm would otherwise be inconsistent.  This
+        * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
        u16 loaded_mm_asid;
        u16 next_asid;
        /* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm.  It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+       struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+       struct mm_struct *current_mm = current->mm;
+
+       VM_WARN_ON_ONCE(!loaded_mm);
+
+       /*
+        * The condition we want to check is
+        * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
+        * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+        * is supposed to be reasonably fast.
+        *
+        * Instead, we check the almost equivalent but somewhat conservative
+        * condition below, and we rely on the fact that switch_mm_irqs_off()
+        * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+        */
+       if (loaded_mm != current_mm)
+               return false;
+
+       VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+       return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
index a80c067..e60c45f 100644 (file)
@@ -10,8 +10,13 @@ struct cpumask;
 struct mm_struct;
 
 #ifdef CONFIG_X86_UV
+#include <linux/efi.h>
 
 extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+       return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
+}
 extern int is_uv_system(void);
 extern int is_uv_hubless(void);
 extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
 #else  /* X86_UV */
 
 static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)    { return 0; }
 static inline int is_uv_system(void)   { return 0; }
 static inline int is_uv_hubless(void)  { return 0; }
 static inline void uv_cpu_init(void)   { }
index fb856c9..5374854 100644 (file)
@@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
         *
         * If RDPID is available, use it.
         */
-       alternative_io ("lsl %[p],%[seg]",
+       alternative_io ("lsl %[seg],%[p]",
                        ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
                        X86_FEATURE_RDPID,
                        [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
index 86299ef..fd23d57 100644 (file)
@@ -377,6 +377,7 @@ struct kvm_sync_regs {
 
 #define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE  (1 << 2)
 
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
index 014f214..b9d5e7c 100644 (file)
@@ -684,8 +684,6 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
  * It means the size must be writable atomically and the address must be aligned
  * in a way that permits an atomic write. It also makes sure we fit on a single
  * page.
- *
- * Note: Must be called under text_mutex.
  */
 void *text_poke(void *addr, const void *opcode, size_t len)
 {
@@ -700,6 +698,8 @@ void *text_poke(void *addr, const void *opcode, size_t len)
         */
        BUG_ON(!after_bootmem);
 
+       lockdep_assert_held(&text_mutex);
+
        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -782,8 +782,6 @@ int poke_int3_handler(struct pt_regs *regs)
  *     - replace the first byte (int3) by the first byte of
  *       replacing opcode
  *     - sync cores
- *
- * Note: must be called under text_mutex.
  */
 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
 {
@@ -792,6 +790,9 @@ void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
        bp_int3_handler = handler;
        bp_int3_addr = (u8 *)addr + sizeof(int3);
        bp_patching_in_progress = true;
+
+       lockdep_assert_held(&text_mutex);
+
        /*
         * Corresponding read barrier in int3 notifier for making sure the
         * in_progress and handler are correctly ordered wrt. patching.
index 9f148e3..7654feb 100644 (file)
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
        if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
                /* Something in the core code broke! Survive gracefully */
                pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
-               return EINVAL;
+               return -EINVAL;
        }
 
        ret = assign_managed_vector(irqd, vector_searchmask);
index ec00d1f..f7151cd 100644 (file)
@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
        return 0;
 }
 
+#ifdef CONFIG_PROC_FS
 static int proc_apm_show(struct seq_file *m, void *v)
 {
        unsigned short  bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
                   units);
        return 0;
 }
+#endif
 
 static int apm(void *unused)
 {
index 22ab408..eeea634 100644 (file)
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
        /* AMD errata T13 (order #21922) */
-       if ((c->x86 == 6)) {
+       if (c->x86 == 6) {
                /* Duron Rev A0 */
                if (c->x86_model == 3 && c->x86_stepping == 0)
                        size = 64;
index 4c2313d..40bdaea 100644 (file)
@@ -668,6 +668,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
 
+/*
+ * These CPUs all support 44bits physical address space internally in the
+ * cache but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the top most address bit for the inversion of
+ * non present PTEs. When the installed memory reaches into the top most
+ * address bit due to memory holes, which has been observed on machines
+ * which report 36bits physical address bits and have 32G RAM installed,
+ * then the mitigation range check in l1tf_select_mitigation() triggers.
+ * This is a false positive because the mitigation is still possible due to
+ * the fact that the cache uses 44bit internally. Use the cache bits
+ * instead of the reported physical bits and adjust them on the affected
+ * machines to 44bit if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+       if (c->x86 != 6)
+               return;
+
+       switch (c->x86_model) {
+       case INTEL_FAM6_NEHALEM:
+       case INTEL_FAM6_WESTMERE:
+       case INTEL_FAM6_SANDYBRIDGE:
+       case INTEL_FAM6_IVYBRIDGE:
+       case INTEL_FAM6_HASWELL_CORE:
+       case INTEL_FAM6_HASWELL_ULT:
+       case INTEL_FAM6_HASWELL_GT3E:
+       case INTEL_FAM6_BROADWELL_CORE:
+       case INTEL_FAM6_BROADWELL_GT3E:
+       case INTEL_FAM6_SKYLAKE_MOBILE:
+       case INTEL_FAM6_SKYLAKE_DESKTOP:
+       case INTEL_FAM6_KABYLAKE_MOBILE:
+       case INTEL_FAM6_KABYLAKE_DESKTOP:
+               if (c->x86_cache_bits < 44)
+                       c->x86_cache_bits = 44;
+               break;
+       }
+}
+
 static void __init l1tf_select_mitigation(void)
 {
        u64 half_pa;
@@ -675,6 +714,8 @@ static void __init l1tf_select_mitigation(void)
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;
 
+       override_cache_bits(&boot_cpu_data);
+
        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
@@ -694,11 +735,6 @@ static void __init l1tf_select_mitigation(void)
        return;
 #endif
 
-       /*
-        * This is extremely unlikely to happen because almost all
-        * systems have far more MAX_PA/2 than RAM can be fit into
-        * DIMM slots.
-        */
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
index 84dee5a..44c4ef3 100644 (file)
@@ -919,6 +919,7 @@ void get_cpu_address_sizes(struct cpuinfo_x86 *c)
        else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
                c->x86_phys_bits = 36;
 #endif
+       c->x86_cache_bits = c->x86_phys_bits;
 }
 
 static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
index 401e8c1..fc3c07f 100644 (file)
@@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return false;
 
+       if (c->x86 != 6)
+               return false;
+
        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
                    c->x86_stepping == spectre_bad_microcodes[i].stepping)
index 4e588f3..285eb3e 100644 (file)
@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e)
                e <= QOS_L3_MBM_LOCAL_EVENT_ID);
 }
 
+struct rdt_parse_data {
+       struct rdtgroup         *rdtgrp;
+       char                    *buf;
+};
+
 /**
  * struct rdt_resource - attributes of an RDT resource
  * @rid:               The index of the resource
@@ -423,16 +428,19 @@ struct rdt_resource {
        struct rdt_cache        cache;
        struct rdt_membw        membw;
        const char              *format_str;
-       int (*parse_ctrlval)    (void *data, struct rdt_resource *r,
-                                struct rdt_domain *d);
+       int (*parse_ctrlval)(struct rdt_parse_data *data,
+                            struct rdt_resource *r,
+                            struct rdt_domain *d);
        struct list_head        evt_list;
        int                     num_rmid;
        unsigned int            mon_scale;
        unsigned long           fflags;
 };
 
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(void *_buf, struct rdt_resource *r,  struct rdt_domain *d);
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+             struct rdt_domain *d);
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+            struct rdt_domain *d);
 
 extern struct mutex rdtgroup_mutex;
 
@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
 void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
 int update_domains(struct rdt_resource *r, int closid);
+int closids_supported(void);
 void closid_free(int closid);
 int alloc_rmid(void);
 void free_rmid(u32 rmid);
index af358ca..0f53049 100644 (file)
@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
        return true;
 }
 
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+            struct rdt_domain *d)
 {
-       unsigned long data;
-       char *buf = _buf;
+       unsigned long bw_val;
 
        if (d->have_new_ctrl) {
                rdt_last_cmd_printf("duplicate domain %d\n", d->id);
                return -EINVAL;
        }
 
-       if (!bw_validate(buf, &data, r))
+       if (!bw_validate(data->buf, &bw_val, r))
                return -EINVAL;
-       d->new_ctrl = data;
+       d->new_ctrl = bw_val;
        d->have_new_ctrl = true;
 
        return 0;
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
        return true;
 }
 
-struct rdt_cbm_parse_data {
-       struct rdtgroup         *rdtgrp;
-       char                    *buf;
-};
-
 /*
  * Read one cache bit mask (hex). Check that it is valid for the current
  * resource type.
  */
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+             struct rdt_domain *d)
 {
-       struct rdt_cbm_parse_data *data = _data;
        struct rdtgroup *rdtgrp = data->rdtgrp;
        u32 cbm_val;
 
@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
 static int parse_line(char *line, struct rdt_resource *r,
                      struct rdtgroup *rdtgrp)
 {
-       struct rdt_cbm_parse_data data;
+       struct rdt_parse_data data;
        char *dom = NULL, *id;
        struct rdt_domain *d;
        unsigned long dom_id;
 
+       if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+           r->rid == RDT_RESOURCE_MBA) {
+               rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
+               return -EINVAL;
+       }
+
 next:
        if (!line || line[0] == '\0')
                return 0;
index b799c00..1b8e86a 100644 (file)
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...)
  *   limited as the number of resources grows.
  */
 static int closid_free_map;
+static int closid_free_map_len;
+
+int closids_supported(void)
+{
+       return closid_free_map_len;
+}
 
 static void closid_init(void)
 {
@@ -111,6 +117,7 @@ static void closid_init(void)
 
        /* CLOSID 0 is always reserved for the default group */
        closid_free_map &= ~1;
+       closid_free_map_len = rdt_min_closid;
 }
 
 static int closid_alloc(void)
@@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
                sw_shareable = 0;
                exclusive = 0;
                seq_printf(seq, "%d=", dom->id);
-               for (i = 0; i < r->num_closid; i++, ctrl++) {
+               for (i = 0; i < closids_supported(); i++, ctrl++) {
                        if (!closid_allocated(i))
                                continue;
                        mode = rdtgroup_mode_by_closid(i);
@@ -989,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
 
        /* Check for overlap with other resource groups */
        ctrl = d->ctrl_val;
-       for (i = 0; i < r->num_closid; i++, ctrl++) {
+       for (i = 0; i < closids_supported(); i++, ctrl++) {
                ctrl_b = (unsigned long *)ctrl;
                mode = rdtgroup_mode_by_closid(i);
                if (closid_allocated(i) && i != closid &&
@@ -1024,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
 {
        int closid = rdtgrp->closid;
        struct rdt_resource *r;
+       bool has_cache = false;
        struct rdt_domain *d;
 
        for_each_alloc_enabled_rdt_resource(r) {
+               if (r->rid == RDT_RESOURCE_MBA)
+                       continue;
+               has_cache = true;
                list_for_each_entry(d, &r->domains, list) {
                        if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
-                                                 rdtgrp->closid, false))
+                                                 rdtgrp->closid, false)) {
+                               rdt_last_cmd_puts("schemata overlaps\n");
                                return false;
+                       }
                }
        }
 
+       if (!has_cache) {
+               rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+               return false;
+       }
+
        return true;
 }
 
@@ -1085,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
                rdtgrp->mode = RDT_MODE_SHAREABLE;
        } else if (!strcmp(buf, "exclusive")) {
                if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
-                       rdt_last_cmd_printf("schemata overlaps\n");
                        ret = -EINVAL;
                        goto out;
                }
@@ -1155,8 +1172,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
        struct rdt_resource *r;
        struct rdt_domain *d;
        unsigned int size;
-       bool sep = false;
-       u32 cbm;
+       bool sep;
+       u32 ctrl;
 
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
@@ -1174,6 +1191,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
        }
 
        for_each_alloc_enabled_rdt_resource(r) {
+               sep = false;
                seq_printf(s, "%*s:", max_name_width, r->name);
                list_for_each_entry(d, &r->domains, list) {
                        if (sep)
@@ -1181,8 +1199,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
                        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                                size = 0;
                        } else {
-                               cbm = d->ctrl_val[rdtgrp->closid];
-                               size = rdtgroup_cbm_to_size(r, d, cbm);
+                               ctrl = (!is_mba_sc(r) ?
+                                               d->ctrl_val[rdtgrp->closid] :
+                                               d->mbps_val[rdtgrp->closid]);
+                               if (r->rid == RDT_RESOURCE_MBA)
+                                       size = ctrl;
+                               else
+                                       size = rdtgroup_cbm_to_size(r, d, ctrl);
                        }
                        seq_printf(s, "%d=%u", d->id, size);
                        sep = true;
@@ -2336,12 +2359,18 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
        u32 *ctrl;
 
        for_each_alloc_enabled_rdt_resource(r) {
+               /*
+                * Only initialize default allocations for CBM cache
+                * resources
+                */
+               if (r->rid == RDT_RESOURCE_MBA)
+                       continue;
                list_for_each_entry(d, &r->domains, list) {
                        d->have_new_ctrl = false;
                        d->new_ctrl = r->cache.shareable_bits;
                        used_b = r->cache.shareable_bits;
                        ctrl = d->ctrl_val;
-                       for (i = 0; i < r->num_closid; i++, ctrl++) {
+                       for (i = 0; i < closids_supported(); i++, ctrl++) {
                                if (closid_allocated(i) && i != closid) {
                                        mode = rdtgroup_mode_by_closid(i);
                                        if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2373,6 +2402,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
        }
 
        for_each_alloc_enabled_rdt_resource(r) {
+               /*
+                * Only initialize default allocations for CBM cache
+                * resources
+                */
+               if (r->rid == RDT_RESOURCE_MBA)
+                       continue;
                ret = update_domains(r, rdtgrp->closid);
                if (ret < 0) {
                        rdt_last_cmd_puts("failed to initialize allocations\n");
index 0624957..07b5fc0 100644 (file)
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
        struct microcode_amd *mc_amd;
        struct ucode_cpu_info *uci;
        struct ucode_patch *p;
+       enum ucode_state ret;
        u32 rev, dummy;
 
        BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
 
        /* need to apply patch? */
        if (rev >= mc_amd->hdr.patch_id) {
-               c->microcode = rev;
-               uci->cpu_sig.rev = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
                        cpu, mc_amd->hdr.patch_id);
                return UCODE_ERROR;
        }
-       pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
-               mc_amd->hdr.patch_id);
 
-       uci->cpu_sig.rev = mc_amd->hdr.patch_id;
-       c->microcode = mc_amd->hdr.patch_id;
+       rev = mc_amd->hdr.patch_id;
+       ret = UCODE_UPDATED;
+
+       pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
 
-       return UCODE_UPDATED;
+out:
+       uci->cpu_sig.rev = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
+
+       return ret;
 }
 
 static int install_equiv_cpu_table(const u8 *buf)
index 97ccf4c..16936a2 100644 (file)
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct microcode_intel *mc;
+       enum ucode_state ret;
        static int prev_rev;
        u32 rev;
 
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
         */
        rev = intel_get_microcode_revision();
        if (rev >= mc->hdr.rev) {
-               uci->cpu_sig.rev = rev;
-               c->microcode = rev;
-               return UCODE_OK;
+               ret = UCODE_OK;
+               goto out;
        }
 
        /*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
                prev_rev = rev;
        }
 
+       ret = UCODE_UPDATED;
+
+out:
        uci->cpu_sig.rev = rev;
-       c->microcode = rev;
+       c->microcode     = rev;
+
+       /* Update boot_cpu_data's revision too, if we're on the BSP: */
+       if (c->cpu_index == boot_cpu_data.cpu_index)
+               boot_cpu_data.microcode = rev;
 
-       return UCODE_UPDATED;
+       return ret;
 }
 
 static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
index 9c86529..2b58864 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/bug.h>
 #include <linux/nmi.h>
 #include <linux/sysfs.h>
+#include <linux/kasan.h>
 
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
@@ -89,14 +90,24 @@ static void printk_stack_address(unsigned long address, int reliable,
  * Thus, the 2/3rds prologue and 64 byte OPCODE_BUFSIZE is just a random
  * guesstimate in attempt to achieve all of the above.
  */
-void show_opcodes(u8 *rip, const char *loglvl)
+void show_opcodes(struct pt_regs *regs, const char *loglvl)
 {
 #define PROLOGUE_SIZE 42
 #define EPILOGUE_SIZE 21
 #define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
        u8 opcodes[OPCODE_BUFSIZE];
+       unsigned long prologue = regs->ip - PROLOGUE_SIZE;
+       bool bad_ip;
 
-       if (probe_kernel_read(opcodes, rip - PROLOGUE_SIZE, OPCODE_BUFSIZE)) {
+       /*
+        * Make sure userspace isn't trying to trick us into dumping kernel
+        * memory by pointing the userspace instruction pointer at it.
+        */
+       bad_ip = user_mode(regs) &&
+               __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
+
+       if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
+                                       OPCODE_BUFSIZE)) {
                printk("%sCode: Bad RIP value.\n", loglvl);
        } else {
                printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
@@ -112,7 +123,7 @@ void show_ip(struct pt_regs *regs, const char *loglvl)
 #else
        printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
 #endif
-       show_opcodes((u8 *)regs->ip, loglvl);
+       show_opcodes(regs, loglvl);
 }
 
 void show_iret_regs(struct pt_regs *regs)
@@ -135,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
         * they can be printed in the right context.
         */
        if (!partial && on_stack(info, regs, sizeof(*regs))) {
-               __show_regs(regs, 0);
+               __show_regs(regs, SHOW_REGS_SHORT);
 
        } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
                                       IRET_FRAME_SIZE)) {
@@ -333,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
        oops_exit();
 
        /* Executive summary in case the oops scrolled away */
-       __show_regs(&exec_summary_regs, true);
+       __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
 
        if (!signr)
                return;
@@ -346,7 +357,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
         * We're not going to return, but we might be on an IST stack or
         * have very little stack space left.  Rewind the stack and kill
         * the task.
+        * Before we rewind the stack, we have to tell KASAN that we're going to
+        * reuse the task stack and that existing poisons are invalid.
         */
+       kasan_unpoison_task_stack(current);
        rewind_stack_do_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);
@@ -393,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
 
 void show_regs(struct pt_regs *regs)
 {
-       bool all = true;
-
        show_regs_print_info(KERN_DEFAULT);
 
-       if (IS_ENABLED(CONFIG_X86_32))
-               all = !user_mode(regs);
-
-       __show_regs(regs, all);
+       __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
 
        /*
         * When in-kernel, we also print out the stack at the time of the fault..
index f260e45..e8c8c5d 100644 (file)
@@ -7,11 +7,17 @@
 #include <linux/eisa.h>
 #include <linux/io.h>
 
+#include <xen/xen.h>
+
 static __init int eisa_bus_probe(void)
 {
-       void __iomem *p = ioremap(0x0FFFD9, 4);
+       void __iomem *p;
+
+       if (xen_pv_domain() && !xen_initial_domain())
+               return 0;
 
-       if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+       p = ioremap(0x0FFFD9, 4);
+       if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
                EISA_bus = 1;
        iounmap(p);
        return 0;
index 8047379..ddee1f0 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/bootparam_utils.h>
 #include <asm/microcode.h>
 #include <asm/kasan.h>
+#include <asm/fixmap.h>
 
 /*
  * Manage page tables very early on.
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr)
 unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
 {
+       unsigned long vaddr, vaddr_end;
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
        pud[511] += load_delta;
 
        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
-       pmd[506] += load_delta;
+       for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
+               pmd[i] += load_delta;
 
        /*
         * Set up the identity mapping for the switchover.  These
@@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
        sme_encrypt_kernel(bp);
 
        /*
+        * Clear the memory encryption mask from the .bss..decrypted section.
+        * The bss section will be memset to zero later in the initialization so
+        * there is no need to zero it after changing the memory encryption
+        * attribute.
+        */
+       if (mem_encrypt_active()) {
+               vaddr = (unsigned long)__start_bss_decrypted;
+               vaddr_end = (unsigned long)__end_bss_decrypted;
+               for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+                       i = pmd_index(vaddr);
+                       pmd[i] -= sme_get_me_mask();
+               }
+       }
+
+       /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
index 15ebc2f..a3618cf 100644 (file)
@@ -24,6 +24,7 @@
 #include "../entry/calling.h"
 #include <asm/export.h>
 #include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt)
                KERNEL_IMAGE_SIZE/PMD_SIZE)
 
 NEXT_PAGE(level2_fixmap_pgt)
-       .fill   506,8,0
-       .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
-       /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
-       .fill   5,8,0
+       .fill   (512 - 4 - FIXMAP_PMD_NUM),8,0
+       pgtno = 0
+       .rept (FIXMAP_PMD_NUM)
+       .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+               + _PAGE_TABLE_NOENC;
+       pgtno = pgtno + 1
+       .endr
+       /* 6 MB reserved space + a 2MB hole */
+       .fill   4,8,0
 
 NEXT_PAGE(level1_fixmap_pgt)
+       .rept (FIXMAP_PMD_NUM)
        .fill   512,8,0
+       .endr
 
 #undef PMDS
 
index 1e67646..013fe3d 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/sched/clock.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/set_memory.h>
 
 #include <asm/hypervisor.h>
 #include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
        (PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
 
 static struct pvclock_vsyscall_time_info
-                       hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
-static struct pvclock_wall_clock wall_clock;
+                       hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock __bss_decrypted;
 static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+static struct pvclock_vsyscall_time_info *hvclock_mem;
 
 static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
 {
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
        native_machine_shutdown();
 }
 
+static void __init kvmclock_init_mem(void)
+{
+       unsigned long ncpus;
+       unsigned int order;
+       struct page *p;
+       int r;
+
+       if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
+               return;
+
+       ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
+       order = get_order(ncpus * sizeof(*hvclock_mem));
+
+       p = alloc_pages(GFP_KERNEL, order);
+       if (!p) {
+               pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
+               return;
+       }
+
+       hvclock_mem = page_address(p);
+
+       /*
+        * hvclock is shared between the guest and the hypervisor, must
+        * be mapped decrypted.
+        */
+       if (sev_active()) {
+               r = set_memory_decrypted((unsigned long) hvclock_mem,
+                                        1UL << order);
+               if (r) {
+                       __free_pages(p, order);
+                       hvclock_mem = NULL;
+                       pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
+                       return;
+               }
+       }
+
+       memset(hvclock_mem, 0, PAGE_SIZE << order);
+}
+
 static int __init kvm_setup_vsyscall_timeinfo(void)
 {
 #ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
 
        kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
 #endif
+
+       kvmclock_init_mem();
+
        return 0;
 }
 early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
        /* Use the static page for the first CPUs, allocate otherwise */
        if (cpu < HVC_BOOT_ARRAY_SIZE)
                p = &hv_clock_boot[cpu];
+       else if (hvclock_mem)
+               p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
        else
-               p = kzalloc(sizeof(*p), GFP_KERNEL);
+               return -ENOMEM;
 
        per_cpu(hv_clock_per_cpu, cpu) = p;
        return p ? 0 : -ENOMEM;
index afdb303..8dc69d8 100644 (file)
@@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf,
 
        if (len < 5) {
 #ifdef CONFIG_RETPOLINE
-               WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+               WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
 #endif
                return len;     /* call too long for patch site */
        }
@@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
 
        if (len < 5) {
 #ifdef CONFIG_RETPOLINE
-               WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+               WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
 #endif
                return len;     /* call too long for patch site */
        }
index 2924fd4..5046a3c 100644 (file)
@@ -59,7 +59,7 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
 
-       if (!all)
+       if (mode != SHOW_REGS_ALL)
                return;
 
        cr0 = read_cr0();
index a451bc3..ea5ea85 100644 (file)
@@ -62,7 +62,7 @@
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);
 
-       if (!all)
+       if (mode == SHOW_REGS_SHORT)
                return;
 
+       if (mode == SHOW_REGS_USER) {
+               rdmsrl(MSR_FS_BASE, fs);
+               rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+               printk(KERN_DEFAULT "FS:  %016lx GS:  %016lx\n",
+                      fs, shadowgs);
+               return;
+       }
+
        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
index 12cbe2b..738bf42 100644 (file)
@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
        /*
         * Currently CPU0 is only hotpluggable on Intel platforms. Other
         * vendors can add hotplug support later.
+        * Xen PV guests don't support CPU0 hotplug at all.
         */
-       if (c->x86_vendor != X86_VENDOR_INTEL)
+       if (c->x86_vendor != X86_VENDOR_INTEL ||
+           boot_cpu_has(X86_FEATURE_XENPV))
                cpu0_hotpluggable = 0;
 
        /*
index 1463468..b52bd2b 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/apic.h>
 #include <asm/intel-family.h>
 #include <asm/i8259.h>
+#include <asm/uv/uv.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1415,7 +1416,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
 
 static unsigned long __init get_loops_per_jiffy(void)
 {
-       unsigned long lpj = tsc_khz * KHZ;
+       u64 lpj = (u64)tsc_khz * KHZ;
 
        do_div(lpj, HZ);
        return lpj;
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
 {
        if (!boot_cpu_has(X86_FEATURE_TSC))
                return;
+       /* Don't change UV TSC multi-chassis synchronization */
+       if (is_early_uv_system())
+               return;
        if (!determine_cpu_tsc_frequencies(true))
                return;
        loops_per_jiffy = get_loops_per_jiffy();
index 8bde0a4..5dd3317 100644 (file)
@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
 #define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
 #define ALIGN_ENTRY_TEXT_END   . = ALIGN(PMD_SIZE);
 
+/*
+ * This section contains data which will be mapped as decrypted. Memory
+ * encryption operates on a page basis. Make this section PMD-aligned
+ * to avoid splitting the pages while mapping the section early.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#define BSS_DECRYPTED                                          \
+       . = ALIGN(PMD_SIZE);                                    \
+       __start_bss_decrypted = .;                              \
+       *(.bss..decrypted);                                     \
+       . = ALIGN(PAGE_SIZE);                                   \
+       __start_bss_decrypted_unused = .;                       \
+       . = ALIGN(PMD_SIZE);                                    \
+       __end_bss_decrypted = .;                                \
+
 #else
 
 #define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;
 
 #define ALIGN_ENTRY_TEXT_BEGIN
 #define ALIGN_ENTRY_TEXT_END
+#define BSS_DECRYPTED
 
 #endif
 
@@ -355,6 +373,7 @@ SECTIONS
                __bss_start = .;
                *(.bss..page_aligned)
                *(.bss)
+               BSS_DECRYPTED
                . = ALIGN(PAGE_SIZE);
                __bss_stop = .;
        }
index 0cefba2..fbb0e6d 100644 (file)
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 }
 
 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
-                   unsigned long ipi_bitmap_high, int min,
+                   unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
 {
        int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);
 
+       if (min > map->max_apic_id)
+               goto out;
        /* Bits above cluster_size are masked in the caller.  */
-       for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
-               vcpu = map->phys_map[min + i]->vcpu;
-               count += kvm_apic_set_irq(vcpu, &irq, NULL);
+       for_each_set_bit(i, &ipi_bitmap_low,
+               min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+               if (map->phys_map[min + i]) {
+                       vcpu = map->phys_map[min + i]->vcpu;
+                       count += kvm_apic_set_irq(vcpu, &irq, NULL);
+               }
        }
 
        min += cluster_size;
-       for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
-               vcpu = map->phys_map[min + i]->vcpu;
-               count += kvm_apic_set_irq(vcpu, &irq, NULL);
+
+       if (min > map->max_apic_id)
+               goto out;
+
+       for_each_set_bit(i, &ipi_bitmap_high,
+               min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+               if (map->phys_map[min + i]) {
+                       vcpu = map->phys_map[min + i]->vcpu;
+                       count += kvm_apic_set_irq(vcpu, &irq, NULL);
+               }
        }
 
+out:
        rcu_read_unlock();
        return count;
 }
@@ -1331,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
 
 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
 {
-       return kvm_apic_hw_enabled(apic) &&
-           addr >= apic->base_address &&
-           addr < apic->base_address + LAPIC_MMIO_LENGTH;
+       return addr >= apic->base_address &&
+               addr < apic->base_address + LAPIC_MMIO_LENGTH;
 }
 
 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1345,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;
 
+       if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+               if (!kvm_check_has_quirk(vcpu->kvm,
+                                        KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+                       return -EOPNOTSUPP;
+
+               memset(data, 0xff, len);
+               return 0;
+       }
+
        kvm_lapic_reg_read(apic, offset, len, data);
 
        return 0;
@@ -1904,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
        if (!apic_mmio_in_range(apic, address))
                return -EOPNOTSUPP;
 
+       if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+               if (!kvm_check_has_quirk(vcpu->kvm,
+                                        KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+                       return -EOPNOTSUPP;
+
+               return 0;
+       }
+
        /*
         * APIC register must be aligned on 128-bits boundary.
         * 32/64/128 bits registers must be accessed thru 32 bits.
index a282321..51b953a 100644 (file)
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-       u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-                  shadow_nonpresent_or_rsvd_mask;
-       u64 gpa = spte & ~mask;
+       u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
        gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
               & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+       u8 low_phys_bits;
+
        shadow_user_mask = 0;
        shadow_accessed_mask = 0;
        shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         */
+       low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_data.x86_phys_bits <
-           52 - shadow_nonpresent_or_rsvd_mask_len)
+           52 - shadow_nonpresent_or_rsvd_mask_len) {
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(boot_cpu_data.x86_phys_bits -
                                  shadow_nonpresent_or_rsvd_mask_len,
                                  boot_cpu_data.x86_phys_bits - 1);
+               low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+       }
+       shadow_nonpresent_or_rsvd_lower_gfn_mask =
+               GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
@@ -899,7 +915,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
 {
        /*
         * Make sure the write to vcpu->mode is not reordered in front of
-        * reads to sptes.  If it does, kvm_commit_zap_page() can see us
+        * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
         * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
@@ -1853,11 +1869,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
        return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
 {
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
@@ -5217,7 +5228,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                       void *insn, int insn_len)
 {
-       int r, emulation_type = EMULTYPE_RETRY;
+       int r, emulation_type = 0;
        enum emulation_result er;
        bool direct = vcpu->arch.mmu.direct_map;
 
@@ -5230,10 +5241,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
        r = RET_PF_INVALID;
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
                r = handle_mmio_page_fault(vcpu, cr2, direct);
-               if (r == RET_PF_EMULATE) {
-                       emulation_type = 0;
+               if (r == RET_PF_EMULATE)
                        goto emulate;
-               }
        }
 
        if (r == RET_PF_INVALID) {
@@ -5260,8 +5269,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
                return 1;
        }
 
-       if (mmio_info_in_cache(vcpu, cr2, direct))
-               emulation_type = 0;
+       /*
+        * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+        * optimistically try to just unprotect the page and let the processor
+        * re-execute the instruction that caused the page fault.  Do not allow
+        * retrying MMIO emulation, as it's not only pointless but could also
+        * cause us to enter an infinite loop because the processor will keep
+        * faulting on the non-existent MMIO address.  Retrying an instruction
+        * from a nested guest is also pointless and dangerous as we are only
+        * explicitly shadowing L1's page tables, i.e. unprotecting something
+        * for L1 isn't going to magically fix whatever issue cause L2 to fail.
+        */
+       if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+               emulation_type = EMULTYPE_ALLOW_RETRY;
 emulate:
        /*
         * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
@@ -5413,7 +5433,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
        MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-       kvm_init_mmu(vcpu, true);
+       /*
+        * kvm_mmu_setup() is called only on vCPU initialization.  
+        * Therefore, no need to reset mmu roots as they are not yet
+        * initialized.
+        */
+       kvm_init_mmu(vcpu, false);
 }
 
 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
index 6276140..d96092b 100644 (file)
@@ -776,7 +776,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        }
 
        if (!svm->next_rip) {
-               if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+               if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
                                EMULATE_DONE)
                        printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
@@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void)
        min_sev_asid = cpuid_edx(0x8000001F);
 
        /* Initialize SEV ASID bitmap */
-       sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
-                               sizeof(unsigned long), GFP_KERNEL);
+       sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                return 1;
 
@@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void)
        int cpu;
 
        if (svm_sev_enabled())
-               kfree(sev_asid_bitmap);
+               bitmap_free(sev_asid_bitmap);
 
        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);
@@ -2715,7 +2714,7 @@ static int gp_interception(struct vcpu_svm *svm)
 
        WARN_ON_ONCE(!enable_vmware_backdoor);
 
-       er = emulate_instruction(vcpu,
+       er = kvm_emulate_instruction(vcpu,
                EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
        if (er == EMULATE_USER_EXIT)
                return 0;
@@ -2819,7 +2818,7 @@ static int io_interception(struct vcpu_svm *svm)
        string = (io_info & SVM_IOIO_STR_MASK) != 0;
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        if (string)
-               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3860,7 @@ static int iret_interception(struct vcpu_svm *svm)
 static int invlpg_interception(struct vcpu_svm *svm)
 {
        if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
-               return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 
        kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
        return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,13 +3868,13 @@ static int invlpg_interception(struct vcpu_svm *svm)
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-       return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
 }
 
 static int rsm_interception(struct vcpu_svm *svm)
 {
-       return x86_emulate_instruction(&svm->vcpu, 0, 0,
-                                      rsm_ins_bytes, 2) == EMULATE_DONE;
+       return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+                                       rsm_ins_bytes, 2) == EMULATE_DONE;
 }
 
 static int rdpmc_interception(struct vcpu_svm *svm)
@@ -4700,7 +4699,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
                ret = avic_unaccel_trap_write(svm);
        } else {
                /* Handling Fault */
-               ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+               ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
        }
 
        return ret;
@@ -6747,7 +6746,7 @@ e_free:
 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
 {
        unsigned long vaddr, vaddr_end, next_vaddr;
-       unsigned long dst_vaddr, dst_vaddr_end;
+       unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
@@ -6763,7 +6762,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;
-       dst_vaddr_end = dst_vaddr + size;
 
        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;
@@ -7150,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .check_intercept = svm_check_intercept,
        .handle_external_intr = svm_handle_external_intr,
 
+       .request_immediate_exit = __kvm_request_immediate_exit,
+
        .sched_in = svm_sched_in,
 
        .pmu_ops = &amd_pmu_ops,
index 1d26f3c..612fd17 100644 (file)
@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
 
 #define MSR_BITMAP_MODE_X2APIC         1
 #define MSR_BITMAP_MODE_X2APIC_APICV   2
-#define MSR_BITMAP_MODE_LM             4
 
 #define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL
 
@@ -397,6 +396,7 @@ struct loaded_vmcs {
        int cpu;
        bool launched;
        bool nmi_known_unmasked;
+       bool hv_timer_armed;
        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;
        ktime_t entry_time;
@@ -856,6 +856,7 @@ struct nested_vmx {
 
        /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
        u64 vmcs01_debugctl;
+       u64 vmcs01_guest_bndcfgs;
 
        u16 vpid02;
        u16 last_vpid;
@@ -1019,6 +1020,8 @@ struct vcpu_vmx {
        int ple_window;
        bool ple_window_dirty;
 
+       bool req_immediate_exit;
+
        /* Support for PML */
 #define PML_ENTITY_NUM         512
        struct page *pml_pg;
@@ -2864,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
        u16 fs_sel, gs_sel;
        int i;
 
+       vmx->req_immediate_exit = false;
+
        if (vmx->loaded_cpu_state)
                return;
 
@@ -2894,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
                vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
        }
 
-       if (is_long_mode(&vmx->vcpu))
-               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #else
        savesegment(fs, fs_sel);
        savesegment(gs, gs_sel);
@@ -2946,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
        vmx->loaded_cpu_state = NULL;
 
 #ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu))
-               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 #endif
        if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
                kvm_load_ldt(host_state->ldt_sel);
@@ -2975,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-       if (is_long_mode(&vmx->vcpu)) {
-               preempt_disable();
-               if (vmx->loaded_cpu_state)
-                       rdmsrl(MSR_KERNEL_GS_BASE,
-                              vmx->msr_guest_kernel_gs_base);
-               preempt_enable();
-       }
+       preempt_disable();
+       if (vmx->loaded_cpu_state)
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       preempt_enable();
        return vmx->msr_guest_kernel_gs_base;
 }
 
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-       if (is_long_mode(&vmx->vcpu)) {
-               preempt_disable();
-               if (vmx->loaded_cpu_state)
-                       wrmsrl(MSR_KERNEL_GS_BASE, data);
-               preempt_enable();
-       }
+       preempt_disable();
+       if (vmx->loaded_cpu_state)
+               wrmsrl(MSR_KERNEL_GS_BASE, data);
+       preempt_enable();
        vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@@ -3528,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
                VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
 
-       if (kvm_mpx_supported())
-               msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
        /* We support free control of debug control saving. */
        msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
 
@@ -3547,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                VM_ENTRY_LOAD_IA32_PAT;
        msrs->entry_ctls_high |=
                (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
-       if (kvm_mpx_supported())
-               msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
        /* We support free control of debug control loading. */
        msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3596,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                msrs->secondary_ctls_high);
        msrs->secondary_ctls_low = 0;
        msrs->secondary_ctls_high &=
-               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                SECONDARY_EXEC_DESC |
                SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
                SECONDARY_EXEC_APIC_REGISTER_VIRT |
                SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
                SECONDARY_EXEC_WBINVD_EXITING;
+
        /*
         * We can emulate "VMCS shadowing," even if the hardware
         * doesn't support it.
@@ -3658,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
                msrs->secondary_ctls_high |=
                        SECONDARY_EXEC_UNRESTRICTED_GUEST;
 
+       if (flexpriority_enabled)
+               msrs->secondary_ctls_high |=
+                       SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
        /* miscellaneous data */
        rdmsr(MSR_IA32_VMX_MISC,
                msrs->misc_low,
@@ -5068,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        if (!msr)
                return;
 
-       /*
-        * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
-        * 64-bit mode as a 64-bit kernel may frequently access the
-        * MSR.  This means we need to manually save/restore the MSR
-        * when switching between guest and host state, but only if
-        * the guest is in 64-bit mode.  Sync our cached value if the
-        * guest is transitioning to 32-bit mode and the CPU contains
-        * guest state, i.e. the cache is stale.
-        */
-#ifdef CONFIG_X86_64
-       if (!(efer & EFER_LMA))
-               (void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
        vcpu->arch.efer = efer;
        if (efer & EFER_LMA) {
                vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -5393,9 +5377,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                 * To use VMXON (and later other VMX instructions), a guest
                 * must first be able to turn on cr4.VMXE (see handle_vmon()).
                 * So basically the check on whether to allow nested VMX
-                * is here.
+                * is here.  We operate under the default treatment of SMM,
+                * so VMX cannot be enabled under SMM.
                 */
-               if (!nested_vmx_allowed(vcpu))
+               if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
                        return 1;
        }
 
@@ -6072,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
                        mode |= MSR_BITMAP_MODE_X2APIC_APICV;
        }
 
-       if (is_long_mode(vcpu))
-               mode |= MSR_BITMAP_MODE_LM;
-
        return mode;
 }
 
@@ -6115,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
        if (!changed)
                return;
 
-       vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
-                                 !(mode & MSR_BITMAP_MODE_LM));
-
        if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
                vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
 
@@ -6183,6 +6162,32 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
        nested_mark_vmcs12_pages_dirty(vcpu);
 }
 
+static u8 vmx_get_rvi(void)
+{
+       return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
+static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       void *vapic_page;
+       u32 vppr;
+       int rvi;
+
+       if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
+               !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
+               WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+               return false;
+
+       rvi = vmx_get_rvi();
+
+       vapic_page = kmap(vmx->nested.virtual_apic_page);
+       vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
+       kunmap(vmx->nested.virtual_apic_page);
+
+       return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
                                                     bool nested)
 {
@@ -6983,7 +6988,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
         * Cause the #SS fault with 0 error code in VM86 mode.
         */
        if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
-               if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+               if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
                        if (vcpu->arch.halt_request) {
                                vcpu->arch.halt_request = 0;
                                return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7059,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 
        if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
                WARN_ON_ONCE(!enable_vmware_backdoor);
-               er = emulate_instruction(vcpu,
+               er = kvm_emulate_instruction(vcpu,
                        EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
                if (er == EMULATE_USER_EXIT)
                        return 0;
@@ -7157,7 +7162,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
        ++vcpu->stat.io_exits;
 
        if (string)
-               return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+               return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
        port = exit_qualification >> 16;
        size = (exit_qualification & 7) + 1;
@@ -7231,7 +7236,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
 static int handle_desc(struct kvm_vcpu *vcpu)
 {
        WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7485,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
 
 static int handle_invd(struct kvm_vcpu *vcpu)
 {
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7552,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
                        return kvm_skip_emulated_instruction(vcpu);
                }
        }
-       return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+       return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7709,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                        return kvm_skip_emulated_instruction(vcpu);
                else
-                       return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
-                                                      NULL, 0) == EMULATE_DONE;
+                       return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+                                                               EMULATE_DONE;
        }
 
        return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -7748,7 +7753,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                if (kvm_test_request(KVM_REQ_EVENT, vcpu))
                        return 1;
 
-               err = emulate_instruction(vcpu, 0);
+               err = kvm_emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_USER_EXIT) {
                        ++vcpu->stat.mmio_exits;
@@ -7966,6 +7971,9 @@ static __init int hardware_setup(void)
                kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
        }
 
+       if (!cpu_has_vmx_preemption_timer())
+               kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+
        if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
                u64 vmx_msr;
 
@@ -9208,7 +9216,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
 
 static int handle_preemption_timer(struct kvm_vcpu *vcpu)
 {
-       kvm_lapic_expired_hv_timer(vcpu);
+       if (!to_vmx(vcpu)->req_immediate_exit)
+               kvm_lapic_expired_hv_timer(vcpu);
        return 1;
 }
 
@@ -10214,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
        if (!lapic_in_kernel(vcpu))
                return;
 
+       if (!flexpriority_enabled &&
+           !cpu_has_vmx_virtualize_x2apic_mode())
+               return;
+
        /* Postpone execution until vmcs01 is the current VMCS. */
        if (is_guest_mode(vcpu)) {
                to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
                return;
        }
 
-       if (!cpu_need_tpr_shadow(vcpu))
-               return;
-
        sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
        sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
                              SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10344,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
        return max_irr;
 }
 
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+       u8 rvi = vmx_get_rvi();
+       u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+       return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -10595,24 +10613,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
-static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
+{
+       vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
+       if (!vmx->loaded_vmcs->hv_timer_armed)
+               vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+                             PIN_BASED_VMX_PREEMPTION_TIMER);
+       vmx->loaded_vmcs->hv_timer_armed = true;
+}
+
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 tscl;
        u32 delta_tsc;
 
-       if (vmx->hv_deadline_tsc == -1)
+       if (vmx->req_immediate_exit) {
+               vmx_arm_hv_timer(vmx, 0);
                return;
+       }
 
-       tscl = rdtsc();
-       if (vmx->hv_deadline_tsc > tscl)
-               /* sure to be 32 bit only because checked on set_hv_timer */
-               delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
-                       cpu_preemption_timer_multi);
-       else
-               delta_tsc = 0;
+       if (vmx->hv_deadline_tsc != -1) {
+               tscl = rdtsc();
+               if (vmx->hv_deadline_tsc > tscl)
+                       /* set_hv_timer ensures the delta fits in 32-bits */
+                       delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+                               cpu_preemption_timer_multi);
+               else
+                       delta_tsc = 0;
+
+               vmx_arm_hv_timer(vmx, delta_tsc);
+               return;
+       }
 
-       vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+       if (vmx->loaded_vmcs->hv_timer_armed)
+               vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+                               PIN_BASED_VMX_PREEMPTION_TIMER);
+       vmx->loaded_vmcs->hv_timer_armed = false;
 }
 
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10672,7 +10709,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
        atomic_switch_perf_msrs(vmx);
 
-       vmx_arm_hv_timer(vcpu);
+       vmx_update_hv_timer(vcpu);
 
        /*
         * If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -11214,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 #undef cr4_fixed1_update
 }
 
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (kvm_mpx_supported()) {
+               bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+               if (mpx_enabled) {
+                       vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+                       vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+               } else {
+                       vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+                       vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+               }
+       }
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11230,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
                        ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
 
-       if (nested_vmx_allowed(vcpu))
+       if (nested_vmx_allowed(vcpu)) {
                nested_vmx_cr_fixed1_bits_update(vcpu);
+               nested_vmx_entry_exit_ctls_update(vcpu);
+       }
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -11427,16 +11483,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
        u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (vcpu->arch.virtual_tsc_khz == 0)
-               return;
-
-       /* Make sure short timeouts reliably trigger an immediate vmexit.
-        * hrtimer_start does not guarantee this. */
-       if (preemption_timeout <= 1) {
+       /*
+        * A timer value of zero is architecturally guaranteed to cause
+        * a VMExit prior to executing any instructions in the guest.
+        */
+       if (preemption_timeout == 0) {
                vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
                return;
        }
 
+       if (vcpu->arch.virtual_tsc_khz == 0)
+               return;
+
        preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
        preemption_timeout *= 1000000;
        do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
@@ -11646,11 +11704,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
         * bits 15:8 should be zero in posted_intr_nv,
         * the descriptor address has been already checked
         * in nested_get_vmcs12_pages.
+        *
+        * bits 5:0 of posted_intr_desc_addr should be zero.
         */
        if (nested_cpu_has_posted_intr(vmcs12) &&
           (!nested_cpu_has_vid(vmcs12) ||
            !nested_exit_intr_ack_set(vcpu) ||
-           vmcs12->posted_intr_nv & 0xff00))
+           (vmcs12->posted_intr_nv & 0xff00) ||
+           (vmcs12->posted_intr_desc_addr & 0x3f) ||
+           (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
                return -EINVAL;
 
        /* tpr shadow is needed by all apicv features. */
@@ -11993,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        set_cr4_guest_host_mask(vmx);
 
-       if (vmx_mpx_supported())
-               vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+       if (kvm_mpx_supported()) {
+               if (vmx->nested.nested_run_pending &&
+                       (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+                       vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+               else
+                       vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+       }
 
        if (enable_vpid) {
                if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12076,11 +12143,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 
        exec_control = vmcs12->pin_based_vm_exec_control;
 
-       /* Preemption timer setting is only taken from vmcs01.  */
-       exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+       /* Preemption timer setting is computed directly in vmx_vcpu_run.  */
        exec_control |= vmcs_config.pin_based_exec_ctrl;
-       if (vmx->hv_deadline_tsc == -1)
-               exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+       exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+       vmx->loaded_vmcs->hv_timer_armed = false;
 
        /* Posted interrupts setting is only taken from vmcs12.  */
        if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -12318,6 +12384,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
            vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
                return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
+       if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
+               return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
        if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
                return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
 
@@ -12537,12 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        bool from_vmentry = !!exit_qual;
        u32 dummy_exit_qual;
+       bool evaluate_pending_interrupts;
        int r = 0;
 
+       evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+               (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+       if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+               evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
+
        enter_guest_mode(vcpu);
 
        if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
                vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+       if (kvm_mpx_supported() &&
+               !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+               vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
 
        vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
        vmx_segment_cache_clear(vmx);
@@ -12575,6 +12653,23 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
        }
 
        /*
+        * If L1 had a pending IRQ/NMI until it executed
+        * VMLAUNCH/VMRESUME which wasn't delivered because it was
+        * disallowed (e.g. interrupts disabled), L0 needs to
+        * evaluate if this pending event should cause an exit from L2
+        * to L1 or delivered directly to L2 (e.g. in case L1 doesn't
+        * intercept EXTERNAL_INTERRUPT).
+        *
+        * Usually this would be handled by the processor noticing an
+        * IRQ/NMI window request, or checking RVI during evaluation of
+        * pending virtual interrupts.  However, this setting was done
+        * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+        * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+        */
+       if (unlikely(evaluate_pending_interrupts))
+               kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+       /*
         * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
         * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
         * returned as far as L1 is concerned. It will only return (and set
@@ -12841,6 +12936,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
        return 0;
 }
 
+static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+       to_vmx(vcpu)->req_immediate_exit = true;
+}
+
 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
 {
        ktime_t remaining =
@@ -13231,12 +13331,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
        vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
-       if (vmx->hv_deadline_tsc == -1)
-               vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-                               PIN_BASED_VMX_PREEMPTION_TIMER);
-       else
-               vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-                             PIN_BASED_VMX_PREEMPTION_TIMER);
+
        if (kvm_has_tsc_control)
                decache_tsc_multiplier(vmx);
 
@@ -13440,18 +13535,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
                return -ERANGE;
 
        vmx->hv_deadline_tsc = tscl + delta_tsc;
-       vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
-                       PIN_BASED_VMX_PREEMPTION_TIMER);
-
        return delta_tsc == 0;
 }
 
 static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
 {
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       vmx->hv_deadline_tsc = -1;
-       vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
-                       PIN_BASED_VMX_PREEMPTION_TIMER);
+       to_vmx(vcpu)->hv_deadline_tsc = -1;
 }
 #endif
 
@@ -13932,6 +14021,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
                return -EINVAL;
 
+       /*
+        * SMM temporarily disables VMX, so we cannot be in guest mode,
+        * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
+        * must be zero.
+        */
+       if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+               return -EINVAL;
+
        if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
            !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
                return -EINVAL;
@@ -13988,9 +14085,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
            check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
                return -EINVAL;
 
-       if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
-               vmx->nested.nested_run_pending = 1;
-
        vmx->nested.dirty_vmcs12 = true;
        ret = enter_vmx_non_root_mode(vcpu, NULL);
        if (ret)
@@ -14078,6 +14172,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .apicv_post_state_restore = vmx_apicv_post_state_restore,
        .hwapic_irr_update = vmx_hwapic_irr_update,
        .hwapic_isr_update = vmx_hwapic_isr_update,
+       .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
        .sync_pir_to_irr = vmx_sync_pir_to_irr,
        .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
 
@@ -14111,6 +14206,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .umip_emulated = vmx_umip_emulated,
 
        .check_nested_events = vmx_check_nested_events,
+       .request_immediate_exit = vmx_request_immediate_exit,
 
        .sched_in = vmx_sched_in,
 
index 506bd2b..ca71773 100644 (file)
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
        gfn_t gfn;
        int r;
 
-       if (is_long_mode(vcpu) || !is_pae(vcpu))
+       if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
                return false;
 
        if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_PLATFORM_INFO:
                if (!msr_info->host_initiated ||
-                   data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
                    (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
                     cpuid_fault_enabled(vcpu)))
                        return 1;
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = vcpu->arch.osvw.status;
                break;
        case MSR_PLATFORM_INFO:
+               if (!msr_info->host_initiated &&
+                   !vcpu->kvm->arch.guest_can_read_msr_platform_info)
+                       return 1;
                msr_info->data = vcpu->arch.msr_platform_info;
                break;
        case MSR_MISC_FEATURES_ENABLES:
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_SPLIT_IRQCHIP:
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_GET_MSR_FEATURES:
+       case KVM_CAP_MSR_PLATFORM_INFO:
                r = 1;
                break;
        case KVM_CAP_SYNC_REGS:
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        break;
 
                BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+               r = -EFAULT;
                if (get_user(user_data_size, &user_kvm_nested_state->size))
-                       return -EFAULT;
+                       break;
 
                r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
                                                  user_data_size);
                if (r < 0)
-                       return r;
+                       break;
 
                if (r > user_data_size) {
                        if (put_user(r, &user_kvm_nested_state->size))
-                               return -EFAULT;
-                       return -E2BIG;
+                               r = -EFAULT;
+                       else
+                               r = -E2BIG;
+                       break;
                }
+
                r = 0;
                break;
        }
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (!kvm_x86_ops->set_nested_state)
                        break;
 
+               r = -EFAULT;
                if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
-                       return -EFAULT;
+                       break;
 
+               r = -EINVAL;
                if (kvm_state.size < sizeof(kvm_state))
-                       return -EINVAL;
+                       break;
 
                if (kvm_state.flags &
                    ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
-                       return -EINVAL;
+                       break;
 
                /* nested_run_pending implies guest_mode.  */
                if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
-                       return -EINVAL;
+                       break;
 
                r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
                break;
@@ -4350,6 +4359,10 @@ split_irqchip_unlock:
                        kvm->arch.pause_in_guest = true;
                r = 0;
                break;
+       case KVM_CAP_MSR_PLATFORM_INFO:
+               kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
+               r = 0;
+               break;
        default:
                r = -EINVAL;
                break;
@@ -4685,7 +4698,7 @@ static void kvm_init_msr_list(void)
                 */
                switch (msrs_to_save[i]) {
                case MSR_IA32_BNDCFGS:
-                       if (!kvm_x86_ops->mpx_supported())
+                       if (!kvm_mpx_supported())
                                continue;
                        break;
                case MSR_TSC_AUX:
@@ -4987,7 +5000,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
                emul_type = 0;
        }
 
-       er = emulate_instruction(vcpu, emul_type);
+       er = kvm_emulate_instruction(vcpu, emul_type);
        if (er == EMULATE_USER_EXIT)
                return 0;
        if (er != EMULATE_DONE)
@@ -5870,7 +5883,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
        gpa_t gpa = cr2;
        kvm_pfn_t pfn;
 
-       if (emulation_type & EMULTYPE_NO_REEXECUTE)
+       if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+               return false;
+
+       if (WARN_ON_ONCE(is_guest_mode(vcpu)))
                return false;
 
        if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5974,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
         */
        vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
 
-       if (!(emulation_type & EMULTYPE_RETRY))
+       if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+               return false;
+
+       if (WARN_ON_ONCE(is_guest_mode(vcpu)))
                return false;
 
        if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6295,19 @@ restart:
 
        return r;
 }
-EXPORT_SYMBOL_GPL(x86_emulate_instruction);
+
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
+{
+       return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+                                       void *insn, int insn_len)
+{
+       return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
 
 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
                            unsigned short port)
@@ -7343,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
 
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+       smp_send_reschedule(vcpu->cpu);
+}
+EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
+
 /*
  * Returns 1 to let vcpu_run() continue the guest execution loop without
  * exiting to the userspace.  Otherwise, the value will be returned to the
@@ -7547,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        if (req_immediate_exit) {
                kvm_make_request(KVM_REQ_EVENT, vcpu);
-               smp_send_reschedule(vcpu->cpu);
+               kvm_x86_ops->request_immediate_exit(vcpu);
        }
 
        trace_kvm_entry(vcpu->vcpu_id);
@@ -7734,7 +7771,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
        int r;
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+       r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        if (r != EMULATE_DONE)
                return 0;
@@ -7811,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
        return 0;
 }
 
+/* Swap (qemu) user FPU context for the guest FPU context. */
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+       /* PKRU is separately restored in kvm_x86_ops->run.  */
+       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+                               ~XFEATURE_MASK_PKRU);
+       preempt_enable();
+       trace_kvm_fpu(1);
+}
+
+/* When vcpu_run ends, restore user space FPU context. */
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+       copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+       preempt_enable();
+       ++vcpu->stat.fpu_reload;
+       trace_kvm_fpu(0);
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
@@ -8159,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                kvm_update_cpuid(vcpu);
 
        idx = srcu_read_lock(&vcpu->kvm->srcu);
-       if (!is_long_mode(vcpu) && is_pae(vcpu)) {
+       if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
                load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
                mmu_reset_needed = 1;
        }
@@ -8388,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
        vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
-/* Swap (qemu) user FPU context for the guest FPU context. */
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
-       preempt_disable();
-       copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
-       /* PKRU is separately restored in kvm_x86_ops->run.  */
-       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
-                               ~XFEATURE_MASK_PKRU);
-       preempt_enable();
-       trace_kvm_fpu(1);
-}
-
-/* When vcpu_run ends, restore user space FPU context. */
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
-       preempt_disable();
-       copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
-       copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
-       preempt_enable();
-       ++vcpu->stat.fpu_reload;
-       trace_kvm_fpu(0);
-}
-
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
        void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8834,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
        pvclock_update_vm_gtod_copy(kvm);
 
+       kvm->arch.guest_can_read_msr_platform_info = true;
+
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
        INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
 
@@ -9182,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        kvm_page_track_flush_slot(kvm, slot);
 }
 
+static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+       return (is_guest_mode(vcpu) &&
+                       kvm_x86_ops->guest_apic_has_interrupt &&
+                       kvm_x86_ops->guest_apic_has_interrupt(vcpu));
+}
+
 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 {
        if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9206,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
                return true;
 
        if (kvm_arch_interrupt_allowed(vcpu) &&
-           kvm_cpu_has_interrupt(vcpu))
+           (kvm_cpu_has_interrupt(vcpu) ||
+           kvm_guest_apic_has_interrupt(vcpu)))
                return true;
 
        if (kvm_hv_has_stimer_pending(vcpu))
index 257f276..67b9568 100644 (file)
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
 bool kvm_vector_hashing_enabled(void);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+                           int emulation_type, void *insn, int insn_len);
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
index c8c6ad0..3f435d7 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/uaccess.h>
 #include <linux/export.h>
 
+#include <asm/tlbflush.h>
+
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        if (__range_not_ok(from, n, TASK_SIZE))
                return n;
 
+       if (!nmi_uaccess_okay())
+               return n;
+
        /*
         * Even though this function is typically called from NMI/IRQ context
         * disable pagefaults so that its behaviour is consistent even when
index b9123c4..47bebfe 100644 (file)
@@ -837,7 +837,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long error_code,
 
        printk(KERN_CONT "\n");
 
-       show_opcodes((u8 *)regs->ip, loglvl);
+       show_opcodes(regs, loglvl);
 }
 
 static void
index 7a8fc26..faca978 100644 (file)
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
                set_memory_np_noalias(begin_ul, len_pages);
 }
 
+void __weak mem_encrypt_free_decrypted_mem(void) { }
+
 void __ref free_initmem(void)
 {
        e820__reallocate_tables();
 
+       mem_encrypt_free_decrypted_mem();
+
        free_kernel_image_pages(&__init_begin, &__init_end);
 }
 
index b2de398..006f373 100644 (file)
@@ -348,6 +348,30 @@ bool sev_active(void)
 EXPORT_SYMBOL(sev_active);
 
 /* Architecture __weak replacement functions */
+void __init mem_encrypt_free_decrypted_mem(void)
+{
+       unsigned long vaddr, vaddr_end, npages;
+       int r;
+
+       vaddr = (unsigned long)__start_bss_decrypted_unused;
+       vaddr_end = (unsigned long)__end_bss_decrypted;
+       npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
+
+       /*
+        * The unused memory range was mapped decrypted, change the encryption
+        * attribute from decrypted to encrypted before freeing it.
+        */
+       if (mem_encrypt_active()) {
+               r = set_memory_encrypted(vaddr, npages);
+               if (r) {
+                       pr_warn("failed to free unused decrypted pages\n");
+                       return;
+               }
+       }
+
+       free_init_pages("unused decrypted", vaddr, vaddr_end);
+}
+
 void __init mem_encrypt_init(void)
 {
        if (!sme_me_mask)
index 8d6c34f..51a5a69 100644 (file)
@@ -1420,6 +1420,29 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
        return 0;
 }
 
+/*
+ * Machine check recovery code needs to change cache mode of poisoned
+ * pages to UC to avoid speculative access logging another error. But
+ * passing the address of the 1:1 mapping to set_memory_uc() is a fine
+ * way to encourage a speculative access. So we cheat and flip the top
+ * bit of the address. This works fine for the code that updates the
+ * page tables. But at the end of the process we need to flush the cache
+ * and the non-canonical address causes a #GP fault when used by the
+ * CLFLUSH instruction.
+ *
+ * But in the common case we already have a canonical address. This code
+ * will fix the top bit if needed and is a no-op otherwise.
+ */
+static inline unsigned long make_addr_canonical_again(unsigned long addr)
+{
+#ifdef CONFIG_X86_64
+       return (long)(addr << 1) >> 1;
+#else
+       return addr;
+#endif
+}
+
+
 static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
@@ -1465,7 +1488,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                 * Save address for cache flush. *addr is modified in the call
                 * to __change_page_attr_set_clr() below.
                 */
-               baddr = *addr;
+               baddr = make_addr_canonical_again(*addr);
        }
 
        /* Must avoid aliasing mappings in the highmem code */
index e848a48..089e78c 100644 (file)
@@ -269,7 +269,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
 
-               *pgdp = native_make_pgd(0);
+               pgd_clear(pgdp);
 
                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
@@ -494,7 +494,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
        int changed = !pte_same(*ptep, entry);
 
        if (changed && dirty)
-               *ptep = entry;
+               set_pte(ptep, entry);
 
        return changed;
 }
@@ -509,7 +509,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        if (changed && dirty) {
-               *pmdp = entry;
+               set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to to more permissive. No need to flush the TLB for that,
@@ -529,7 +529,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
 
        if (changed && dirty) {
-               *pudp = entry;
+               set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to to more permissive. No need to flush the TLB for that,
@@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
 {
        unsigned long address = __fix_to_virt(idx);
 
+#ifdef CONFIG_X86_64
+       /*
+       * Ensure that the static initial page tables are covering the
+       * fixmap completely.
+       */
+       BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+                    (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
index 31341ae..c1fc1ae 100644 (file)
@@ -248,7 +248,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 {
        gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pmd_t *pmd;
index 9517d1b..e96b99e 100644 (file)
@@ -305,6 +305,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
 
+               /* Let nmi_uaccess_okay() know that we're changing CR3. */
+               this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+               barrier();
+
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -335,6 +339,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (next != &init_mm)
                        this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
 
+               /* Make sure we write CR3 before loaded_mm. */
+               barrier();
+
                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
        }
index 324b933..9959657 100644 (file)
@@ -85,12 +85,7 @@ pgd_t * __init efi_call_phys_prolog(void)
 
 void __init efi_call_phys_epilog(pgd_t *save_pgd)
 {
-       struct desc_ptr gdt_descr;
-
-       gdt_descr.address = (unsigned long)get_cpu_gdt_rw(0);
-       gdt_descr.size = GDT_SIZE - 1;
-       load_gdt(&gdt_descr);
-
+       load_fixmap_gdt(0);
        load_cr3(save_pgd);
        __flush_tlb_all();
 }
index 45b700a..dd461c0 100644 (file)
@@ -435,14 +435,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
        trace_xen_mmu_set_pte_atomic(ptep, pte);
-       set_64bit((u64 *)ptep, native_pte_val(pte));
+       __xen_set_pte(ptep, pte);
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
        trace_xen_mmu_pte_clear(mm, addr, ptep);
-       if (!xen_batched_set_pte(ptep, native_make_pte(0)))
-               native_pte_clear(mm, addr, ptep);
+       __xen_set_pte(ptep, native_make_pte(0));
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
@@ -1570,7 +1569,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));
 #endif
-       native_set_pte(ptep, pte);
+       __xen_set_pte(ptep, pte);
 }
 
 /* Early in boot, while setting up the initial pagetable, assume
@@ -1908,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        /* L3_k[511] -> level2_fixmap_pgt */
        convert_pfn_mfn(level3_kernel_pgt);
 
-       /* L3_k[511][506] -> level1_fixmap_pgt */
+       /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
        convert_pfn_mfn(level2_fixmap_pgt);
 
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1953,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+       for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+               set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+                             PAGE_KERNEL_RO);
+       }
 
        /* Pin down new L4 */
        pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
@@ -2061,7 +2064,6 @@ void __init xen_relocate_p2m(void)
        pud_t *pud;
        pgd_t *pgd;
        unsigned long *new_p2m;
-       int save_pud;
 
        size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
        n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
@@ -2091,7 +2093,6 @@ void __init xen_relocate_p2m(void)
 
        pgd = __va(read_cr3_pa());
        new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
-       save_pud = n_pud;
        for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
                pud = early_memremap(pud_phys, PAGE_SIZE);
                clear_page(pud);
index 7d00d4a..95997e6 100644 (file)
@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
        int err, ret = IRQ_NONE;
-       struct pt_regs regs;
+       struct pt_regs regs = {0};
        const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
        uint8_t xenpmu_flags = get_xenpmu_flags();
 
index 04d038f..b9ad83a 100644 (file)
@@ -4,6 +4,7 @@ config ZONE_DMA
 
 config XTENSA
        def_bool y
+       select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_NO_COHERENT_DMA_MMAP if !MMU
index 295c120..d67e30f 100644 (file)
@@ -64,11 +64,7 @@ endif
 vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
 plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
 
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
-else
 KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
-endif
 
 KBUILD_DEFCONFIG := iss_defconfig
 
index f4bbb28..58709e8 100644 (file)
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
 
 void __init platform_setup(char **p_cmdline)
 {
+       static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+       static char cmdline[COMMAND_LINE_SIZE] __initdata;
        int argc = simc_argc();
        int argv_size = simc_argv_size();
 
        if (argc > 1) {
-               void **argv = alloc_bootmem(argv_size);
-               char *cmdline = alloc_bootmem(argv_size);
-               int i;
+               if (argv_size > sizeof(argv)) {
+                       pr_err("%s: command line too long: argv_size = %d\n",
+                              __func__, argv_size);
+               } else {
+                       int i;
 
-               cmdline[0] = 0;
-               simc_argv((void *)argv);
+                       cmdline[0] = 0;
+                       simc_argv((void *)argv);
 
-               for (i = 1; i < argc; ++i) {
-                       if (i > 1)
-                               strcat(cmdline, " ");
-                       strcat(cmdline, argv[i]);
+                       for (i = 1; i < argc; ++i) {
+                               if (i > 1)
+                                       strcat(cmdline, " ");
+                               strcat(cmdline, argv[i]);
+                       }
+                       *p_cmdline = cmdline;
                }
-               *p_cmdline = cmdline;
        }
 
        atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
index 58c6efa..9fe5952 100644 (file)
@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
 
 void bfqg_and_blkg_put(struct bfq_group *bfqg)
 {
-       bfqg_put(bfqg);
-
        blkg_put(bfqg_to_blkg(bfqg));
+
+       bfqg_put(bfqg);
 }
 
 /* @stats = 0 */
index b12966e..0093bed 100644 (file)
@@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
        const int sgrp = op_stat_group(req_op);
        int cpu = part_stat_lock();
 
-       part_stat_add(cpu, part, ticks[sgrp], duration);
+       part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
        part_round_stats(q, cpu, part);
        part_dec_in_flight(q, part, op_is_write(req_op));
 
@@ -2015,7 +2015,8 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 {
        if (unlikely(bio->bi_blkg))
                return -EBUSY;
-       blkg_get(blkg);
+       if (!blkg_try_get(blkg))
+               return -ENODEV;
        bio->bi_blkg = blkg;
        return 0;
 }
index 694595b..c630e02 100644 (file)
@@ -310,28 +310,11 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
        }
 }
 
-static void blkg_pd_offline(struct blkcg_gq *blkg)
-{
-       int i;
-
-       lockdep_assert_held(blkg->q->queue_lock);
-       lockdep_assert_held(&blkg->blkcg->lock);
-
-       for (i = 0; i < BLKCG_MAX_POLS; i++) {
-               struct blkcg_policy *pol = blkcg_policy[i];
-
-               if (blkg->pd[i] && !blkg->pd[i]->offline &&
-                   pol->pd_offline_fn) {
-                       pol->pd_offline_fn(blkg->pd[i]);
-                       blkg->pd[i]->offline = true;
-               }
-       }
-}
-
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;
+       int i;
 
        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
@@ -340,6 +323,13 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
 
+       for (i = 0; i < BLKCG_MAX_POLS; i++) {
+               struct blkcg_policy *pol = blkcg_policy[i];
+
+               if (blkg->pd[i] && pol->pd_offline_fn)
+                       pol->pd_offline_fn(blkg->pd[i]);
+       }
+
        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
@@ -382,7 +372,6 @@ static void blkg_destroy_all(struct request_queue *q)
                struct blkcg *blkcg = blkg->blkcg;
 
                spin_lock(&blkcg->lock);
-               blkg_pd_offline(blkg);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }
@@ -1053,59 +1042,64 @@ static struct cftype blkcg_legacy_files[] = {
        { }     /* terminate */
 };
 
+/*
+ * blkcg destruction is a three-stage process.
+ *
+ * 1. Destruction starts.  The blkcg_css_offline() callback is invoked
+ *    which offlines writeback.  Here we tie the next stage of blkg destruction
+ *    to the completion of writeback associated with the blkcg.  This lets us
+ *    avoid punting potentially large amounts of outstanding writeback to root
+ *    while maintaining any ongoing policies.  The next stage is triggered when
+ *    the nr_cgwbs count goes to zero.
+ *
+ * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
+ *    and handles the destruction of blkgs.  Here the css reference held by
+ *    the blkg is put back eventually allowing blkcg_css_free() to be called.
+ *    This work may occur in cgwb_release_workfn() on the cgwb_release
+ *    workqueue.  Any submitted ios that fail to get the blkg ref will be
+ *    punted to the root_blkg.
+ *
+ * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
+ *    This finally frees the blkcg.
+ */
+
 /**
  * blkcg_css_offline - cgroup css_offline callback
  * @css: css of interest
  *
- * This function is called when @css is about to go away and responsible
- * for offlining all blkgs pd and killing all wbs associated with @css.
- * blkgs pd offline should be done while holding both q and blkcg locks.
- * As blkcg lock is nested inside q lock, this function performs reverse
- * double lock dancing.
- *
- * This is the blkcg counterpart of ioc_release_fn().
+ * This function is called when @css is about to go away.  Here the cgwbs are
+ * offlined first and only once writeback associated with the blkcg has
+ * finished do we start step 2 (see above).
  */
 static void blkcg_css_offline(struct cgroup_subsys_state *css)
 {
        struct blkcg *blkcg = css_to_blkcg(css);
-       struct blkcg_gq *blkg;
-
-       spin_lock_irq(&blkcg->lock);
-
-       hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
-               struct request_queue *q = blkg->q;
-
-               if (spin_trylock(q->queue_lock)) {
-                       blkg_pd_offline(blkg);
-                       spin_unlock(q->queue_lock);
-               } else {
-                       spin_unlock_irq(&blkcg->lock);
-                       cpu_relax();
-                       spin_lock_irq(&blkcg->lock);
-               }
-       }
-
-       spin_unlock_irq(&blkcg->lock);
 
+       /* this prevents anyone from attaching or migrating to this blkcg */
        wb_blkcg_offline(blkcg);
+
+       /* put the base cgwb reference allowing step 2 to be triggered */
+       blkcg_cgwb_put(blkcg);
 }
 
 /**
- * blkcg_destroy_all_blkgs - destroy all blkgs associated with a blkcg
+ * blkcg_destroy_blkgs - responsible for shooting down blkgs
  * @blkcg: blkcg of interest
  *
- * This function is called when blkcg css is about to free and responsible for
- * destroying all blkgs associated with @blkcg.
- * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
+ * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
  * is nested inside q lock, this function performs reverse double lock dancing.
+ * Destroying the blkgs releases the reference held on the blkcg's css allowing
+ * blkcg_css_free to eventually be called.
+ *
+ * This is the blkcg counterpart of ioc_release_fn().
  */
-static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
+void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
        spin_lock_irq(&blkcg->lock);
+
        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
-                                                   struct blkcg_gq,
-                                                   blkcg_node);
+                                               struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;
 
                if (spin_trylock(q->queue_lock)) {
@@ -1117,6 +1111,7 @@ static void blkcg_destroy_all_blkgs(struct blkcg *blkcg)
                        spin_lock_irq(&blkcg->lock);
                }
        }
+
        spin_unlock_irq(&blkcg->lock);
 }
 
@@ -1125,8 +1120,6 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;
 
-       blkcg_destroy_all_blkgs(blkcg);
-
        mutex_lock(&blkcg_pol_mutex);
 
        list_del(&blkcg->all_blkcgs_node);
@@ -1189,6 +1182,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
        INIT_HLIST_HEAD(&blkcg->blkg_list);
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
+       refcount_set(&blkcg->cgwb_refcnt, 1);
 #endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
 
@@ -1480,11 +1474,8 @@ void blkcg_deactivate_policy(struct request_queue *q,
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                if (blkg->pd[pol->plid]) {
-                       if (!blkg->pd[pol->plid]->offline &&
-                           pol->pd_offline_fn) {
+                       if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
-                               blkg->pd[pol->plid]->offline = true;
-                       }
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }
@@ -1519,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
-       if (i >= BLKCG_MAX_POLS)
+       if (i >= BLKCG_MAX_POLS) {
+               pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
                goto err_unlock;
+       }
 
        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
        if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
index dee56c2..cff0a60 100644 (file)
@@ -2163,9 +2163,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 {
        const int op = bio_op(bio);
 
-       if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
+       if (part->policy && op_is_write(op)) {
                char b[BDEVNAME_SIZE];
 
+               if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
+                       return false;
+
                WARN_ONCE(1,
                       "generic_make_request: Trying to write "
                        "to read-only block-device %s (partno %d)\n",
@@ -2730,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now)
         * containing request is enough.
         */
        if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
-               unsigned long duration;
                const int sgrp = op_stat_group(req_op(req));
                struct hd_struct *part;
                int cpu;
 
-               duration = nsecs_to_jiffies(now - req->start_time_ns);
                cpu = part_stat_lock();
                part = req->part;
 
                part_stat_inc(cpu, part, ios[sgrp]);
-               part_stat_add(cpu, part, ticks[sgrp], duration);
+               part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
                part_round_stats(req->q, cpu, part);
                part_dec_in_flight(req->q, part, rq_data_dir(req));
 
index 94e1ed6..41317c5 100644 (file)
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 
        /*
         * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-        * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-        * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-        * synchronize_rcu to ensure all of the users go out of the critical
-        * section below and see zeroed q_usage_counter.
+        * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+        * to avoid race with it.
         */
-       rcu_read_lock();
-       if (percpu_ref_is_zero(&q->q_usage_counter)) {
-               rcu_read_unlock();
+       if (!percpu_ref_tryget(&q->q_usage_counter))
                return;
-       }
 
        queue_for_each_hw_ctx(q, hctx, i) {
                struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
                        bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
                bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
        }
-       rcu_read_unlock();
+       blk_queue_exit(q);
 }
 
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
index 85a1c1a..e3c39ea 100644 (file)
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                BUG_ON(!rq->q);
                if (rq->mq_ctx != this_ctx) {
                        if (this_ctx) {
-                               trace_block_unplug(this_q, depth, from_schedule);
+                               trace_block_unplug(this_q, depth, !from_schedule);
                                blk_mq_sched_insert_requests(this_q, this_ctx,
                                                                &ctx_list,
                                                                from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         * on 'ctx_list'. Do those.
         */
        if (this_ctx) {
-               trace_block_unplug(this_q, depth, from_schedule);
+               trace_block_unplug(this_q, depth, !from_schedule);
                blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                                from_schedule);
        }
index a3eede0..01d0620 100644 (file)
@@ -2129,8 +2129,9 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
 {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-       if (bio->bi_css)
-               bio_associate_blkg(bio, tg_to_blkg(tg));
+       /* fallback to root_blkg if we fail to get a blkg ref */
+       if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
+               bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
        bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 #endif
 }
index 84507d3..8e20a06 100644 (file)
@@ -123,16 +123,11 @@ static void rwb_wake_all(struct rq_wb *rwb)
        }
 }
 
-static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+static void wbt_rqw_done(struct rq_wb *rwb, struct rq_wait *rqw,
+                        enum wbt_flags wb_acct)
 {
-       struct rq_wb *rwb = RQWB(rqos);
-       struct rq_wait *rqw;
        int inflight, limit;
 
-       if (!(wb_acct & WBT_TRACKED))
-               return;
-
-       rqw = get_rq_wait(rwb, wb_acct);
        inflight = atomic_dec_return(&rqw->inflight);
 
        /*
@@ -166,10 +161,22 @@ static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
                int diff = limit - inflight;
 
                if (!inflight || diff >= rwb->wb_background / 2)
-                       wake_up(&rqw->wait);
+                       wake_up_all(&rqw->wait);
        }
 }
 
+static void __wbt_done(struct rq_qos *rqos, enum wbt_flags wb_acct)
+{
+       struct rq_wb *rwb = RQWB(rqos);
+       struct rq_wait *rqw;
+
+       if (!(wb_acct & WBT_TRACKED))
+               return;
+
+       rqw = get_rq_wait(rwb, wb_acct);
+       wbt_rqw_done(rwb, rqw, wb_acct);
+}
+
 /*
  * Called on completion of a request. Note that it's also called when
  * a request is merged, when the request gets freed.
@@ -481,6 +488,34 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
        return limit;
 }
 
+struct wbt_wait_data {
+       struct wait_queue_entry wq;
+       struct task_struct *task;
+       struct rq_wb *rwb;
+       struct rq_wait *rqw;
+       unsigned long rw;
+       bool got_token;
+};
+
+static int wbt_wake_function(struct wait_queue_entry *curr, unsigned int mode,
+                            int wake_flags, void *key)
+{
+       struct wbt_wait_data *data = container_of(curr, struct wbt_wait_data,
+                                                       wq);
+
+       /*
+        * If we fail to get a budget, return -1 to interrupt the wake up
+        * loop in __wake_up_common.
+        */
+       if (!rq_wait_inc_below(data->rqw, get_limit(data->rwb, data->rw)))
+               return -1;
+
+       data->got_token = true;
+       list_del_init(&curr->entry);
+       wake_up_process(data->task);
+       return 1;
+}
+
 /*
  * Block if we will exceed our limit, or if we are currently waiting for
  * the timer to kick off queuing again.
@@ -491,19 +526,40 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
        __acquires(lock)
 {
        struct rq_wait *rqw = get_rq_wait(rwb, wb_acct);
-       DECLARE_WAITQUEUE(wait, current);
+       struct wbt_wait_data data = {
+               .wq = {
+                       .func   = wbt_wake_function,
+                       .entry  = LIST_HEAD_INIT(data.wq.entry),
+               },
+               .task = current,
+               .rwb = rwb,
+               .rqw = rqw,
+               .rw = rw,
+       };
        bool has_sleeper;
 
        has_sleeper = wq_has_sleeper(&rqw->wait);
        if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
                return;
 
-       add_wait_queue_exclusive(&rqw->wait, &wait);
+       prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
        do {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+               if (data.got_token)
+                       break;
 
-               if (!has_sleeper && rq_wait_inc_below(rqw, get_limit(rwb, rw)))
+               if (!has_sleeper &&
+                   rq_wait_inc_below(rqw, get_limit(rwb, rw))) {
+                       finish_wait(&rqw->wait, &data.wq);
+
+                       /*
+                        * We raced with wbt_wake_function() getting a token,
+                        * which means we now have two. Put our local token
+                        * and wake anyone else potentially waiting for one.
+                        */
+                       if (data.got_token)
+                               wbt_rqw_done(rwb, rqw, wb_acct);
                        break;
+               }
 
                if (lock) {
                        spin_unlock_irq(lock);
@@ -511,11 +567,11 @@ static void __wbt_wait(struct rq_wb *rwb, enum wbt_flags wb_acct,
                        spin_lock_irq(lock);
                } else
                        io_schedule();
+
                has_sleeper = false;
        } while (1);
 
-       __set_current_state(TASK_RUNNING);
-       remove_wait_queue(&rqw->wait, &wait);
+       finish_wait(&rqw->wait, &data.wq);
 }
 
 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
@@ -580,11 +636,6 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
                return;
        }
 
-       if (current_is_kswapd())
-               flags |= WBT_KSWAPD;
-       if (bio_op(bio) == REQ_OP_DISCARD)
-               flags |= WBT_DISCARD;
-
        __wbt_wait(rwb, flags, bio->bi_opf, lock);
 
        if (!blk_stat_is_active(rwb->cb))
index db588ad..9a442c2 100644 (file)
@@ -37,7 +37,7 @@ struct bsg_device {
        struct request_queue *queue;
        spinlock_t lock;
        struct hlist_node dev_list;
-       atomic_t ref_count;
+       refcount_t ref_count;
        char name[20];
        int max_queue;
 };
@@ -252,7 +252,7 @@ static int bsg_put_device(struct bsg_device *bd)
 
        mutex_lock(&bsg_mutex);
 
-       if (!atomic_dec_and_test(&bd->ref_count)) {
+       if (!refcount_dec_and_test(&bd->ref_count)) {
                mutex_unlock(&bsg_mutex);
                return 0;
        }
@@ -290,7 +290,7 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 
        bd->queue = rq;
 
-       atomic_set(&bd->ref_count, 1);
+       refcount_set(&bd->ref_count, 1);
        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
        strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
@@ -308,7 +308,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 
        hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
                if (bd->queue == q) {
-                       atomic_inc(&bd->ref_count);
+                       refcount_inc(&bd->ref_count);
                        goto found;
                }
        }
index 5ea6e7d..fae58b2 100644 (file)
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 
        while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
                ;
-       if (q->nr_sorted && printed++ < 10) {
+       if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
@@ -895,8 +895,7 @@ int elv_register(struct elevator_type *e)
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name, e->uses_mq)) {
                spin_unlock(&elv_list_lock);
-               if (e->icq_cache)
-                       kmem_cache_destroy(e->icq_cache);
+               kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
index 8cc719a..be5bab2 100644 (file)
@@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v)
                           part_stat_read(hd, ios[STAT_READ]),
                           part_stat_read(hd, merges[STAT_READ]),
                           part_stat_read(hd, sectors[STAT_READ]),
-                          jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+                          (unsigned int)part_stat_read_msecs(hd, STAT_READ),
                           part_stat_read(hd, ios[STAT_WRITE]),
                           part_stat_read(hd, merges[STAT_WRITE]),
                           part_stat_read(hd, sectors[STAT_WRITE]),
-                          jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
+                          (unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
                           inflight[0],
                           jiffies_to_msecs(part_stat_read(hd, io_ticks)),
                           jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
                           part_stat_read(hd, ios[STAT_DISCARD]),
                           part_stat_read(hd, merges[STAT_DISCARD]),
                           part_stat_read(hd, sectors[STAT_DISCARD]),
-                          jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
+                          (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
                        );
        }
        disk_part_iter_exit(&piter);
index 5a8975a..d3d14e8 100644 (file)
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev,
                part_stat_read(p, ios[STAT_READ]),
                part_stat_read(p, merges[STAT_READ]),
                (unsigned long long)part_stat_read(p, sectors[STAT_READ]),
-               jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+               (unsigned int)part_stat_read_msecs(p, STAT_READ),
                part_stat_read(p, ios[STAT_WRITE]),
                part_stat_read(p, merges[STAT_WRITE]),
                (unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
-               jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
+               (unsigned int)part_stat_read_msecs(p, STAT_WRITE),
                inflight[0],
                jiffies_to_msecs(part_stat_read(p, io_ticks)),
                jiffies_to_msecs(part_stat_read(p, time_in_queue)),
                part_stat_read(p, ios[STAT_DISCARD]),
                part_stat_read(p, merges[STAT_DISCARD]),
                (unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
-               jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
+               (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
 }
 
 ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
index 9706613..bf64cfa 100644 (file)
@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
 #define LPSS_GPIODEF0_DMA_LLP          BIT(13)
 
 static DEFINE_MUTEX(lpss_iosf_mutex);
-static bool lpss_iosf_d3_entered;
+static bool lpss_iosf_d3_entered = true;
 
 static void lpss_iosf_enter_d3_state(void)
 {
index 292088f..d2e29a1 100644 (file)
 #include <linux/delay.h>
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
+#include <linux/dmi.h>
 #endif
 #include <linux/acpi_iort.h>
 #include <linux/pci.h>
 #include <acpi/apei.h>
-#include <linux/dmi.h>
 #include <linux/suspend.h>
 
 #include "internal.h"
@@ -82,10 +82,6 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
        },
        {}
 };
-#else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
-       {}
-};
 #endif
 
 /* --------------------------------------------------------------------------
@@ -1033,11 +1029,16 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
+#ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
-        * DSDT will be copied to memory
+        * DSDT will be copied to memory.
+        * Note that calling dmi_check_system() here on other architectures
+        * would not be OK because only x86 initializes dmi early enough.
+        * Thankfully only x86 systems need such quirks for now.
         */
        dmi_check_system(dsdt_dmi_table);
+#endif
 
        status = acpi_reallocate_root_table();
        if (ACPI_FAILURE(status)) {
index 3f3b7b2..64fd96e 100644 (file)
@@ -332,6 +332,35 @@ err_no_vma:
        return vma ? -ENOMEM : -ESRCH;
 }
 
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+               struct vm_area_struct *vma)
+{
+       if (vma)
+               alloc->vma_vm_mm = vma->vm_mm;
+       /*
+        * If we see alloc->vma is not NULL, buffer data structures set up
+        * completely. Look at smp_rmb side binder_alloc_get_vma.
+        * We also want to guarantee new alloc->vma_vm_mm is always visible
+        * if alloc->vma is set.
+        */
+       smp_wmb();
+       alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+               struct binder_alloc *alloc)
+{
+       struct vm_area_struct *vma = NULL;
+
+       if (alloc->vma) {
+               /* Look at description in binder_alloc_set_vma */
+               smp_rmb();
+               vma = alloc->vma;
+       }
+       return vma;
+}
+
 static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;
 
-       if (alloc->vma == NULL) {
+       if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
-       barrier();
-       alloc->vma = vma;
-       alloc->vma_vm_mm = vma->vm_mm;
+       binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);
 
        return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
        int buffers, page_count;
        struct binder_buffer *buffer;
 
-       BUG_ON(alloc->vma);
-
        buffers = 0;
        mutex_lock(&alloc->mutex);
+       BUG_ON(alloc->vma);
+
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
 
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-       WRITE_ONCE(alloc->vma, NULL);
+       binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-       vma = alloc->vma;
+       vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
index 172e328..a9dd4ea 100644 (file)
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
  */
 int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
 {
+       u64 done_mask, ap_qc_active = ap->qc_active;
        int nr_done = 0;
-       u64 done_mask;
 
-       done_mask = ap->qc_active ^ qc_active;
+       /*
+        * If the internal tag is set on ap->qc_active, then we care about
+        * bit0 on the passed in qc_active mask. Move that bit up to match
+        * the internal tag.
+        */
+       if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+               qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+               qc_active ^= qc_active & 0x01;
+       }
+
+       done_mask = ap_qc_active ^ qc_active;
 
        if (unlikely(done_mask & qc_active)) {
                ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
@@ -7394,4 +7404,4 @@ EXPORT_SYMBOL_GPL(ata_cable_unknown);
 EXPORT_SYMBOL_GPL(ata_cable_ignore);
 EXPORT_SYMBOL_GPL(ata_cable_sata);
 EXPORT_SYMBOL_GPL(ata_host_get);
-EXPORT_SYMBOL_GPL(ata_host_put);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(ata_host_put);
index 5d4b72e..569a4a6 100644 (file)
@@ -256,14 +256,12 @@ static struct ata_port_operations pata_ftide010_port_ops = {
        .qc_issue       = ftide010_qc_issue,
 };
 
-static struct ata_port_info ftide010_port_info[] = {
-       {
-               .flags          = ATA_FLAG_SLAVE_POSS,
-               .mwdma_mask     = ATA_MWDMA2,
-               .udma_mask      = ATA_UDMA6,
-               .pio_mask       = ATA_PIO4,
-               .port_ops       = &pata_ftide010_port_ops,
-       },
+static struct ata_port_info ftide010_port_info = {
+       .flags          = ATA_FLAG_SLAVE_POSS,
+       .mwdma_mask     = ATA_MWDMA2,
+       .udma_mask      = ATA_UDMA6,
+       .pio_mask       = ATA_PIO4,
+       .port_ops       = &pata_ftide010_port_ops,
 };
 
 #if IS_ENABLED(CONFIG_SATA_GEMINI)
@@ -349,6 +347,7 @@ static int pata_ftide010_gemini_cable_detect(struct ata_port *ap)
 }
 
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+                                    struct ata_port_info *pi,
                                     bool is_ata1)
 {
        struct device *dev = ftide->dev;
@@ -373,7 +372,13 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 
        /* Flag port as SATA-capable */
        if (gemini_sata_bridge_enabled(sg, is_ata1))
-               ftide010_port_info[0].flags |= ATA_FLAG_SATA;
+               pi->flags |= ATA_FLAG_SATA;
+
+       /* This device has broken DMA, only PIO works */
+       if (of_machine_is_compatible("itian,sq201")) {
+               pi->mwdma_mask = 0;
+               pi->udma_mask = 0;
+       }
 
        /*
         * We assume that a simple 40-wire cable is used in the PATA mode.
@@ -435,6 +440,7 @@ static int pata_ftide010_gemini_init(struct ftide010 *ftide,
 }
 #else
 static int pata_ftide010_gemini_init(struct ftide010 *ftide,
+                                    struct ata_port_info *pi,
                                     bool is_ata1)
 {
        return -ENOTSUPP;
@@ -446,7 +452,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
-       const struct ata_port_info pi = ftide010_port_info[0];
+       struct ata_port_info pi = ftide010_port_info;
        const struct ata_port_info *ppi[] = { &pi, NULL };
        struct ftide010 *ftide;
        struct resource *res;
@@ -490,6 +496,7 @@ static int pata_ftide010_probe(struct platform_device *pdev)
                 * are ATA0. This will also set up the cable types.
                 */
                ret = pata_ftide010_gemini_init(ftide,
+                               &pi,
                                (res->start == 0x63400000));
                if (ret)
                        goto err_dis_clk;
index 0943e70..8e9213b 100644 (file)
@@ -209,22 +209,28 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
 static int alloc_lookup_fw_priv(const char *fw_name,
                                struct firmware_cache *fwc,
                                struct fw_priv **fw_priv, void *dbuf,
-                               size_t size)
+                               size_t size, enum fw_opt opt_flags)
 {
        struct fw_priv *tmp;
 
        spin_lock(&fwc->lock);
-       tmp = __lookup_fw_priv(fw_name);
-       if (tmp) {
-               kref_get(&tmp->ref);
-               spin_unlock(&fwc->lock);
-               *fw_priv = tmp;
-               pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
-               return 1;
+       if (!(opt_flags & FW_OPT_NOCACHE)) {
+               tmp = __lookup_fw_priv(fw_name);
+               if (tmp) {
+                       kref_get(&tmp->ref);
+                       spin_unlock(&fwc->lock);
+                       *fw_priv = tmp;
+                       pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+                       return 1;
+               }
        }
+
        tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
-       if (tmp)
-               list_add(&tmp->list, &fwc->head);
+       if (tmp) {
+               INIT_LIST_HEAD(&tmp->list);
+               if (!(opt_flags & FW_OPT_NOCACHE))
+                       list_add(&tmp->list, &fwc->head);
+       }
        spin_unlock(&fwc->lock);
 
        *fw_priv = tmp;
@@ -493,7 +499,8 @@ int assign_fw(struct firmware *fw, struct device *device,
  */
 static int
 _request_firmware_prepare(struct firmware **firmware_p, const char *name,
-                         struct device *device, void *dbuf, size_t size)
+                         struct device *device, void *dbuf, size_t size,
+                         enum fw_opt opt_flags)
 {
        struct firmware *firmware;
        struct fw_priv *fw_priv;
@@ -511,7 +518,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
                return 0; /* assigned */
        }
 
-       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
+       ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+                                 opt_flags);
 
        /*
         * bind with 'priv' now to avoid warning in failure path
@@ -571,7 +579,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
                goto out;
        }
 
-       ret = _request_firmware_prepare(&fw, name, device, buf, size);
+       ret = _request_firmware_prepare(&fw, name, device, buf, size,
+                                       opt_flags);
        if (ret <= 0) /* error or already assigned */
                goto out;
 
index c8a1cb0..817320c 100644 (file)
@@ -417,25 +417,23 @@ static ssize_t show_valid_zones(struct device *dev,
        int nid;
 
        /*
-        * The block contains more than one zone can not be offlined.
-        * This can happen e.g. for ZONE_DMA and ZONE_DMA32
-        */
-       if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
-               return sprintf(buf, "none\n");
-
-       start_pfn = valid_start_pfn;
-       nr_pages = valid_end_pfn - start_pfn;
-
-       /*
         * Check the existing zone. Make sure that we do that only on the
         * online nodes otherwise the page_zone is not reliable
         */
        if (mem->state == MEM_ONLINE) {
+               /*
+                * The block contains more than one zone can not be offlined.
+                * This can happen e.g. for ZONE_DMA and ZONE_DMA32
+                */
+               if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
+                                         &valid_start_pfn, &valid_end_pfn))
+                       return sprintf(buf, "none\n");
+               start_pfn = valid_start_pfn;
                strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
                goto out;
        }
 
-       nid = pfn_to_nid(start_pfn);
+       nid = mem->nid;
        default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
        strcat(buf, default_zone->name);
 
index 8e2e475..5a42ae4 100644 (file)
@@ -185,7 +185,7 @@ EXPORT_SYMBOL_GPL(of_pm_clk_add_clk);
 int of_pm_clk_add_clks(struct device *dev)
 {
        struct clk **clks;
-       unsigned int i, count;
+       int i, count;
        int ret;
 
        if (!dev || !dev->of_node)
index 3f68e29..a690fd4 100644 (file)
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
        dpm_wait_for_subordinate(dev, async);
 
-       if (async_error)
+       if (async_error) {
+               dev->power.direct_complete = false;
                goto Complete;
+       }
 
        /*
         * If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
                pm_wakeup_event(dev, 0);
 
        if (pm_wakeup_pending()) {
+               dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }
index 48f6227..f2b6f4d 100644 (file)
@@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                                          (struct floppy_struct **)&outparam);
                if (ret)
                        return ret;
+               memcpy(&inparam.g, outparam,
+                               offsetof(struct floppy_struct, name));
+               outparam = &inparam.g;
                break;
        case FDMSGON:
                UDP->flags |= FTD_MSG;
index 3863c00..14a5125 100644 (file)
@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        case NBD_SET_SOCK:
                return nbd_add_socket(nbd, arg, false);
        case NBD_SET_BLKSIZE:
+               if (!arg || !is_power_of_2(arg) || arg < 512 ||
+                   arg > PAGE_SIZE)
+                       return -EINVAL;
                nbd_size_set(nbd, arg,
                             div_s64(config->bytesize, arg));
                return 0;
index d81781f..34e0030 100644 (file)
@@ -87,10 +87,10 @@ struct nullb {
 #ifdef CONFIG_BLK_DEV_ZONED
 int null_zone_init(struct nullb_device *dev);
 void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
-                                           struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                       unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
 #else
 static inline int null_zone_init(struct nullb_device *dev)
 {
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
 }
 static inline void null_zone_exit(struct nullb_device *dev) {}
 static inline blk_status_t null_zone_report(struct nullb *nullb,
-                                           struct nullb_cmd *cmd)
+                                           struct bio *bio)
 {
        return BLK_STS_NOTSUPP;
 }
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                                  unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
 #endif /* CONFIG_BLK_DEV_ZONED */
 #endif /* __NULL_BLK_H */
index 6127e3f..093b614 100644 (file)
@@ -1157,16 +1157,33 @@ static void null_restart_queue_async(struct nullb *nullb)
        }
 }
 
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+       struct nullb_device *dev = cmd->nq->dev;
+
+       if (dev->queue_mode == NULL_Q_BIO) {
+               if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+                       cmd->error = null_zone_report(nullb, cmd->bio);
+                       return true;
+               }
+       } else {
+               if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+                       cmd->error = null_zone_report(nullb, cmd->rq->bio);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
 {
        struct nullb_device *dev = cmd->nq->dev;
        struct nullb *nullb = dev->nullb;
        int err = 0;
 
-       if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
-               cmd->error = null_zone_report(nullb, cmd);
+       if (cmd_report_zone(nullb, cmd))
                goto out;
-       }
 
        if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
                struct request *rq = cmd->rq;
@@ -1234,10 +1251,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
        cmd->error = errno_to_blk_status(err);
 
        if (!cmd->error && dev->zoned) {
-               if (req_op(cmd->rq) == REQ_OP_WRITE)
-                       null_zone_write(cmd);
-               else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
-                       null_zone_reset(cmd);
+               sector_t sector;
+               unsigned int nr_sectors;
+               int op;
+
+               if (dev->queue_mode == NULL_Q_BIO) {
+                       op = bio_op(cmd->bio);
+                       sector = cmd->bio->bi_iter.bi_sector;
+                       nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+               } else {
+                       op = req_op(cmd->rq);
+                       sector = blk_rq_pos(cmd->rq);
+                       nr_sectors = blk_rq_sectors(cmd->rq);
+               }
+
+               if (op == REQ_OP_WRITE)
+                       null_zone_write(cmd, sector, nr_sectors);
+               else if (op == REQ_OP_ZONE_RESET)
+                       null_zone_reset(cmd, sector);
        }
 out:
        /* Complete IO by inline, softirq or timer */
index a979ca0..7c6b86d 100644 (file)
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
        kvfree(dev->zones);
 }
 
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
-                             unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+                              unsigned int zno, unsigned int nr_zones)
 {
        struct blk_zone_report_hdr *hdr = NULL;
        struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
        void *addr;
        unsigned int zones_to_cpy;
 
-       bio_for_each_segment(bvec, rq->bio, iter) {
+       bio_for_each_segment(bvec, bio, iter) {
                addr = kmap_atomic(bvec.bv_page);
 
                zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
        }
 }
 
-blk_status_t null_zone_report(struct nullb *nullb,
-                                    struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
 {
        struct nullb_device *dev = nullb->dev;
-       struct request *rq = cmd->rq;
-       unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+       unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
        unsigned int nr_zones = dev->nr_zones - zno;
-       unsigned int max_zones = (blk_rq_bytes(rq) /
-                                       sizeof(struct blk_zone)) - 1;
+       unsigned int max_zones;
 
+       max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
        nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
-       null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+       null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
 
        return BLK_STS_OK;
 }
 
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+                    unsigned int nr_sectors)
 {
        struct nullb_device *dev = cmd->nq->dev;
-       struct request *rq = cmd->rq;
-       sector_t sector = blk_rq_pos(rq);
-       unsigned int rq_sectors = blk_rq_sectors(rq);
        unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];
 
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
                /* Writes must be at the write pointer position */
-               if (blk_rq_pos(rq) != zone->wp) {
+               if (sector != zone->wp) {
                        cmd->error = BLK_STS_IOERR;
                        break;
                }
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
                if (zone->cond == BLK_ZONE_COND_EMPTY)
                        zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
-               zone->wp += rq_sectors;
+               zone->wp += nr_sectors;
                if (zone->wp == zone->start + zone->len)
                        zone->cond = BLK_ZONE_COND_FULL;
                break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
        }
 }
 
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
 {
        struct nullb_device *dev = cmd->nq->dev;
-       struct request *rq = cmd->rq;
-       unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+       unsigned int zno = null_zone_no(dev, sector);
        struct blk_zone *zone = &dev->zones[zno];
 
        zone->cond = BLK_ZONE_COND_EMPTY;
index 7915f3b..73ed5f3 100644 (file)
@@ -4207,11 +4207,13 @@ static ssize_t rbd_parent_show(struct device *dev,
 
                count += sprintf(&buf[count], "%s"
                            "pool_id %llu\npool_name %s\n"
+                           "pool_ns %s\n"
                            "image_id %s\nimage_name %s\n"
                            "snap_id %llu\nsnap_name %s\n"
                            "overlap %llu\n",
                            !count ? "" : "\n", /* first? */
                            spec->pool_id, spec->pool_name,
+                           spec->pool_ns ?: "",
                            spec->image_id, spec->image_name ?: "(unknown)",
                            spec->snap_id, spec->snap_name,
                            rbd_dev->parent_overlap);
@@ -4584,47 +4586,177 @@ static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
                                                &rbd_dev->header.features);
 }
 
+struct parent_image_info {
+       u64             pool_id;
+       const char      *pool_ns;
+       const char      *image_id;
+       u64             snap_id;
+
+       bool            has_overlap;
+       u64             overlap;
+};
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int decode_parent_image_spec(void **p, void *end,
+                                   struct parent_image_info *pii)
+{
+       u8 struct_v;
+       u32 struct_len;
+       int ret;
+
+       ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
+                                 &struct_v, &struct_len);
+       if (ret)
+               return ret;
+
+       ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
+       pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->pool_ns)) {
+               ret = PTR_ERR(pii->pool_ns);
+               pii->pool_ns = NULL;
+               return ret;
+       }
+       pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int __get_parent_info(struct rbd_device *rbd_dev,
+                            struct page *req_page,
+                            struct page *reply_page,
+                            struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret == -EOPNOTSUPP ? 1 : ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ret = decode_parent_image_spec(&p, end, pii);
+       if (ret)
+               return ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
+       if (pii->has_overlap)
+               ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+/*
+ * The caller is responsible for @pii.
+ */
+static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
+                                   struct page *req_page,
+                                   struct page *reply_page,
+                                   struct parent_image_info *pii)
+{
+       struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+       size_t reply_len = PAGE_SIZE;
+       void *p, *end;
+       int ret;
+
+       ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
+                            "rbd", "get_parent", CEPH_OSD_FLAG_READ,
+                            req_page, sizeof(u64), reply_page, &reply_len);
+       if (ret)
+               return ret;
+
+       p = page_address(reply_page);
+       end = p + reply_len;
+       ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
+       pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
+       if (IS_ERR(pii->image_id)) {
+               ret = PTR_ERR(pii->image_id);
+               pii->image_id = NULL;
+               return ret;
+       }
+       ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
+       pii->has_overlap = true;
+       ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
+static int get_parent_info(struct rbd_device *rbd_dev,
+                          struct parent_image_info *pii)
+{
+       struct page *req_page, *reply_page;
+       void *p;
+       int ret;
+
+       req_page = alloc_page(GFP_KERNEL);
+       if (!req_page)
+               return -ENOMEM;
+
+       reply_page = alloc_page(GFP_KERNEL);
+       if (!reply_page) {
+               __free_page(req_page);
+               return -ENOMEM;
+       }
+
+       p = page_address(req_page);
+       ceph_encode_64(&p, rbd_dev->spec->snap_id);
+       ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
+       if (ret > 0)
+               ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
+                                              pii);
+
+       __free_page(req_page);
+       __free_page(reply_page);
+       return ret;
+}
+
 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
 {
        struct rbd_spec *parent_spec;
-       size_t size;
-       void *reply_buf = NULL;
-       __le64 snapid;
-       void *p;
-       void *end;
-       u64 pool_id;
-       char *image_id;
-       u64 snap_id;
-       u64 overlap;
+       struct parent_image_info pii = { 0 };
        int ret;
 
        parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;
 
-       size = sizeof (__le64) +                                /* pool_id */
-               sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
-               sizeof (__le64) +                               /* snap_id */
-               sizeof (__le64);                                /* overlap */
-       reply_buf = kmalloc(size, GFP_KERNEL);
-       if (!reply_buf) {
-               ret = -ENOMEM;
+       ret = get_parent_info(rbd_dev, &pii);
+       if (ret)
                goto out_err;
-       }
 
-       snapid = cpu_to_le64(rbd_dev->spec->snap_id);
-       ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
-                                 &rbd_dev->header_oloc, "get_parent",
-                                 &snapid, sizeof(snapid), reply_buf, size);
-       dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
-       if (ret < 0)
-               goto out_err;
+       dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+            __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
+            pii.has_overlap, pii.overlap);
 
-       p = reply_buf;
-       end = reply_buf + ret;
-       ret = -ERANGE;
-       ceph_decode_64_safe(&p, end, pool_id, out_err);
-       if (pool_id == CEPH_NOPOOL) {
+       if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
                /*
                 * Either the parent never existed, or we have
                 * record of it but the image got flattened so it no
@@ -4633,6 +4765,10 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 * overlap to 0.  The effect of this is that all new
                 * requests will be treated as if the image had no
                 * parent.
+                *
+                * If !pii.has_overlap, the parent image spec is not
+                * applicable.  It's there to avoid duplication in each
+                * snapshot record.
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
@@ -4647,51 +4783,36 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        /* The ceph file layout needs to fit pool id in 32 bits */
 
        ret = -EIO;
-       if (pool_id > (u64)U32_MAX) {
+       if (pii.pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "parent pool id too large (%llu > %u)",
-                       (unsigned long long)pool_id, U32_MAX);
+                       (unsigned long long)pii.pool_id, U32_MAX);
                goto out_err;
        }
 
-       image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
-       if (IS_ERR(image_id)) {
-               ret = PTR_ERR(image_id);
-               goto out_err;
-       }
-       ceph_decode_64_safe(&p, end, snap_id, out_err);
-       ceph_decode_64_safe(&p, end, overlap, out_err);
-
        /*
         * The parent won't change (except when the clone is
         * flattened, already handled that).  So we only need to
         * record the parent spec we have not already done so.
         */
        if (!rbd_dev->parent_spec) {
-               parent_spec->pool_id = pool_id;
-               parent_spec->image_id = image_id;
-               parent_spec->snap_id = snap_id;
-
-               /* TODO: support cloning across namespaces */
-               if (rbd_dev->spec->pool_ns) {
-                       parent_spec->pool_ns = kstrdup(rbd_dev->spec->pool_ns,
-                                                      GFP_KERNEL);
-                       if (!parent_spec->pool_ns) {
-                               ret = -ENOMEM;
-                               goto out_err;
-                       }
+               parent_spec->pool_id = pii.pool_id;
+               if (pii.pool_ns && *pii.pool_ns) {
+                       parent_spec->pool_ns = pii.pool_ns;
+                       pii.pool_ns = NULL;
                }
+               parent_spec->image_id = pii.image_id;
+               pii.image_id = NULL;
+               parent_spec->snap_id = pii.snap_id;
 
                rbd_dev->parent_spec = parent_spec;
                parent_spec = NULL;     /* rbd_dev now owns this */
-       } else {
-               kfree(image_id);
        }
 
        /*
         * We always update the parent overlap.  If it's zero we issue
         * a warning, as we will proceed as if there was no parent.
         */
-       if (!overlap) {
+       if (!pii.overlap) {
                if (parent_spec) {
                        /* refresh, careful to warn just once */
                        if (rbd_dev->parent_overlap)
@@ -4702,14 +4823,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                        rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
-       rbd_dev->parent_overlap = overlap;
+       rbd_dev->parent_overlap = pii.overlap;
 
 out:
        ret = 0;
 out_err:
-       kfree(reply_buf);
+       kfree(pii.pool_ns);
+       kfree(pii.image_id);
        rbd_spec_put(parent_spec);
-
        return ret;
 }
 
index b55b245..fd1e19f 100644 (file)
@@ -84,6 +84,18 @@ MODULE_PARM_DESC(max_persistent_grants,
                  "Maximum number of grants to map persistently");
 
 /*
+ * How long a persistent grant is allowed to remain allocated without being in
+ * use. The time is in seconds, 0 means indefinitely long.
+ */
+
+static unsigned int xen_blkif_pgrant_timeout = 60;
+module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
+                  uint, 0644);
+MODULE_PARM_DESC(persistent_grant_unused_seconds,
+                "Time in seconds an unused persistent grant is allowed to "
+                "remain allocated. Default is 60, 0 means unlimited.");
+
+/*
  * Maximum number of rings/queues blkback supports, allow as many queues as there
  * are CPUs if user has not specified a value.
  */
@@ -123,6 +135,13 @@ module_param(log_stats, int, 0644);
 /* Number of free pages to remove on each call to gnttab_free_pages */
 #define NUM_BATCH_FREE_PAGES 10
 
+static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
+{
+       return xen_blkif_pgrant_timeout &&
+              (jiffies - persistent_gnt->last_used >=
+               HZ * xen_blkif_pgrant_timeout);
+}
+
 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
 {
        unsigned long flags;
@@ -236,8 +255,7 @@ static int add_persistent_gnt(struct xen_blkif_ring *ring,
                }
        }
 
-       bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
-       set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+       persistent_gnt->active = true;
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
@@ -261,11 +279,11 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
-                       if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
+                       if (data->active) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
-                       set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
+                       data->active = true;
                        atomic_inc(&ring->persistent_gnt_in_use);
                        return data;
                }
@@ -276,10 +294,10 @@ static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
 static void put_persistent_gnt(struct xen_blkif_ring *ring,
                                struct persistent_gnt *persistent_gnt)
 {
-       if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+       if (!persistent_gnt->active)
                pr_alert_ratelimited("freeing a grant already unused\n");
-       set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
-       clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
+       persistent_gnt->last_used = jiffies;
+       persistent_gnt->active = false;
        atomic_dec(&ring->persistent_gnt_in_use);
 }
 
@@ -371,26 +389,26 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
-       bool scan_used = false, clean_used = false;
+       bool scan_used = false;
        struct rb_root *root;
 
-       if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
-           (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
-           !ring->blkif->vbd.overflow_max_grants)) {
-               goto out;
-       }
-
        if (work_busy(&ring->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                goto out;
        }
 
-       num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
-       num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
-       num_clean = min(ring->persistent_gnt_c, num_clean);
-       if ((num_clean == 0) ||
-           (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
-               goto out;
+       if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
+           (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
+           !ring->blkif->vbd.overflow_max_grants)) {
+               num_clean = 0;
+       } else {
+               num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
+               num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
+                           num_clean;
+               num_clean = min(ring->persistent_gnt_c, num_clean);
+               pr_debug("Going to purge at least %u persistent grants\n",
+                        num_clean);
+       }
 
        /*
         * At this point, we can assure that there will be no calls
@@ -401,9 +419,7 @@ static void purge_persistent_gnt(struct xen_blkif_ring *ring)
          * number of grants.
         */
 
-       total = num_clean;
-
-       pr_debug("Going to purge %u persistent grants\n", num_clean);
+       total = 0;
 
        BUG_ON(!list_empty(&ring->persistent_purge_list));
        root = &ring->persistent_gnts;
@@ -412,46 +428,37 @@ purge_list:
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
 
-               if (clean_used) {
-                       clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
+               if (persistent_gnt->active)
                        continue;
-               }
-
-               if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
+               if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
                        continue;
-               if (!scan_used &&
-                   (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
+               if (scan_used && total >= num_clean)
                        continue;
 
                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &ring->persistent_purge_list);
-               if (--num_clean == 0)
-                       goto finished;
+               total++;
        }
        /*
-        * If we get here it means we also need to start cleaning
+        * Check whether we also need to start cleaning
         * grants that were used since last purge in order to cope
         * with the requested num
         */
-       if (!scan_used && !clean_used) {
-               pr_debug("Still missing %u purged frames\n", num_clean);
+       if (!scan_used && total < num_clean) {
+               pr_debug("Still missing %u purged frames\n", num_clean - total);
                scan_used = true;
                goto purge_list;
        }
-finished:
-       if (!clean_used) {
-               pr_debug("Finished scanning for grants to clean, removing used flag\n");
-               clean_used = true;
-               goto purge_list;
-       }
 
-       ring->persistent_gnt_c -= (total - num_clean);
-       ring->blkif->vbd.overflow_max_grants = 0;
+       if (total) {
+               ring->persistent_gnt_c -= total;
+               ring->blkif->vbd.overflow_max_grants = 0;
 
-       /* We can defer this work */
-       schedule_work(&ring->persistent_purge_work);
-       pr_debug("Purged %u/%u\n", (total - num_clean), total);
+               /* We can defer this work */
+               schedule_work(&ring->persistent_purge_work);
+               pr_debug("Purged %u/%u\n", num_clean, total);
+       }
 
 out:
        return;
index ecb35fe..1d3002d 100644 (file)
@@ -233,16 +233,6 @@ struct xen_vbd {
 
 struct backend_info;
 
-/* Number of available flags */
-#define PERSISTENT_GNT_FLAGS_SIZE      2
-/* This persistent grant is currently in use */
-#define PERSISTENT_GNT_ACTIVE          0
-/*
- * This persistent grant has been used, this flag is set when we remove the
- * PERSISTENT_GNT_ACTIVE, to know that this grant has been used recently.
- */
-#define PERSISTENT_GNT_WAS_ACTIVE      1
-
 /* Number of requests that we can fit in a ring */
 #define XEN_BLKIF_REQS_PER_PAGE                32
 
@@ -250,7 +240,8 @@ struct persistent_gnt {
        struct page *page;
        grant_ref_t gnt;
        grant_handle_t handle;
-       DECLARE_BITMAP(flags, PERSISTENT_GNT_FLAGS_SIZE);
+       unsigned long last_used;
+       bool active;
        struct rb_node node;
        struct list_head remove_node;
 };
@@ -278,7 +269,6 @@ struct xen_blkif_ring {
        wait_queue_head_t       pending_free_wq;
 
        /* Tree to store persistent grants. */
-       spinlock_t              pers_gnts_lock;
        struct rb_root          persistent_gnts;
        unsigned int            persistent_gnt_c;
        atomic_t                persistent_gnt_in_use;
index 8986ada..429d201 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <linux/list.h>
+#include <linux/workqueue.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -121,6 +122,8 @@ static inline struct blkif_req *blkif_req(struct request *rq)
 
 static DEFINE_MUTEX(blkfront_mutex);
 static const struct block_device_operations xlvbd_block_fops;
+static struct delayed_work blkfront_work;
+static LIST_HEAD(info_list);
 
 /*
  * Maximum number of segments in indirect requests, the actual value used by
@@ -216,6 +219,7 @@ struct blkfront_info
        /* Save uncomplete reqs and bios for migration. */
        struct list_head requests;
        struct bio_list bio_list;
+       struct list_head info_list;
 };
 
 static unsigned int nr_minors;
@@ -1759,6 +1763,12 @@ abort_transaction:
        return err;
 }
 
+static void free_info(struct blkfront_info *info)
+{
+       list_del(&info->info_list);
+       kfree(info);
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_blkback(struct xenbus_device *dev,
                           struct blkfront_info *info)
@@ -1880,7 +1890,10 @@ again:
  destroy_blkring:
        blkif_free(info, 0);
 
-       kfree(info);
+       mutex_lock(&blkfront_mutex);
+       free_info(info);
+       mutex_unlock(&blkfront_mutex);
+
        dev_set_drvdata(&dev->dev, NULL);
 
        return err;
@@ -1991,6 +2004,10 @@ static int blkfront_probe(struct xenbus_device *dev,
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
        dev_set_drvdata(&dev->dev, info);
 
+       mutex_lock(&blkfront_mutex);
+       list_add(&info->info_list, &info_list);
+       mutex_unlock(&blkfront_mutex);
+
        return 0;
 }
 
@@ -2301,6 +2318,12 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
        if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
                indirect_segments = 0;
        info->max_indirect_segments = indirect_segments;
+
+       if (info->feature_persistent) {
+               mutex_lock(&blkfront_mutex);
+               schedule_delayed_work(&blkfront_work, HZ * 10);
+               mutex_unlock(&blkfront_mutex);
+       }
 }
 
 /*
@@ -2482,7 +2505,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
        mutex_unlock(&info->mutex);
 
        if (!bdev) {
-               kfree(info);
+               mutex_lock(&blkfront_mutex);
+               free_info(info);
+               mutex_unlock(&blkfront_mutex);
                return 0;
        }
 
@@ -2502,7 +2527,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
        if (info && !bdev->bd_openers) {
                xlvbd_release_gendisk(info);
                disk->private_data = NULL;
-               kfree(info);
+               mutex_lock(&blkfront_mutex);
+               free_info(info);
+               mutex_unlock(&blkfront_mutex);
        }
 
        mutex_unlock(&bdev->bd_mutex);
@@ -2585,7 +2612,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
                dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
                xlvbd_release_gendisk(info);
                disk->private_data = NULL;
-               kfree(info);
+               free_info(info);
        }
 
 out:
@@ -2618,6 +2645,61 @@ static struct xenbus_driver blkfront_driver = {
        .is_ready = blkfront_is_ready,
 };
 
+static void purge_persistent_grants(struct blkfront_info *info)
+{
+       unsigned int i;
+       unsigned long flags;
+
+       for (i = 0; i < info->nr_rings; i++) {
+               struct blkfront_ring_info *rinfo = &info->rinfo[i];
+               struct grant *gnt_list_entry, *tmp;
+
+               spin_lock_irqsave(&rinfo->ring_lock, flags);
+
+               if (rinfo->persistent_gnts_c == 0) {
+                       spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+                       continue;
+               }
+
+               list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
+                                        node) {
+                       if (gnt_list_entry->gref == GRANT_INVALID_REF ||
+                           gnttab_query_foreign_access(gnt_list_entry->gref))
+                               continue;
+
+                       list_del(&gnt_list_entry->node);
+                       gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
+                       rinfo->persistent_gnts_c--;
+                       gnt_list_entry->gref = GRANT_INVALID_REF;
+                       list_add_tail(&gnt_list_entry->node, &rinfo->grants);
+               }
+
+               spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+       }
+}
+
+static void blkfront_delay_work(struct work_struct *work)
+{
+       struct blkfront_info *info;
+       bool need_schedule_work = false;
+
+       mutex_lock(&blkfront_mutex);
+
+       list_for_each_entry(info, &info_list, info_list) {
+               if (info->feature_persistent) {
+                       need_schedule_work = true;
+                       mutex_lock(&info->mutex);
+                       purge_persistent_grants(info);
+                       mutex_unlock(&info->mutex);
+               }
+       }
+
+       if (need_schedule_work)
+               schedule_delayed_work(&blkfront_work, HZ * 10);
+
+       mutex_unlock(&blkfront_mutex);
+}
+
 static int __init xlblk_init(void)
 {
        int ret;
@@ -2626,6 +2708,15 @@ static int __init xlblk_init(void)
        if (!xen_domain())
                return -ENODEV;
 
+       if (!xen_has_pv_disk_devices())
+               return -ENODEV;
+
+       if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
+               pr_warn("xen_blk: can't get major %d with name %s\n",
+                       XENVBD_MAJOR, DEV_NAME);
+               return -ENODEV;
+       }
+
        if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
                xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
@@ -2641,14 +2732,7 @@ static int __init xlblk_init(void)
                xen_blkif_max_queues = nr_cpus;
        }
 
-       if (!xen_has_pv_disk_devices())
-               return -ENODEV;
-
-       if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
-               printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
-                      XENVBD_MAJOR, DEV_NAME);
-               return -ENODEV;
-       }
+       INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
 
        ret = xenbus_register_frontend(&blkfront_driver);
        if (ret) {
@@ -2663,6 +2747,8 @@ module_init(xlblk_init);
 
 static void __exit xlblk_exit(void)
 {
+       cancel_delayed_work_sync(&blkfront_work);
+
        xenbus_unregister_driver(&blkfront_driver);
        unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
        kfree(minors);
index 2df11cc..845b031 100644 (file)
@@ -200,6 +200,7 @@ config BT_HCIUART_RTL
        depends on BT_HCIUART
        depends on BT_HCIUART_SERDEV
        depends on GPIOLIB
+       depends on ACPI
        select BT_HCIUART_3WIRE
        select BT_RTL
        help
index ed2a5c7..4593baf 100644 (file)
@@ -144,8 +144,10 @@ static int mtk_setup_fw(struct hci_dev *hdev)
        fw_size = fw->size;
 
        /* The size of patch header is 30 bytes, should be skip */
-       if (fw_size < 30)
-               return -EINVAL;
+       if (fw_size < 30) {
+               err = -EINVAL;
+               goto free_fw;
+       }
 
        fw_size -= 30;
        fw_ptr += 30;
@@ -172,8 +174,8 @@ static int mtk_setup_fw(struct hci_dev *hdev)
                fw_ptr += dlen;
        }
 
+free_fw:
        release_firmware(fw);
-
        return err;
 }
 
index 963bb03..ea6238e 100644 (file)
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
        }
        clear_bit(HCI_UART_PROTO_SET, &hu->flags);
 
+       percpu_free_rwsem(&hu->proto_lock);
+
        kfree(hu);
 }
 
index c9bac9d..e4fe954 100644 (file)
@@ -498,32 +498,29 @@ static int sysc_check_registers(struct sysc *ddata)
 
 /**
  * syc_ioremap - ioremap register space for the interconnect target module
- * @ddata: deviec driver data
+ * @ddata: device driver data
  *
  * Note that the interconnect target module registers can be anywhere
- * within the first child device address space. For example, SGX has
- * them at offset 0x1fc00 in the 32MB module address space. We just
- * what we need around the interconnect target module registers.
+ * within the interconnect target module range. For example, SGX has
+ * them at offset 0x1fc00 in the 32MB module address space. And cpsw
+ * has them at offset 0x1200 in the CPSW_WR child. Usually the
+ * the interconnect target module registers are at the beginning of
+ * the module range though.
  */
 static int sysc_ioremap(struct sysc *ddata)
 {
-       u32 size = 0;
-
-       if (ddata->offsets[SYSC_SYSSTATUS] >= 0)
-               size = ddata->offsets[SYSC_SYSSTATUS];
-       else if (ddata->offsets[SYSC_SYSCONFIG] >= 0)
-               size = ddata->offsets[SYSC_SYSCONFIG];
-       else if (ddata->offsets[SYSC_REVISION] >= 0)
-               size = ddata->offsets[SYSC_REVISION];
-       else
-               return -EINVAL;
+       int size;
 
-       size &= 0xfff00;
-       size += SZ_256;
+       size = max3(ddata->offsets[SYSC_REVISION],
+                   ddata->offsets[SYSC_SYSCONFIG],
+                   ddata->offsets[SYSC_SYSSTATUS]);
+
+       if (size < 0 || (size + sizeof(u32)) > ddata->module_size)
+               return -EINVAL;
 
        ddata->module_va = devm_ioremap(ddata->dev,
                                        ddata->module_pa,
-                                       size);
+                                       size + sizeof(u32));
        if (!ddata->module_va)
                return -EIO;
 
@@ -1224,10 +1221,10 @@ static int sysc_child_suspend_noirq(struct device *dev)
        if (!pm_runtime_status_suspended(dev)) {
                error = pm_generic_runtime_suspend(dev);
                if (error) {
-                       dev_err(dev, "%s error at %i: %i\n",
-                               __func__, __LINE__, error);
+                       dev_warn(dev, "%s busy at %i: %i\n",
+                                __func__, __LINE__, error);
 
-                       return error;
+                       return 0;
                }
 
                error = sysc_runtime_suspend(ddata->dev);
index 113fc6e..a5d5a96 100644 (file)
@@ -2546,7 +2546,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
        if (!CDROM_CAN(CDC_SELECT_DISC) ||
            (arg == CDSL_CURRENT || arg == CDSL_NONE))
                return cdi->ops->drive_status(cdi, CDSL_CURRENT);
-       if (((int)arg >= cdi->capacity))
+       if (arg >= cdi->capacity)
                return -EINVAL;
        return cdrom_slot_status(cdi, arg);
 }
index ce277ee..4072849 100644 (file)
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
        that CPU manufacturer (perhaps with the insistence or mandate
        of a Nation State's intelligence or law enforcement agencies)
        has not installed a hidden back door to compromise the CPU's
-       random number generation facilities.
-
+       random number generation facilities. This can also be configured
+       at boot with "random.trust_cpu=on/off".
index a339766..97d6856 100644 (file)
@@ -59,8 +59,6 @@ enum bt_states {
        BT_STATE_RESET3,
        BT_STATE_RESTART,
        BT_STATE_PRINTME,
-       BT_STATE_CAPABILITIES_BEGIN,
-       BT_STATE_CAPABILITIES_END,
        BT_STATE_LONG_BUSY      /* BT doesn't get hosed :-) */
 };
 
@@ -86,7 +84,6 @@ struct si_sm_data {
        int             error_retries;  /* end of "common" fields */
        int             nonzero_status; /* hung BMCs stay all 0 */
        enum bt_states  complete;       /* to divert the state machine */
-       int             BT_CAP_outreqs;
        long            BT_CAP_req2rsp;
        int             BT_CAP_retries; /* Recommended retries */
 };
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
        case BT_STATE_RESET3:           return("RESET3");
        case BT_STATE_RESTART:          return("RESTART");
        case BT_STATE_LONG_BUSY:        return("LONG_BUSY");
-       case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
-       case BT_STATE_CAPABILITIES_END: return("CAP_END");
        }
        return("BAD STATE");
 }
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
        bt->complete = BT_STATE_IDLE;   /* end here */
        bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
        bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
-       /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
        return 3; /* We claim 3 bytes of space; ought to check SPMI table */
 }
 
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
 
 static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 {
-       unsigned char status, BT_CAP[8];
+       unsigned char status;
        static enum bt_states last_printed = BT_STATE_PRINTME;
        int i;
 
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                if (status & BT_H_BUSY)         /* clear a leftover H_BUSY */
                        BT_CONTROL(BT_H_BUSY);
 
-               bt->timeout = bt->BT_CAP_req2rsp;
-
-               /* Read BT capabilities if it hasn't been done yet */
-               if (!bt->BT_CAP_outreqs)
-                       BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
-                                       SI_SM_CALL_WITHOUT_DELAY);
                BT_SI_SM_RETURN(SI_SM_IDLE);
 
        case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                BT_STATE_CHANGE(BT_STATE_XACTION_START,
                                SI_SM_CALL_WITH_DELAY);
 
-       /*
-        * Get BT Capabilities, using timing of upper level state machine.
-        * Set outreqs to prevent infinite loop on timeout.
-        */
-       case BT_STATE_CAPABILITIES_BEGIN:
-               bt->BT_CAP_outreqs = 1;
-               {
-                       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
-                       bt->state = BT_STATE_IDLE;
-                       bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
-               }
-               bt->complete = BT_STATE_CAPABILITIES_END;
-               BT_STATE_CHANGE(BT_STATE_XACTION_START,
-                               SI_SM_CALL_WITH_DELAY);
-
-       case BT_STATE_CAPABILITIES_END:
-               i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
-               bt_init_data(bt, bt->io);
-               if ((i == 8) && !BT_CAP[2]) {
-                       bt->BT_CAP_outreqs = BT_CAP[3];
-                       bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
-                       bt->BT_CAP_retries = BT_CAP[7];
-               } else
-                       printk(KERN_WARNING "IPMI BT: using default values\n");
-               if (!bt->BT_CAP_outreqs)
-                       bt->BT_CAP_outreqs = 1;
-               printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
-                       bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
-               bt->timeout = bt->BT_CAP_req2rsp;
-               return SI_SM_CALL_WITHOUT_DELAY;
-
        default:        /* should never occur */
                return error_recovery(bt,
                                      status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 static int bt_detect(struct si_sm_data *bt)
 {
+       unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+       unsigned char BT_CAP[8];
+       enum si_sm_result smi_result;
+       int rv;
+
        /*
         * It's impossible for the BT status and interrupt registers to be
         * all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
        if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
                return 1;
        reset_flags(bt);
+
+       /*
+        * Try getting the BT capabilities here.
+        */
+       rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+       if (rv) {
+               dev_warn(bt->io->dev,
+                        "Can't start capabilities transaction: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       smi_result = SI_SM_CALL_WITHOUT_DELAY;
+       for (;;) {
+               if (smi_result == SI_SM_CALL_WITH_DELAY ||
+                   smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+                       schedule_timeout_uninterruptible(1);
+                       smi_result = bt_event(bt, jiffies_to_usecs(1));
+               } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+                       smi_result = bt_event(bt, 0);
+               } else
+                       break;
+       }
+
+       rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+       bt_init_data(bt, bt->io);
+       if (rv < 8) {
+               dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+               goto out_no_bt_cap;
+       }
+
+       if (BT_CAP[2]) {
+               dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+               dev_warn(bt->io->dev, "using default values\n");
+       } else {
+               bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+               bt->BT_CAP_retries = BT_CAP[7];
+       }
+
+       dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+                bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
        return 0;
 }
 
index 51832b8..7fc9612 100644 (file)
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
 
        rv = handlers->start_processing(send_info, intf);
        if (rv)
-               goto out;
+               goto out_err;
 
        rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
        if (rv) {
                dev_err(si_dev, "Unable to get the device id: %d\n", rv);
-               goto out;
+               goto out_err_started;
        }
 
        mutex_lock(&intf->bmc_reg_mutex);
        rv = __scan_channels(intf, &id);
        mutex_unlock(&intf->bmc_reg_mutex);
+       if (rv)
+               goto out_err_bmc_reg;
 
- out:
-       if (rv) {
-               ipmi_bmc_unregister(intf);
-               list_del_rcu(&intf->link);
-               mutex_unlock(&ipmi_interfaces_mutex);
-               synchronize_srcu(&ipmi_interfaces_srcu);
-               cleanup_srcu_struct(&intf->users_srcu);
-               kref_put(&intf->refcount, intf_free);
-       } else {
-               /*
-                * Keep memory order straight for RCU readers.  Make
-                * sure everything else is committed to memory before
-                * setting intf_num to mark the interface valid.
-                */
-               smp_wmb();
-               intf->intf_num = i;
-               mutex_unlock(&ipmi_interfaces_mutex);
+       /*
+        * Keep memory order straight for RCU readers.  Make
+        * sure everything else is committed to memory before
+        * setting intf_num to mark the interface valid.
+        */
+       smp_wmb();
+       intf->intf_num = i;
+       mutex_unlock(&ipmi_interfaces_mutex);
 
-               /* After this point the interface is legal to use. */
-               call_smi_watchers(i, intf->si_dev);
-       }
+       /* After this point the interface is legal to use. */
+       call_smi_watchers(i, intf->si_dev);
+
+       return 0;
+
+ out_err_bmc_reg:
+       ipmi_bmc_unregister(intf);
+ out_err_started:
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
+ out_err:
+       list_del_rcu(&intf->link);
+       mutex_unlock(&ipmi_interfaces_mutex);
+       synchronize_srcu(&ipmi_interfaces_srcu);
+       cleanup_srcu_struct(&intf->users_srcu);
+       kref_put(&intf->refcount, intf_free);
 
        return rv;
 }
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
        }
        srcu_read_unlock(&intf->users_srcu, index);
 
-       intf->handlers->shutdown(intf->send_info);
+       if (intf->handlers->shutdown)
+               intf->handlers->shutdown(intf->send_info);
 
        cleanup_smi_msgs(intf);
 
index 90ec010..5faa917 100644 (file)
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
                 si_to_str[new_smi->io.si_type]);
 
        WARN_ON(new_smi->io.dev->init_name != NULL);
-       kfree(init_name);
-
-       return 0;
-
-out_err:
-       if (new_smi->intf) {
-               ipmi_unregister_smi(new_smi->intf);
-               new_smi->intf = NULL;
-       }
 
+ out_err:
        kfree(init_name);
-
        return rv;
 }
 
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
 
        kfree(smi_info->si_sm);
        smi_info->si_sm = NULL;
+
+       smi_info->intf = NULL;
 }
 
 /*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
 
        list_del(&smi_info->link);
 
-       if (smi_info->intf) {
+       if (smi_info->intf)
                ipmi_unregister_smi(smi_info->intf);
-               smi_info->intf = NULL;
-       }
 
        if (smi_info->pdev) {
                if (smi_info->pdev_registered)
index 18e4650..29e67a8 100644 (file)
@@ -181,6 +181,8 @@ struct ssif_addr_info {
        struct device *dev;
        struct i2c_client *client;
 
+       struct i2c_client *added_client;
+
        struct mutex clients_mutex;
        struct list_head clients;
 
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
                complete(&ssif_info->wake_thread);
                kthread_stop(ssif_info->thread);
        }
-
-       /*
-        * No message can be outstanding now, we have removed the
-        * upper layer and it permitted us to do so.
-        */
-       kfree(ssif_info);
 }
 
 static int ssif_remove(struct i2c_client *client)
 {
        struct ssif_info *ssif_info = i2c_get_clientdata(client);
-       struct ipmi_smi *intf;
        struct ssif_addr_info *addr_info;
 
        if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
         * After this point, we won't deliver anything asychronously
         * to the message handler.  We can unregister ourself.
         */
-       intf = ssif_info->intf;
-       ssif_info->intf = NULL;
-       ipmi_unregister_smi(intf);
+       ipmi_unregister_smi(ssif_info->intf);
 
        list_for_each_entry(addr_info, &ssif_infos, link) {
                if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
                }
        }
 
+       kfree(ssif_info);
+
        return 0;
 }
 
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
  out:
        if (rv) {
-               /*
-                * Note that if addr_info->client is assigned, we
-                * leave it.  The i2c client hangs around even if we
-                * return a failure here, and the failure here is not
-                * propagated back to the i2c code.  This seems to be
-                * design intent, strange as it may be.  But if we
-                * don't leave it, ssif_platform_remove will not remove
-                * the client like it should.
-                */
+               if (addr_info)
+                       addr_info->client = NULL;
+
                dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
                kfree(ssif_info);
        }
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
        if (adev->type != &i2c_adapter_type)
                return 0;
 
-       i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+       addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+                                                &addr_info->binfo);
 
        if (!addr_info->adapter_name)
                return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
                return 0;
 
        mutex_lock(&ssif_infos_mutex);
-       i2c_unregister_device(addr_info->client);
+       i2c_unregister_device(addr_info->added_client);
 
        list_del(&addr_info->link);
        kfree(addr_info);
index bb882ab..e6124bd 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "kcs_bmc.h"
 
+#define DEVICE_NAME "ipmi-kcs"
+
 #define KCS_MSG_BUFSIZ    1000
 
 #define KCS_ZERO_DATA     0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
        if (!kcs_bmc)
                return NULL;
 
-       dev_set_name(dev, "ipmi-kcs%u", channel);
-
        spin_lock_init(&kcs_bmc->lock);
        kcs_bmc->channel = channel;
 
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
                return NULL;
 
        kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
-       kcs_bmc->miscdev.name = dev_name(dev);
+       kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+                                              DEVICE_NAME, channel);
        kcs_bmc->miscdev.fops = &kcs_bmc_fops;
 
        return kcs_bmc;
index bf5f99f..c75b6cd 100644 (file)
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
 
 static void invalidate_batched_entropy(void);
 
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+       return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
 static void crng_initialize(struct crng_state *crng)
 {
        int             i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
                }
                crng->state[i] ^= rv;
        }
-#ifdef CONFIG_RANDOM_TRUST_CPU
-       if (arch_init) {
+       if (trust_cpu && arch_init) {
                crng_init = 2;
                pr_notice("random: crng done (trusting CPU's manufacturer)\n");
        }
-#endif
        crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
 }
 
index 740af90..c5edf8f 100644 (file)
@@ -558,8 +558,8 @@ static void __init npcm7xx_clk_init(struct device_node *clk_np)
        if (!clk_base)
                goto npcm7xx_init_error;
 
-       npcm7xx_clk_data = kzalloc(sizeof(*npcm7xx_clk_data->hws) *
-               NPCM7XX_NUM_CLOCKS + sizeof(npcm7xx_clk_data), GFP_KERNEL);
+       npcm7xx_clk_data = kzalloc(struct_size(npcm7xx_clk_data, hws,
+                                  NPCM7XX_NUM_CLOCKS), GFP_KERNEL);
        if (!npcm7xx_clk_data)
                goto npcm7xx_init_np_err;
 
index 08ef699..d977193 100644 (file)
@@ -55,6 +55,7 @@ struct clk_plt_data {
        u8 nparents;
        struct clk_plt *clks[PMC_CLK_NUM];
        struct clk_lookup *mclk_lookup;
+       struct clk_lookup *ether_clk_lookup;
 };
 
 /* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
        pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
        spin_lock_init(&pclk->lock);
 
-       /*
-        * If the clock was already enabled by the firmware mark it as critical
-        * to avoid it being gated by the clock framework if no driver owns it.
-        */
-       if (plt_clk_is_enabled(&pclk->hw))
-               init.flags |= CLK_IS_CRITICAL;
-
        ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
        if (ret) {
                pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
                goto err_unreg_clk_plt;
        }
 
+       data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
+                                                 "ether_clk", NULL);
+       if (!data->ether_clk_lookup) {
+               err = -ENOMEM;
+               goto err_drop_mclk;
+       }
+
        plt_clk_free_parent_names_loop(parent_names, data->nparents);
 
        platform_set_drvdata(pdev, data);
        return 0;
 
+err_drop_mclk:
+       clkdev_drop(data->mclk_lookup);
 err_unreg_clk_plt:
        plt_clk_unregister_loop(data, i);
        plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
 
        data = platform_get_drvdata(pdev);
 
+       clkdev_drop(data->ether_clk_lookup);
        clkdev_drop(data->mclk_lookup);
        plt_clk_unregister_loop(data, PMC_CLK_NUM);
        plt_clk_unregister_parents(data);
index fb62f39..3a0996f 100644 (file)
@@ -46,7 +46,7 @@ static int st_clk_probe(struct platform_device *pdev)
                clk_oscout1_parents, ARRAY_SIZE(clk_oscout1_parents),
                0, st_data->base + CLKDRVSTR2, OSCOUT1CLK25MHZ, 3, 0, NULL);
 
-       clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_25M]->clk);
+       clk_set_parent(hws[ST_CLK_MUX]->clk, hws[ST_CLK_48M]->clk);
 
        hws[ST_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1", "oscout1_mux",
                0, st_data->base + MISCCLKCNTL1, OSCCLKENB,
index ec8a437..2fab18f 100644 (file)
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        data->base = of_iomap(node, 0);
        if (!data->base) {
                pr_err("Could not map PIT address\n");
-               return -ENXIO;
+               ret = -ENXIO;
+               goto exit;
        }
 
        data->mck = of_clk_get(node, 0);
        if (IS_ERR(data->mck)) {
                pr_err("Unable to get mck clk\n");
-               return PTR_ERR(data->mck);
+               ret = PTR_ERR(data->mck);
+               goto exit;
        }
 
        ret = clk_prepare_enable(data->mck);
        if (ret) {
                pr_err("Unable to enable mck\n");
-               return ret;
+               goto exit;
        }
 
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
        if (!data->irq) {
                pr_err("Unable to get IRQ from DT\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto exit;
        }
 
        /*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        ret = clocksource_register_hz(&data->clksrc, pit_rate);
        if (ret) {
                pr_err("Failed to register clocksource\n");
-               return ret;
+               goto exit;
        }
 
        /* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                          "at91_tick", data);
        if (ret) {
                pr_err("Unable to setup IRQ\n");
-               return ret;
+               clocksource_unregister(&data->clksrc);
+               goto exit;
        }
 
        /* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
        clockevents_register_device(&data->clkevt);
 
        return 0;
+
+exit:
+       kfree(data);
+       return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
                       at91sam926x_pit_dt_init);
index c020038..cf93f64 100644 (file)
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
        cr &= ~fttmr010->t1_enable_val;
        writel(cr, fttmr010->base + TIMER_CR);
 
-       /* Setup the match register forward/backward in time */
-       cr = readl(fttmr010->base + TIMER1_COUNT);
-       if (fttmr010->count_down)
-               cr -= cycles;
-       else
-               cr += cycles;
-       writel(cr, fttmr010->base + TIMER1_MATCH1);
+       if (fttmr010->count_down) {
+               /*
+                * ASPEED Timer Controller will load TIMER1_LOAD register
+                * into TIMER1_COUNT register when the timer is re-enabled.
+                */
+               writel(cycles, fttmr010->base + TIMER1_LOAD);
+       } else {
+               /* Setup the match register forward in time */
+               cr = readl(fttmr010->base + TIMER1_COUNT);
+               writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+       }
 
        /* Start */
        cr = readl(fttmr010->base + TIMER_CR);
index 29e2e1a..6949a91 100644 (file)
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
                return -ENXIO;
        }
 
+       if (!of_machine_is_compatible("ti,am43"))
+               ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
        ti_32k_timer.counter = ti_32k_timer.base;
 
        /*
index a1830fa..2a3675c 100644 (file)
@@ -44,7 +44,7 @@ enum _msm8996_version {
 
 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
 
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
        size_t len;
        u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);
 
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
        platform_device_unregister(kryo_cpufreq_pdev);
        platform_driver_unregister(&qcom_cpufreq_kryo_driver);
index 110483f..e26a409 100644 (file)
@@ -379,9 +379,20 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                if (idx == -1)
                        idx = i; /* first enabled state */
                if (s->target_residency > data->predicted_us) {
-                       if (!tick_nohz_tick_stopped())
+                       if (data->predicted_us < TICK_USEC)
                                break;
 
+                       if (!tick_nohz_tick_stopped()) {
+                               /*
+                                * If the state selected so far is shallow,
+                                * waking up early won't hurt, so retain the
+                                * tick in that case and let the governor run
+                                * again in the next iteration of the loop.
+                                */
+                               expected_interval = drv->states[idx].target_residency;
+                               break;
+                       }
+
                        /*
                         * If the state selected so far is shallow and this
                         * state's target residency matches the time till the
index d676679..ec40f99 100644 (file)
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
+       edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+                                                 desc_bytes);
        edesc->iv_dir = DMA_TO_DEVICE;
 
        /* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->sec4_sg_bytes = sec4_sg_bytes;
-       edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
-                        desc_bytes;
+       edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+                                                 desc_bytes);
        edesc->iv_dir = DMA_FROM_DEVICE;
 
        /* Make sure IV is located in a DMAable area */
index 6e61cc9..d7aa7d7 100644 (file)
@@ -679,10 +679,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        int ret = 0;
 
        if (keylen != 2 * AES_MIN_KEY_SIZE  && keylen != 2 * AES_MAX_KEY_SIZE) {
-               crypto_ablkcipher_set_flags(ablkcipher,
-                                           CRYPTO_TFM_RES_BAD_KEY_LEN);
                dev_err(jrdev, "key size mismatch\n");
-               return -EINVAL;
+               goto badkey;
        }
 
        ctx->cdata.keylen = keylen;
@@ -715,7 +713,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        return ret;
 badkey:
        crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
-       return 0;
+       return -EINVAL;
 }
 
 /*
index 578ea63..f26d62e 100644 (file)
@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
        dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
        dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
-       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+       dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
 }
 
 /* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
                goto unmap_p;
        }
 
-       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_q;
        }
 
-       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
        return 0;
 
 unmap_tmp1:
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_q:
        dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
 unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
                goto unmap_dq;
        }
 
-       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
+       pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp1_dma)) {
                dev_err(dev, "Unable to map RSA tmp1 memory\n");
                goto unmap_qinv;
        }
 
-       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
+       pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, pdb->tmp2_dma)) {
                dev_err(dev, "Unable to map RSA tmp2 memory\n");
                goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
        return 0;
 
 unmap_tmp1:
-       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+       dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
 unmap_qinv:
        dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
 unmap_dq:
index f4f2580..acdd720 100644 (file)
@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
                BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
 
                /* Unmap just-run descriptor so we can post-process */
-               dma_unmap_single(dev, jrp->outring[hw_idx].desc,
+               dma_unmap_single(dev,
+                                caam_dma_to_cpu(jrp->outring[hw_idx].desc),
                                 jrp->entinfo[sw_idx].desc_size,
                                 DMA_TO_DEVICE);
 
index 9a476bb..af59645 100644 (file)
@@ -35,6 +35,7 @@ struct nitrox_cmdq {
        /* requests in backlog queues */
        atomic_t backlog_count;
 
+       int write_idx;
        /* command size 32B/64B */
        u8 instr_size;
        u8 qno;
@@ -87,7 +88,7 @@ struct nitrox_bh {
        struct bh_data *slc;
 };
 
-/* NITROX-5 driver state */
+/* NITROX-V driver state */
 #define NITROX_UCODE_LOADED    0
 #define NITROX_READY           1
 
index ebe2673..4d31df0 100644 (file)
@@ -36,6 +36,7 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
        cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
        cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
        cmdq->qsize = (qsize + PKT_IN_ALIGN);
+       cmdq->write_idx = 0;
 
        spin_lock_init(&cmdq->response_lock);
        spin_lock_init(&cmdq->cmdq_lock);
index deaefd5..4a362fc 100644 (file)
  *   Invalid flag options in AES-CCM IV.
  */
 
+static inline int incr_index(int index, int count, int max)
+{
+       if ((index + count) >= max)
+               index = index + count - max;
+       else
+               index += count;
+
+       return index;
+}
+
 /**
  * dma_free_sglist - unmap and free the sg lists.
  * @ndev: N5 device
@@ -426,30 +436,29 @@ static void post_se_instr(struct nitrox_softreq *sr,
                          struct nitrox_cmdq *cmdq)
 {
        struct nitrox_device *ndev = sr->ndev;
-       union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
-       u64 offset;
+       int idx;
        u8 *ent;
 
        spin_lock_bh(&cmdq->cmdq_lock);
 
-       /* get the next write offset */
-       offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
-       pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
+       idx = cmdq->write_idx;
        /* copy the instruction */
-       ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
+       ent = cmdq->head + (idx * cmdq->instr_size);
        memcpy(ent, &sr->instr, cmdq->instr_size);
-       /* flush the command queue updates */
-       dma_wmb();
 
-       sr->tstamp = jiffies;
        atomic_set(&sr->status, REQ_POSTED);
        response_list_add(sr, cmdq);
+       sr->tstamp = jiffies;
+       /* flush the command queue updates */
+       dma_wmb();
 
        /* Ring doorbell with count 1 */
        writeq(1, cmdq->dbell_csr_addr);
        /* orders the doorbell rings */
        mmiowb();
 
+       cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
+
        spin_unlock_bh(&cmdq->cmdq_lock);
 }
 
@@ -459,6 +468,9 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
        struct nitrox_softreq *sr, *tmp;
        int ret = 0;
 
+       if (!atomic_read(&cmdq->backlog_count))
+               return 0;
+
        spin_lock_bh(&cmdq->backlog_lock);
 
        list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
@@ -466,7 +478,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
 
                /* submit until space available */
                if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
-                       ret = -EBUSY;
+                       ret = -ENOSPC;
                        break;
                }
                /* delete from backlog list */
@@ -491,23 +503,20 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
 {
        struct nitrox_cmdq *cmdq = sr->cmdq;
        struct nitrox_device *ndev = sr->ndev;
-       int ret = -EBUSY;
+
+       /* try to post backlog requests */
+       post_backlog_cmds(cmdq);
 
        if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
                if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-                       return -EAGAIN;
-
+                       return -ENOSPC;
+               /* add to backlog list */
                backlog_list_add(sr, cmdq);
-       } else {
-               ret = post_backlog_cmds(cmdq);
-               if (ret) {
-                       backlog_list_add(sr, cmdq);
-                       return ret;
-               }
-               post_se_instr(sr, cmdq);
-               ret = -EINPROGRESS;
+               return -EBUSY;
        }
-       return ret;
+       post_se_instr(sr, cmdq);
+
+       return -EINPROGRESS;
 }
 
 /**
@@ -624,11 +633,9 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
         */
        sr->instr.fdata[0] = *((u64 *)&req->gph);
        sr->instr.fdata[1] = 0;
-       /* flush the soft_req changes before posting the cmd */
-       wmb();
 
        ret = nitrox_enqueue_request(sr);
-       if (ret == -EAGAIN)
+       if (ret == -ENOSPC)
                goto send_fail;
 
        return ret;
index 218739b..72790d8 100644 (file)
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
 static struct sev_misc_dev *misc_dev;
 static struct psp_device *psp_master;
 
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
 static struct psp_device *psp_alloc_struct(struct sp_device *sp)
 {
        struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
        return IRQ_HANDLED;
 }
 
-static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+                           unsigned int *reg, unsigned int timeout)
 {
-       wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+       int ret;
+
+       ret = wait_event_timeout(psp->sev_int_queue,
+                       psp->sev_int_rcvd, timeout * HZ);
+       if (!ret)
+               return -ETIMEDOUT;
+
        *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+       return 0;
 }
 
 static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
        if (!psp)
                return -ENODEV;
 
+       if (psp_dead)
+               return -EBUSY;
+
        /* Get the physical address of the command buffer */
        phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
        phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
 
-       dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
-               cmd, phys_msb, phys_lsb);
+       dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+               cmd, phys_msb, phys_lsb, psp_timeout);
 
        print_hex_dump_debug("(in):  ", DUMP_PREFIX_OFFSET, 16, 2, data,
                             sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
        iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
 
        /* wait for command completion */
-       sev_wait_cmd_ioc(psp, &reg);
+       ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+       if (ret) {
+               if (psp_ret)
+                       *psp_ret = 0;
+
+               dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
+               psp_dead = true;
+
+               return ret;
+       }
+
+       psp_timeout = psp_cmd_timeout;
 
        if (psp_ret)
                *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
 
        psp_master = sp->psp_data;
 
+       psp_timeout = psp_probe_timeout;
+
        if (sev_get_api_version())
                goto err;
 
index 5c539af..010bbf6 100644 (file)
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
 }
 
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+                                int pci_chan_id)
 {
        struct cpl_rx_phys_dsgl *phys_cpl;
 
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
+       phys_cpl->rss_hdr_int.channel = pci_chan_id;
 }
 
 static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);
 
-       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+       chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
                                    adap->vres.ncrypto_fc);
                rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
                txq_perchan = ntxq / u_ctx->lldi.nchan;
-               rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
-               rxq_idx += id % rxq_perchan;
-               txq_idx = ctx->dev->tx_channel_id * txq_perchan;
-               txq_idx += id % txq_perchan;
                spin_lock(&ctx->dev->lock_chcr_dev);
-               ctx->rx_qidx = rxq_idx;
-               ctx->tx_qidx = txq_idx;
+               ctx->tx_chan_id = ctx->dev->tx_channel_id;
                ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
                ctx->dev->rx_channel_id = 0;
                spin_unlock(&ctx->dev->lock_chcr_dev);
+               rxq_idx = ctx->tx_chan_id * rxq_perchan;
+               rxq_idx += id % rxq_perchan;
+               txq_idx = ctx->tx_chan_id * txq_perchan;
+               txq_idx += id % txq_perchan;
+               ctx->rx_qidx = rxq_idx;
+               ctx->tx_qidx = txq_idx;
+               /* Channel Id used by SGE to forward packet to Host.
+                * Same value should be used in cpl_fw6_pld RSS_CH field
+                * by FW. Driver programs PCI channel ID to be used in fw
+                * at the time of queue allocation with value "pi->tx_chan"
+                */
+               ctx->pci_chan_id = txq_idx / txq_perchan;
        }
 out:
        return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct dsgl_walk dsgl_walk;
        unsigned int authsize = crypto_aead_authsize(tfm);
+       struct chcr_context *ctx = a_ctx(tfm);
        u32 temp;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
        dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
        temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
        dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
                             unsigned short qid)
 {
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+       struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+       struct chcr_context *ctx = c_ctx(tfm);
        struct dsgl_walk dsgl_walk;
 
        dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
        reqctx->dstsg = dsgl_walk.last_sg;
        reqctx->dst_ofst = dsgl_walk.last_sg_len;
 
-       dsgl_walk_end(&dsgl_walk, qid);
+       dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
 }
 
 void chcr_add_hash_src_ent(struct ahash_request *req,
index 54835cb..0d2c70c 100644 (file)
@@ -255,6 +255,8 @@ struct chcr_context {
        struct chcr_dev *dev;
        unsigned char tx_qidx;
        unsigned char rx_qidx;
+       unsigned char tx_chan_id;
+       unsigned char pci_chan_id;
        struct __crypto_ctx crypto_ctx[0];
 };
 
index a53a0e6..7725b6e 100644 (file)
@@ -96,6 +96,10 @@ enum csk_flags {
        CSK_CONN_INLINE,        /* Connection on HW */
 };
 
+enum chtls_cdev_state {
+       CHTLS_CDEV_STATE_UP = 1
+};
+
 struct listen_ctx {
        struct sock *lsk;
        struct chtls_dev *cdev;
@@ -146,6 +150,7 @@ struct chtls_dev {
        unsigned int send_page_order;
        int max_host_sndbuf;
        struct key_map kmap;
+       unsigned int cdev_state;
 };
 
 struct chtls_hws {
index 9b07f91..f59b044 100644 (file)
@@ -160,6 +160,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
        tlsdev->hash = chtls_create_hash;
        tlsdev->unhash = chtls_destroy_hash;
        tls_register_device(&cdev->tlsdev);
+       cdev->cdev_state = CHTLS_CDEV_STATE_UP;
 }
 
 static void chtls_unregister_dev(struct chtls_dev *cdev)
@@ -281,8 +282,10 @@ static void chtls_free_all_uld(void)
        struct chtls_dev *cdev, *tmp;
 
        mutex_lock(&cdev_mutex);
-       list_for_each_entry_safe(cdev, tmp, &cdev_list, list)
-               chtls_free_uld(cdev);
+       list_for_each_entry_safe(cdev, tmp, &cdev_list, list) {
+               if (cdev->cdev_state == CHTLS_CDEV_STATE_UP)
+                       chtls_free_uld(cdev);
+       }
        mutex_unlock(&cdev_mutex);
 }
 
index a10c418..56bd281 100644 (file)
@@ -63,7 +63,7 @@ struct dcp {
        struct dcp_coherent_block       *coh;
 
        struct completion               completion[DCP_MAX_CHANS];
-       struct mutex                    mutex[DCP_MAX_CHANS];
+       spinlock_t                      lock[DCP_MAX_CHANS];
        struct task_struct              *thread[DCP_MAX_CHANS];
        struct crypto_queue             queue[DCP_MAX_CHANS];
 };
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
 
        int ret;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
                if (arq) {
                        ret = mxs_dcp_aes_block_crypt(arq);
                        arq->complete(arq, ret);
-                       continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
        rctx->ecb = ecb;
        actx->chan = DCP_CHAN_CRYPTO;
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
 
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
        struct ahash_request *req;
        int ret, fini;
 
-       do {
-               __set_current_state(TASK_INTERRUPTIBLE);
+       while (!kthread_should_stop()) {
+               set_current_state(TASK_INTERRUPTIBLE);
 
-               mutex_lock(&sdcp->mutex[chan]);
+               spin_lock(&sdcp->lock[chan]);
                backlog = crypto_get_backlog(&sdcp->queue[chan]);
                arq = crypto_dequeue_request(&sdcp->queue[chan]);
-               mutex_unlock(&sdcp->mutex[chan]);
+               spin_unlock(&sdcp->lock[chan]);
+
+               if (!backlog && !arq) {
+                       schedule();
+                       continue;
+               }
+
+               set_current_state(TASK_RUNNING);
 
                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
                        ret = dcp_sha_req_to_buf(arq);
                        fini = rctx->fini;
                        arq->complete(arq, ret);
-                       if (!fini)
-                               continue;
                }
-
-               schedule();
-       } while (!kthread_should_stop());
+       }
 
        return 0;
 }
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
                rctx->init = 1;
        }
 
-       mutex_lock(&sdcp->mutex[actx->chan]);
+       spin_lock(&sdcp->lock[actx->chan]);
        ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
-       mutex_unlock(&sdcp->mutex[actx->chan]);
+       spin_unlock(&sdcp->lock[actx->chan]);
 
        wake_up_process(sdcp->thread[actx->chan]);
        mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sdcp);
 
        for (i = 0; i < DCP_MAX_CHANS; i++) {
-               mutex_init(&sdcp->mutex[i]);
+               spin_lock_init(&sdcp->lock[i]);
                init_completion(&sdcp->completion[i]);
                crypto_init_queue(&sdcp->queue[i], 50);
        }
index ba197f3..763c216 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 24ec908..613c7d5 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 59a5a0d..9cb8329 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index b9f3e0e..278452b 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index be5c5a9..3a9708e 100644 (file)
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 26ab17b..3da0f95 100644 (file)
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
-       int ret, bar_mask;
+       unsigned long bar_mask;
+       int ret;
 
        switch (ent->device) {
        case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Find and map all the device's BARS */
        i = 0;
        bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
-       for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
-                        ADF_PCI_MAX_BARS * 2) {
+       for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
 
                bar->base_addr = pci_resource_start(pdev, bar_nr);
index 5285ece..b718958 100644 (file)
@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                ret = crypto_skcipher_encrypt(req);
                skcipher_request_zero(req);
        } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_vsx();
-
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
                while ((nbytes = walk.nbytes)) {
+                       preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_vsx();
                        aes_p8_cbc_encrypt(walk.src.virt.addr,
                                           walk.dst.virt.addr,
                                           nbytes & AES_BLOCK_MASK,
                                           &ctx->enc_key, walk.iv, 1);
+                       disable_kernel_vsx();
+                       pagefault_enable();
+                       preempt_enable();
+
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-
-               disable_kernel_vsx();
-               pagefault_enable();
-               preempt_enable();
        }
 
        return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                ret = crypto_skcipher_decrypt(req);
                skcipher_request_zero(req);
        } else {
-               preempt_disable();
-               pagefault_disable();
-               enable_kernel_vsx();
-
                blkcipher_walk_init(&walk, dst, src, nbytes);
                ret = blkcipher_walk_virt(desc, &walk);
                while ((nbytes = walk.nbytes)) {
+                       preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_vsx();
                        aes_p8_cbc_encrypt(walk.src.virt.addr,
                                           walk.dst.virt.addr,
                                           nbytes & AES_BLOCK_MASK,
                                           &ctx->dec_key, walk.iv, 0);
+                       disable_kernel_vsx();
+                       pagefault_enable();
+                       preempt_enable();
+
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-
-               disable_kernel_vsx();
-               pagefault_enable();
-               preempt_enable();
        }
 
        return ret;
index 8bd9aff..e9954a7 100644 (file)
@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
                ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
                skcipher_request_zero(req);
        } else {
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+
+               ret = blkcipher_walk_virt(desc, &walk);
+
                preempt_disable();
                pagefault_disable();
                enable_kernel_vsx();
 
-               blkcipher_walk_init(&walk, dst, src, nbytes);
-
-               ret = blkcipher_walk_virt(desc, &walk);
                iv = walk.iv;
                memset(tweak, 0, AES_BLOCK_SIZE);
                aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
 
+               disable_kernel_vsx();
+               pagefault_enable();
+               preempt_enable();
+
                while ((nbytes = walk.nbytes)) {
+                       preempt_disable();
+                       pagefault_disable();
+                       enable_kernel_vsx();
                        if (enc)
                                aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
                        else
                                aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                                nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
+                       disable_kernel_vsx();
+                       pagefault_enable();
+                       preempt_enable();
 
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
-
-               disable_kernel_vsx();
-               pagefault_enable();
-               preempt_enable();
        }
        return ret;
 }
index 6fd4608..948806e 100644 (file)
@@ -392,7 +392,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 {
        struct file *filp = vmf->vma->vm_file;
        unsigned long fault_size;
-       int rc, id;
+       vm_fault_t rc = VM_FAULT_SIGBUS;
+       int id;
        pfn_t pfn;
        struct dev_dax *dev_dax = filp->private_data;
 
@@ -534,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
        return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }
 
+static const struct address_space_operations dev_dax_aops = {
+       .set_page_dirty         = noop_set_page_dirty,
+       .invalidatepage         = noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
        struct dax_device *dax_dev = inode_dax(inode);
@@ -543,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
        dev_dbg(&dev_dax->dev, "trace\n");
        inode->i_mapping = __dax_inode->i_mapping;
        inode->i_mapping->host = __dax_inode;
+       inode->i_mapping->a_ops = &dev_dax_aops;
        filp->f_mapping = inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
        filp->private_data = dev_dax;
index b76cb17..adfd316 100644 (file)
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
        int ret;
        struct device *dev = &mbdev->dev;
 
-       mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
+       mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
        if (!mic_dma_dev) {
                ret = -ENOMEM;
                goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
 reg_error:
        mic_dma_uninit(mic_dma_dev);
 init_error:
-       kfree(mic_dma_dev);
        mic_dma_dev = NULL;
 alloc_error:
        dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
 static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
 {
        mic_dma_uninit(mic_dma_dev);
-       kfree(mic_dma_dev);
 }
 
 /* DEBUGFS CODE */
index 721e6c5..6434294 100644 (file)
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
                                        le32_to_cpu(attr->sustained_freq_khz);
                dom_info->sustained_perf_level =
                                        le32_to_cpu(attr->sustained_perf_level);
-               dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
+               if (!dom_info->sustained_freq_khz ||
+                   !dom_info->sustained_perf_level)
+                       /* CPUFreq converts to kHz, hence default 1000 */
+                       dom_info->mult_factor = 1000;
+               else
+                       dom_info->mult_factor =
+                                       (dom_info->sustained_freq_khz * 1000) /
                                        dom_info->sustained_perf_level;
                memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }
index d8e159f..89110df 100644 (file)
@@ -90,14 +90,17 @@ config EFI_ARMSTUB
 config EFI_ARMSTUB_DTB_LOADER
        bool "Enable the DTB loader"
        depends on EFI_ARMSTUB
+       default y
        help
          Select this config option to add support for the dtb= command
          line parameter, allowing a device tree blob to be loaded into
          memory from the EFI System Partition by the stub.
 
-         The device tree is typically provided by the platform or by
-         the bootloader, so this option is mostly for development
-         purposes only.
+         If the device tree is provided by the platform or by
+         the bootloader this option may not be needed.
+         But, for various development reasons and to maintain existing
+         functionality for bootloaders that do not have such support
+         this option is necessary.
 
 config EFI_BOOTLOADER_CONTROL
        tristate "EFI Bootloader Control"
index fc9fd2d..0b84053 100644 (file)
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
                /* Create region for each port */
                fme_region = dfl_fme_create_region(pdata, mgr,
                                                   fme_br->br, i);
-               if (!fme_region) {
+               if (IS_ERR(fme_region)) {
                        ret = PTR_ERR(fme_region);
                        goto destroy_region;
                }
index 0b7e19c..51a5ac2 100644 (file)
@@ -14,6 +14,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
 #include <linux/fpga/fpga-region.h>
 
 #include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
 static int fme_region_remove(struct platform_device *pdev)
 {
        struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+       struct fpga_manager *mgr = region->mgr;
 
        fpga_region_unregister(region);
-       fpga_mgr_put(region->mgr);
+       fpga_mgr_put(mgr);
 
        return 0;
 }
index 24b8f98..c983dac 100644 (file)
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
  *
  * Given a device, get an exclusive reference to a fpga bridge.
  *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
  */
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
                                    struct fpga_image_info *info)
index 35fabb8..052a134 100644 (file)
@@ -437,9 +437,10 @@ eprobe_mgr_put:
 static int of_fpga_region_remove(struct platform_device *pdev)
 {
        struct fpga_region *region = platform_get_drvdata(pdev);
+       struct fpga_manager *mgr = region->mgr;
 
        fpga_region_unregister(region);
-       fpga_mgr_put(region->mgr);
+       fpga_mgr_put(mgr);
 
        return 0;
 }
index 3530ccd..da9781a 100644 (file)
@@ -41,6 +41,8 @@ struct adp5588_gpio {
        uint8_t int_en[3];
        uint8_t irq_mask[3];
        uint8_t irq_stat[3];
+       uint8_t int_input_en[3];
+       uint8_t int_lvl_cached[3];
 };
 
 static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
        struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
        int i;
 
-       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+       for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+               if (dev->int_input_en[i]) {
+                       mutex_lock(&dev->lock);
+                       dev->dir[i] &= ~dev->int_input_en[i];
+                       dev->int_input_en[i] = 0;
+                       adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+                                          dev->dir[i]);
+                       mutex_unlock(&dev->lock);
+               }
+
+               if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+                       dev->int_lvl_cached[i] = dev->int_lvl[i];
+                       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+                                          dev->int_lvl[i]);
+               }
+
                if (dev->int_en[i] ^ dev->irq_mask[i]) {
                        dev->int_en[i] = dev->irq_mask[i];
                        adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
                                           dev->int_en[i]);
                }
+       }
 
        mutex_unlock(&dev->irq_lock);
 }
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
        else
                return -EINVAL;
 
-       adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
-       adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
-                          dev->int_lvl[bank]);
+       dev->int_input_en[bank] |= bit;
 
        return 0;
 }
index 28da700..044888f 100644 (file)
@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
 out_unregister:
        dwapb_gpio_unregister(gpio);
        dwapb_irq_teardown(gpio);
+       clk_disable_unprepare(gpio->clk);
 
        return err;
 }
index c48ed9d..8b9d7e4 100644 (file)
@@ -25,7 +25,6 @@
 
 struct acpi_gpio_event {
        struct list_head node;
-       struct list_head initial_sync_list;
        acpi_handle handle;
        unsigned int pin;
        unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
        struct mutex conn_lock;
        struct gpio_chip *chip;
        struct list_head events;
+       struct list_head deferred_req_irqs_list_entry;
 };
 
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run.  This list contains gpiochips
+ * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
 
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
        return gpiochip_get_desc(chip, pin);
 }
 
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       if (!list_empty(&event->initial_sync_list))
-               list_del_init(&event->initial_sync_list);
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
        struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
        gpiod_direction_input(desc);
 
-       value = gpiod_get_value(desc);
+       value = gpiod_get_value_cansleep(desc);
 
        ret = gpiochip_lock_as_irq(chip, pin);
        if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
        event->irq = irq;
        event->pin = pin;
        event->desc = desc;
-       INIT_LIST_HEAD(&event->initial_sync_list);
 
        ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                   "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
         * may refer to OperationRegions from other (builtin) drivers which
         * may be probed after us.
         */
-       if (handler == acpi_gpio_irq_handler &&
-           (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
-               acpi_gpio_add_to_initial_sync_list(event);
+       if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+           ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+               handler(event->irq, event);
 
        return AE_OK;
 
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        struct acpi_gpio_chip *acpi_gpio;
        acpi_handle handle;
        acpi_status status;
+       bool defer;
 
        if (!chip->parent || !chip->to_irq)
                return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       defer = !acpi_gpio_deferred_req_irqs_done;
+       if (defer)
+               list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+                        &acpi_gpio_deferred_req_irqs_list);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+       if (defer)
+               return;
+
        acpi_walk_resources(handle, "_AEI",
                            acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
        if (ACPI_FAILURE(status))
                return;
 
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
        list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                struct gpio_desc *desc;
 
-               acpi_gpio_del_from_initial_sync_list(event);
-
                if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                        disable_irq_wake(event->irq);
 
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
        acpi_gpio->chip = chip;
        INIT_LIST_HEAD(&acpi_gpio->events);
+       INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
        status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
        if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
        return con_id == NULL;
 }
 
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
 {
-       struct acpi_gpio_event *event, *ep;
+       struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+       mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+       list_for_each_entry_safe(acpi_gpio, tmp,
+                                &acpi_gpio_deferred_req_irqs_list,
+                                deferred_req_irqs_list_entry) {
+               acpi_handle handle;
 
-       mutex_lock(&acpi_gpio_initial_sync_list_lock);
-       list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
-                                initial_sync_list) {
-               acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-               list_del_init(&event->initial_sync_list);
+               handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+               acpi_walk_resources(handle, "_AEI",
+                                   acpi_gpiochip_request_interrupt, acpi_gpio);
+
+               list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
        }
-       mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+       acpi_gpio_deferred_req_irqs_done = true;
+       mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
 
        return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
index a4f1157..d4e7a09 100644 (file)
@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
        struct of_phandle_args *gpiospec = data;
 
        return chip->gpiodev->dev.of_node == gpiospec->np &&
+                               chip->of_xlate &&
                                chip->of_xlate(chip, gpiospec, NULL) >= 0;
 }
 
index e8f8a19..a57300c 100644 (file)
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
                if (ret)
                        goto out_free_descs;
                lh->descs[i] = desc;
-               count = i;
+               count = i + 1;
 
                if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
                        set_bit(FLAG_ACTIVE_LOW, &desc->flags);
index f8bbbb3..0c791e3 100644 (file)
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
 
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
-                       void **cpu_ptr)
+                       void **cpu_ptr, bool mqd_gfx9)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
        struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
        bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
+
+       if (mqd_gfx9)
+               bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+
        r = amdgpu_bo_create(adev, &bp, &bo);
        if (r) {
                dev_err(adev->dev,
index 2f379c1..cc9aeab 100644 (file)
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 /* Shared API */
 int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
-                       void **cpu_ptr);
+                       void **cpu_ptr, bool mqd_gfx9);
 void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
 void get_local_mem_info(struct kgd_dev *kgd,
                        struct kfd_local_mem_info *mem_info);
index ea3f698..9803b91 100644 (file)
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
 
        while (true) {
                temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
-               if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+               if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies))
                        return -ETIME;
index 693ec5e..8816c69 100644 (file)
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                break;
                        case CHIP_POLARIS10:
                                if (type == CGS_UCODE_ID_SMU) {
-                                       if ((adev->pdev->device == 0x67df) &&
-                                           ((adev->pdev->revision == 0xe0) ||
-                                            (adev->pdev->revision == 0xe3) ||
-                                            (adev->pdev->revision == 0xe4) ||
-                                            (adev->pdev->revision == 0xe5) ||
-                                            (adev->pdev->revision == 0xe7) ||
+                                       if (((adev->pdev->device == 0x67df) &&
+                                            ((adev->pdev->revision == 0xe0) ||
+                                             (adev->pdev->revision == 0xe3) ||
+                                             (adev->pdev->revision == 0xe4) ||
+                                             (adev->pdev->revision == 0xe5) ||
+                                             (adev->pdev->revision == 0xe7) ||
+                                             (adev->pdev->revision == 0xef))) ||
+                                           ((adev->pdev->device == 0x6fdf) &&
                                             (adev->pdev->revision == 0xef))) {
                                                info->is_kicker = true;
                                                strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
index 502b94f..b31d121 100644 (file)
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 {
        struct drm_gem_object *gobj;
        unsigned long size;
+       int r;
 
        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
        p->uf_entry.tv.shared = true;
        p->uf_entry.user_pages = NULL;
 
-       size = amdgpu_bo_size(p->uf_entry.robj);
-       if (size != PAGE_SIZE || (data->offset + 8) > size)
-               return -EINVAL;
-
-       *offset = data->offset;
-
        drm_gem_object_put_unlocked(gobj);
 
+       size = amdgpu_bo_size(p->uf_entry.robj);
+       if (size != PAGE_SIZE || (data->offset + 8) > size) {
+               r = -EINVAL;
+               goto error_unref;
+       }
+
        if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
-               amdgpu_bo_unref(&p->uf_entry.robj);
-               return -EINVAL;
+               r = -EINVAL;
+               goto error_unref;
        }
 
+       *offset = data->offset;
+
        return 0;
+
+error_unref:
+       amdgpu_bo_unref(&p->uf_entry.robj);
+       return r;
 }
 
 static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1012,13 +1019,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                if (r)
                        return r;
 
-               if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
-                       parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
-                       if (!parser->ctx->preamble_presented) {
-                               parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
-                               parser->ctx->preamble_presented = true;
-                       }
-               }
+               if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+                       parser->job->preamble_status |=
+                               AMDGPU_PREAMBLE_IB_PRESENT;
 
                if (parser->ring && parser->ring != ring)
                        return -EINVAL;
@@ -1207,26 +1210,24 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        int r;
 
+       job = p->job;
+       p->job = NULL;
+
+       r = drm_sched_job_init(&job->base, entity, p->filp);
+       if (r)
+               goto error_unlock;
+
+       /* No memory allocation is allowed while holding the mn lock */
        amdgpu_mn_lock(p->mn);
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = e->robj;
 
                if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
-                       amdgpu_mn_unlock(p->mn);
-                       return -ERESTARTSYS;
+                       r = -ERESTARTSYS;
+                       goto error_abort;
                }
        }
 
-       job = p->job;
-       p->job = NULL;
-
-       r = drm_sched_job_init(&job->base, entity, p->filp);
-       if (r) {
-               amdgpu_job_free(job);
-               amdgpu_mn_unlock(p->mn);
-               return r;
-       }
-
        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);
 
@@ -1241,6 +1242,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
        amdgpu_cs_post_dependencies(p);
 
+       if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+           !p->ctx->preamble_presented) {
+               job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+               p->ctx->preamble_presented = true;
+       }
+
        cs->out.handle = seq;
        job->uf_sequence = seq;
 
@@ -1258,6 +1265,15 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
        amdgpu_mn_unlock(p->mn);
 
        return 0;
+
+error_abort:
+       dma_fence_put(&job->base.s_fence->finished);
+       job->base.s_fence = NULL;
+       amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+       amdgpu_job_free(job);
+       return r;
 }
 
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
index 8ab5ccb..39bf2ce 100644 (file)
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_GMC,
                AMD_IP_BLOCK_TYPE_COMMON,
+               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_IH,
        };
 
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
 
        static enum amd_ip_block_type ip_order[] = {
                AMD_IP_BLOCK_TYPE_SMC,
-               AMD_IP_BLOCK_TYPE_PSP,
                AMD_IP_BLOCK_TYPE_DCE,
                AMD_IP_BLOCK_TYPE_GFX,
                AMD_IP_BLOCK_TYPE_SDMA,
index 8843a06..0f41d86 100644 (file)
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+       {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        /* Polaris12 */
        {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
        {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
index 5518e62..51b5e97 100644 (file)
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                return r;
        }
 
+       need_ctx_switch = ring->current_ctx != fence_ctx;
        if (ring->funcs->emit_pipeline_sync && job &&
            ((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+            (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
             amdgpu_vm_need_pipeline_sync(ring, job))) {
                need_pipe_sync = true;
                dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        }
 
        skip_preamble = ring->current_ctx == fence_ctx;
-       need_ctx_switch = ring->current_ctx != fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                if (need_ctx_switch)
                        status |= AMDGPU_HAVE_CTX_SWITCH;
index 8f98629..7b4e657 100644 (file)
@@ -1932,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
                        amdgpu_fence_wait_empty(ring);
        }
 
-       mutex_lock(&adev->pm.mutex);
-       /* update battery/ac status */
-       if (power_supply_is_system_supplied() > 0)
-               adev->pm.ac_power = true;
-       else
-               adev->pm.ac_power = false;
-       mutex_unlock(&adev->pm.mutex);
-
        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
index 0cc5190..5f3f540 100644 (file)
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
        int i;
 
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
        if (i == AMDGPU_MAX_VCE_HANDLES)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vce.idle_work);
        /* TODO: suspending running encoding sessions isn't supported */
        return -EINVAL;
 }
index fd654a4..400fc74 100644 (file)
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
        unsigned size;
        void *ptr;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->vcn.vcpu_bo == NULL)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vcn.idle_work);
-
        size = amdgpu_bo_size(adev->vcn.vcpu_bo);
        ptr = adev->vcn.cpu_addr;
 
index ece0ac7..b17771d 100644 (file)
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
         * is validated on next vm use to avoid fault.
         * */
        list_move_tail(&base->vm_status, &vm->evicted);
+       base->moved = true;
 }
 
 /**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        uint64_t addr;
        int r;
 
-       addr = amdgpu_bo_gpu_offset(bo);
        entries = amdgpu_bo_size(bo) / 8;
 
        if (pte_support_ats) {
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (r)
                goto error;
 
+       addr = amdgpu_bo_gpu_offset(bo);
        if (ats_entries) {
                uint64_t ats_value;
 
@@ -2483,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
  * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
  *
  * @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
  * @fragment_size_default: Default PTE fragment size
  * @max_level: max VMPT level
  * @max_bits: max address space size in bits
  *
  */
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits)
 {
+       unsigned int max_size = 1 << (max_bits - 30);
+       unsigned int vm_size;
        uint64_t tmp;
 
        /* adjust vm size first */
        if (amdgpu_vm_size != -1) {
-               unsigned max_size = 1 << (max_bits - 30);
-
                vm_size = amdgpu_vm_size;
                if (vm_size > max_size) {
                        dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
                                 amdgpu_vm_size, max_size);
                        vm_size = max_size;
                }
+       } else {
+               struct sysinfo si;
+               unsigned int phys_ram_gb;
+
+               /* Optimal VM size depends on the amount of physical
+                * RAM available. Underlying requirements and
+                * assumptions:
+                *
+                *  - Need to map system memory and VRAM from all GPUs
+                *     - VRAM from other GPUs not known here
+                *     - Assume VRAM <= system memory
+                *  - On GFX8 and older, VM space can be segmented for
+                *    different MTYPEs
+                *  - Need to allow room for fragmentation, guard pages etc.
+                *
+                * This adds up to a rough guess of system memory x3.
+                * Round up to power of two to maximize the available
+                * VM size with the given page table size.
+                */
+               si_meminfo(&si);
+               phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+                              (1 << 30) - 1) >> 30;
+               vm_size = roundup_pow_of_two(
+                       min(max(phys_ram_gb * 3, min_vm_size), max_size));
        }
 
        adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
index 67a15d4..9fa9df0 100644 (file)
@@ -321,7 +321,7 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
 void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
                           uint32_t fragment_size_default, unsigned max_level,
                           unsigned max_bits);
 int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
index 5cd4521..5a9534a 100644 (file)
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
        if (amdgpu_sriov_vf(adev))
                return 0;
 
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+                               AMD_PG_SUPPORT_RLC_SMU_HS |
+                               AMD_PG_SUPPORT_CP |
+                               AMD_PG_SUPPORT_GFX_DMG))
+               adev->gfx.rlc.funcs->enter_safe_mode(adev);
        switch (adev->asic_type) {
        case CHIP_CARRIZO:
        case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
        default:
                break;
        }
-
+       if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+                               AMD_PG_SUPPORT_RLC_SMU_HS |
+                               AMD_PG_SUPPORT_CP |
+                               AMD_PG_SUPPORT_GFX_DMG))
+               adev->gfx.rlc.funcs->exit_safe_mode(adev);
        return 0;
 }
 
index 75317f2..ad151fe 100644 (file)
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
        amdgpu_gart_table_vram_unpin(adev);
 }
 
-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
 static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
                                     u32 status, u32 addr, u32 mc_client)
 {
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
 
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
-       gmc_v6_0_gart_fini(adev);
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;
 
index 36dc367..f8d8a3a 100644 (file)
@@ -747,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
 }
 
 /**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
-/**
  * gmc_v7_0_vm_decode_fault - print human readable fault info
  *
  * @adev: amdgpu_device pointer
@@ -1095,8 +1082,9 @@ static int gmc_v7_0_sw_fini(void *handle)
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        kfree(adev->gmc.vm_fault_info);
-       gmc_v7_0_gart_fini(adev);
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;
 
index 70fc97b..9333109 100644 (file)
@@ -969,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
 }
 
 /**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
-/**
  * gmc_v8_0_vm_decode_fault - print human readable fault info
  *
  * @adev: amdgpu_device pointer
@@ -1199,8 +1186,9 @@ static int gmc_v8_0_sw_fini(void *handle)
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        kfree(adev->gmc.vm_fault_info);
-       gmc_v8_0_gart_fini(adev);
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
        release_firmware(adev->gmc.fw);
        adev->gmc.fw = NULL;
 
index 399a5db..72f8018 100644 (file)
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
        return 0;
 }
 
-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
-       amdgpu_gart_table_vram_free(adev);
-       amdgpu_gart_fini(adev);
-}
-
 static int gmc_v9_0_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
-       gmc_v9_0_gart_fini(adev);
 
        /*
        * TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
        */
        amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
 
+       amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
+       amdgpu_gart_fini(adev);
 
        return 0;
 }
index 3f57f64..cb79a93 100644 (file)
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp);
 static int kv_init_fps_limits(struct amdgpu_device *adev);
 
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
 
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
                return ret;
        }
 
-       kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
        if (adev->irq.installed &&
            amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
                ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
 
 static void kv_dpm_disable(struct amdgpu_device *adev)
 {
+       struct kv_power_info *pi = kv_get_pi(adev);
+
        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
                       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
        amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
        /* powerup blocks */
        kv_dpm_powergate_acp(adev, false);
        kv_dpm_powergate_samu(adev, false);
-       kv_dpm_powergate_vce(adev, false);
-       kv_dpm_powergate_uvd(adev, false);
+       if (pi->caps_vce_pg) /* power on the VCE block */
+               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+       if (pi->caps_uvd_pg) /* power on the UVD block */
+               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
 
        kv_enable_smc_cac(adev, false);
        kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
        int ret;
 
        if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
-               kv_dpm_powergate_vce(adev, false);
                if (pi->caps_stable_p_state)
                        pi->vce_boot_level = table->count - 1;
                else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
                kv_enable_vce_dpm(adev, true);
        } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
                kv_enable_vce_dpm(adev, false);
-               kv_dpm_powergate_vce(adev, true);
        }
 
        return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
        }
 }
 
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct kv_power_info *pi = kv_get_pi(adev);
-
-       if (pi->vce_power_gated == gate)
-               return;
+       int ret;
 
        pi->vce_power_gated = gate;
 
-       if (!pi->caps_vce_pg)
-               return;
-
-       if (gate)
-               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
-       else
-               amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+       if (gate) {
+               /* stop the VCE block */
+               ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                            AMD_PG_STATE_GATE);
+               kv_enable_vce_dpm(adev, false);
+               if (pi->caps_vce_pg) /* power off the VCE block */
+                       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+       } else {
+               if (pi->caps_vce_pg) /* power on the VCE block */
+                       amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+               kv_enable_vce_dpm(adev, true);
+               /* re-init the VCE block */
+               ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                            AMD_PG_STATE_UNGATE);
+       }
 }
 
+
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
 {
        struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
        else
                adev->pm.dpm_enabled = true;
        mutex_unlock(&adev->pm.mutex);
-
+       amdgpu_pm_compute_clocks(adev);
        return ret;
 }
 
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
        case AMD_IP_BLOCK_TYPE_UVD:
                kv_dpm_powergate_uvd(handle, gate);
                break;
+       case AMD_IP_BLOCK_TYPE_VCE:
+               kv_dpm_powergate_vce(handle, gate);
+               break;
        default:
                break;
        }
index e7ca462..7c3b634 100644 (file)
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
-       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
index db327b4..1de9699 100644 (file)
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
 
        si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
        si_thermal_start_thermal_controller(adev);
-       ni_update_current_ps(adev, boot_ps);
 
        return 0;
 }
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
        else
                adev->pm.dpm_enabled = true;
        mutex_unlock(&adev->pm.mutex);
-
+       amdgpu_pm_compute_clocks(adev);
        return ret;
 }
 
index 1b04871..29ac74f 100644 (file)
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
        if (kfd->kfd2kgd->init_gtt_mem_allocation(
                        kfd->kgd, size, &kfd->gtt_mem,
-                       &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+                       &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+                       false)) {
                dev_err(kfd_device, "Could not allocate %d bytes\n", size);
                goto out;
        }
index ec0d62a..4f22e74 100644 (file)
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd)
 {
-       int retval;
        struct mqd_manager *mqd_mgr;
+       int retval;
 
        mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
        if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
        if (!q->properties.is_active)
                return 0;
 
-       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-                       &q->properties, q->process->mm);
+       if (WARN(q->process->mm != current->mm,
+                "should only run in user thread"))
+               retval = -EFAULT;
+       else
+               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+                                          &q->properties, current->mm);
        if (retval)
                goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
                 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-                 q->properties.type == KFD_QUEUE_TYPE_SDMA))
-               retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-                                      &q->properties, q->process->mm);
+                 q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+               if (WARN(q->process->mm != current->mm,
+                        "should only run in user thread"))
+                       retval = -EFAULT;
+               else
+                       retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+                                                  q->pipe, q->queue,
+                                                  &q->properties, current->mm);
+       }
 
 out_unlock:
        dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd)
 {
+       struct mm_struct *mm = NULL;
        struct queue *q;
        struct mqd_manager *mqd_mgr;
        struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                kfd_flush_tlb(pdd);
        }
 
+       /* Take a safe reference to the mm_struct, which may otherwise
+        * disappear even while the kfd_process is still referenced.
+        */
+       mm = get_task_mm(pdd->process->lead_thread);
+       if (!mm) {
+               retval = -EFAULT;
+               goto out;
+       }
+
        /* activate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                q->properties.is_evicted = false;
                q->properties.is_active = true;
                retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-                                      q->queue, &q->properties,
-                                      q->process->mm);
+                                      q->queue, &q->properties, mm);
                if (retval)
                        goto out;
                dqm->queue_count++;
        }
        qpd->evicted = 0;
 out:
+       if (mm)
+               mmput(mm);
        dqm_unlock(dqm);
        return retval;
 }
index 7a61f38..0149475 100644 (file)
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
        struct amd_iommu_device_info iommu_info;
        unsigned int pasid_limit;
        int err;
+       struct kfd_topology_device *top_dev;
 
-       if (!kfd->device_info->needs_iommu_device)
+       top_dev = kfd_topology_device_by_id(kfd->id);
+
+       /*
+        * Overwrite ATS capability according to needs_iommu_device to fix
+        * potential missing corresponding bit in CRAT of BIOS.
+        */
+       if (!kfd->device_info->needs_iommu_device) {
+               top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
                return 0;
+       }
+
+       top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
 
        iommu_info.flags = 0;
        err = amd_iommu_device_info(kfd->pdev, &iommu_info);
index f5fc367..0cedb37 100644 (file)
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
                                ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
                        &((*mqd_mem_obj)->gtt_mem),
                        &((*mqd_mem_obj)->gpu_addr),
-                       (void *)&((*mqd_mem_obj)->cpu_ptr));
+                       (void *)&((*mqd_mem_obj)->cpu_ptr), true);
        } else
                retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
                                mqd_mem_obj);
index f971710..92b285c 100644 (file)
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
 int kfd_topology_remove_device(struct kfd_dev *gpu);
 struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
                                                uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
 int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
index bc95d4d..80f5db4 100644 (file)
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
        return device;
 }
 
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
 {
-       struct kfd_topology_device *top_dev;
-       struct kfd_dev *device = NULL;
+       struct kfd_topology_device *top_dev = NULL;
+       struct kfd_topology_device *ret = NULL;
 
        down_read(&topology_lock);
 
        list_for_each_entry(top_dev, &topology_device_list, list)
                if (top_dev->gpu_id == gpu_id) {
-                       device = top_dev->gpu;
+                       ret = top_dev;
                        break;
                }
 
        up_read(&topology_lock);
 
-       return device;
+       return ret;
+}
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+       struct kfd_topology_device *top_dev;
+
+       top_dev = kfd_topology_device_by_id(gpu_id);
+       if (!top_dev)
+               return NULL;
+
+       return top_dev->gpu;
 }
 
 struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
index 800f481..6903fe6 100644 (file)
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
        return NULL;
 }
 
+static void emulated_link_detect(struct dc_link *link)
+{
+       struct dc_sink_init_data sink_init_data = { 0 };
+       struct display_sink_capability sink_caps = { 0 };
+       enum dc_edid_status edid_status;
+       struct dc_context *dc_ctx = link->ctx;
+       struct dc_sink *sink = NULL;
+       struct dc_sink *prev_sink = NULL;
+
+       link->type = dc_connection_none;
+       prev_sink = link->local_sink;
+
+       if (prev_sink != NULL)
+               dc_sink_retain(prev_sink);
+
+       switch (link->connector_signal) {
+       case SIGNAL_TYPE_HDMI_TYPE_A: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_DVI_DUAL_LINK: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+               break;
+       }
+
+       case SIGNAL_TYPE_LVDS: {
+               sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+               sink_caps.signal = SIGNAL_TYPE_LVDS;
+               break;
+       }
+
+       case SIGNAL_TYPE_EDP: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_EDP;
+               break;
+       }
+
+       case SIGNAL_TYPE_DISPLAY_PORT: {
+               sink_caps.transaction_type =
+                       DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+               sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+               break;
+       }
+
+       default:
+               DC_ERROR("Invalid connector type! signal:%d\n",
+                       link->connector_signal);
+               return;
+       }
+
+       sink_init_data.link = link;
+       sink_init_data.sink_signal = sink_caps.signal;
+
+       sink = dc_sink_create(&sink_init_data);
+       if (!sink) {
+               DC_ERROR("Failed to create sink!\n");
+               return;
+       }
+
+       link->local_sink = sink;
+
+       edid_status = dm_helpers_read_local_edid(
+                       link->ctx,
+                       link,
+                       sink);
+
+       if (edid_status != EDID_OK)
+               DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
        struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
        struct drm_plane *plane;
        struct drm_plane_state *new_plane_state;
        struct dm_plane_state *dm_new_plane_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
        int ret;
        int i;
 
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
-               dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+               if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none)
+                       emulated_link_detect(aconnector->dc_link);
+               else
+                       dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
 
                if (aconnector->fake_enable && aconnector->dc_link->local_sink)
                        aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
        struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
        struct drm_connector *connector = &aconnector->base;
        struct drm_device *dev = connector->dev;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /* In case of failure or MST no need to update connector status or notify the OS
         * since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;
 
-       if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+       if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+               DRM_ERROR("KMS: Failed to detect connector\n");
+
+       if (aconnector->base.force && new_connection_type == dc_connection_none) {
+               emulated_link_detect(aconnector->dc_link);
+
+
+               drm_modeset_lock_all(dev);
+               dm_restore_drm_connector_state(dev, connector);
+               drm_modeset_unlock_all(dev);
+
+               if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+                       drm_kms_helper_hotplug_event(dev);
+
+       } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
                amdgpu_dm_update_connector_after_detect(aconnector);
 
 
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
        struct drm_device *dev = connector->dev;
        struct dc_link *dc_link = aconnector->dc_link;
        bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
         * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
        if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
                        !is_mst_root_connector) {
                /* Downstream Port status changed. */
-               if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+               if (!dc_link_detect_sink(dc_link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(dc_link);
+
+                       if (aconnector->fake_enable)
+                               aconnector->fake_enable = false;
+
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+                       drm_modeset_lock_all(dev);
+                       dm_restore_drm_connector_state(dev, connector);
+                       drm_modeset_unlock_all(dev);
+
+                       drm_kms_helper_hotplug_event(dev);
+               } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
 
                        if (aconnector->fake_enable)
                                aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        uint32_t link_cnt;
        int32_t total_overlay_planes, total_primary_planes;
+       enum dc_connection_type new_connection_type = dc_connection_none;
 
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
                link = dc_get_link_at_index(dm->dc, i);
 
-               if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+               if (!dc_link_detect_sink(link, &new_connection_type))
+                       DRM_ERROR("KMS: Failed to detect connector\n");
+
+               if (aconnector->base.force && new_connection_type == dc_connection_none) {
+                       emulated_link_detect(link);
+                       amdgpu_dm_update_connector_after_detect(aconnector);
+
+               } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
                        amdgpu_dm_update_connector_after_detect(aconnector);
                        register_backlight_device(dm, link);
                }
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        if (dm_state && dm_state->freesync_capable)
                stream->ignore_msa_timing_param = true;
 finish:
-       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+       if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
                dc_sink_release(sink);
 
        return stream;
@@ -4504,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        }
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-       /* Signal HW programming completion */
-       drm_atomic_helper_commit_hw_done(state);
 
        if (wait_for_vblank)
                drm_atomic_helper_wait_for_flip_done(dev, state);
 
+       /*
+        * FIXME:
+        * Delay hw_done() until flip_done() is signaled. This is to block
+        * another commit from freeing the CRTC state while we're still
+        * waiting on flip_done.
+        */
+       drm_atomic_helper_commit_hw_done(state);
+
        drm_atomic_helper_cleanup_planes(dev, state);
 
        /* Finally, drop a runtime PM reference for each newly disabled CRTC,
index fbe878a..4ba0003 100644 (file)
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
 {
        struct dc_context *ctx = pp->ctx;
        struct amdgpu_device *adev = ctx->driver_context;
+       void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       struct pp_display_clock_request clock = {0};
 
-       if (!pp_funcs || !pp_funcs->display_configuration_changed)
+       if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
                return;
 
-       amdgpu_dpm_display_configuration_changed(adev);
+       clock.clock_type = amd_pp_dcf_clock;
+       clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+       pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+       clock.clock_type = amd_pp_f_clock;
+       clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+       pp_funcs->display_clock_voltage_request(pp_handle, &clock);
 }
 
 void pp_rv_set_wm_ranges(struct pp_smu *pp,
index 5678679..fced3c1 100644 (file)
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
        return result;
 }
 
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
        uint32_t is_hpd_high = 0;
        struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
        if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
                return false;
 
-       if (false == detect_sink(link, &new_connection_type)) {
+       if (false == dc_link_detect_sink(link, &new_connection_type)) {
                BREAK_TO_DEBUGGER();
                return false;
        }
@@ -754,8 +754,12 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
                         * fail-safe mode
                         */
                        if (dc_is_hdmi_signal(link->connector_signal) ||
-                           dc_is_dvi_signal(link->connector_signal))
+                           dc_is_dvi_signal(link->connector_signal)) {
+                               if (prev_sink != NULL)
+                                       dc_sink_release(prev_sink);
+
                                return false;
+                       }
                default:
                        break;
                }
index d43cefb..1b48ab9 100644 (file)
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
 
 bool dc_link_is_dp_sink_present(struct dc_link *link);
 
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */
index 14384d9..b2f3087 100644 (file)
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
        dc->prev_display_config = *pp_display_cfg;
 }
 
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
                struct dc *dc,
                struct dc_state *context,
                bool decrease_allowed)
index e4c5db7..d6db3db 100644 (file)
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
        const struct dc_state *context,
        struct dm_pp_display_configuration *pp_display_cfg);
 
-void dce110_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
 
 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
index 5853522..eb0f5f9 100644 (file)
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
        dh_data->dchub_info_valid = false;
 }
 
-static void dce120_set_bandwidth(
-               struct dc *dc,
-               struct dc_state *context,
-               bool decrease_allowed)
-{
-       if (context->stream_count <= 0)
-               return;
-
-       dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
        /* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
        dce110_hw_sequencer_construct(dc);
        dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
        dc->hwss.update_dchub = dce120_update_dchub;
-       dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
 
index 14391b0..43b82e1 100644 (file)
@@ -292,7 +292,7 @@ struct tile_config {
 struct kfd2kgd_calls {
        int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
                                        void **mem_obj, uint64_t *gpu_addr,
-                                       void **cpu_ptr);
+                                       void **cpu_ptr, bool mqd_gfx9);
 
        void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
 
index 08b5bb2..94d6dab 100644 (file)
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
        drm->irq_enabled = true;
 
        ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       drm_crtc_vblank_reset(&malidp->crtc);
        if (ret < 0) {
                DRM_ERROR("failed to initialise vblank\n");
                goto vblank_fail;
index c94a442..2781e46 100644 (file)
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
 
 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
                                     dma_addr_t *addrs, s32 *pitches,
-                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+                                    int num_planes, u16 w, u16 h, u32 fmt_id,
+                                    const s16 *rgb2yuv_coeffs)
 {
        u32 base = MALIDP500_SE_MEMWRITE_BASE;
        u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
 
        malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
                        MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+       if (rgb2yuv_coeffs) {
+               int i;
+
+               for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+                       malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                                       MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+               }
+       }
+
        malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
 
        return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
 
 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
                                     dma_addr_t *addrs, s32 *pitches,
-                                    int num_planes, u16 w, u16 h, u32 fmt_id)
+                                    int num_planes, u16 w, u16 h, u32 fmt_id,
+                                    const s16 *rgb2yuv_coeffs)
 {
        u32 base = MALIDP550_SE_MEMWRITE_BASE;
        u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
        malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
                          MALIDP550_SE_CONTROL);
 
+       if (rgb2yuv_coeffs) {
+               int i;
+
+               for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+                       malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                                       MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+               }
+       }
+
        return 0;
 }
 
index ad2e969..9fc94c0 100644 (file)
@@ -191,7 +191,8 @@ struct malidp_hw {
         * @param fmt_id - internal format ID of output buffer
         */
        int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
-                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+                              s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+                              const s16 *rgb2yuv_coeffs);
 
        /*
         * Disable the writing to memory of the next frame's content.
index ba6ae66..91472e5 100644 (file)
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
        s32 pitches[2];
        u8 format;
        u8 n_planes;
+       bool rgb2yuv_initialized;
+       const s16 *rgb2yuv_coeffs;
 };
 
 static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
 static struct drm_connector_state *
 malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 {
-       struct malidp_mw_connector_state *mw_state;
+       struct malidp_mw_connector_state *mw_state, *mw_current_state;
 
        if (WARN_ON(!connector->state))
                return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
        if (!mw_state)
                return NULL;
 
-       /* No need to preserve any of our driver-local data */
+       mw_current_state = to_mw_state(connector->state);
+       mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+       mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
        __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
 
        return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+       47,  157,   16,
+       -26,  -87,  112,
+       112, -102,  -10,
+       16,  128,  128
+};
+
 static int
 malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
                               struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
        }
        mw_state->n_planes = n_planes;
 
+       if (fb->format->is_yuv)
+               mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
        return 0;
 }
 
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
 
                drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
                conn_state->writeback_job = NULL;
-
                hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
                                           mw_state->pitches, mw_state->n_planes,
-                                          fb->width, fb->height, mw_state->format);
+                                          fb->width, fb->height, mw_state->format,
+                                          !mw_state->rgb2yuv_initialized ?
+                                          mw_state->rgb2yuv_coeffs : NULL);
+               mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
        } else {
                DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
                hwdev->hw->disable_memwrite(hwdev);
index 3579d36..6ffe849 100644 (file)
 #define MALIDP500_SE_BASE              0x00c00
 #define MALIDP500_SE_CONTROL           0x00c0c
 #define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS    0x00C74
 #define MALIDP500_SE_MEMWRITE_BASE     0x00e00
 #define MALIDP500_DC_IRQ_BASE          0x00f00
 #define MALIDP500_CONFIG_VALID         0x00f00
 #define MALIDP550_SE_CONTROL           0x08010
 #define   MALIDP550_SE_MEMWRITE_ONESHOT        (1 << 7)
 #define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS    0x08078
 #define MALIDP550_SE_MEMWRITE_BASE     0x08100
 #define MALIDP550_DC_BASE              0x0c000
 #define MALIDP550_DC_CONTROL           0x0c010
index 3eb061e..018fcdb 100644 (file)
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
 
-       if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+       if (!drm_drv_uses_atomic_modeset(dev))
                return;
 
        list_for_each_entry(plane, &config->plane_list, head) {
index baff50a..df31c38 100644 (file)
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
  * @name: Client name
  * @funcs: DRM client functions (optional)
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-                  const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+                   const char *name, const struct drm_client_funcs *funcs)
 {
        int ret;
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
        if (ret)
                goto err_put_module;
 
-       mutex_lock(&dev->clientlist_mutex);
-       list_add(&client->list, &dev->clientlist);
-       mutex_unlock(&dev->clientlist_mutex);
-
        drm_dev_get(dev);
 
        return 0;
@@ -109,13 +106,33 @@ err_put_module:
 
        return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+       struct drm_device *dev = client->dev;
+
+       mutex_lock(&dev->clientlist_mutex);
+       list_add(&client->list, &dev->clientlist);
+       mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  *
  * This function should only be called from the unregister callback. An exception
index 6f28fe5..373bd4c 100644 (file)
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                return ret;
        }
 
-       if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+       if (drm_drv_uses_atomic_modeset(dev)) {
                ret = drm_atomic_debugfs_init(minor);
                if (ret) {
                        DRM_ERROR("Failed to create atomic debugfs files\n");
index 9da36a6..9ac1f2e 100644 (file)
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
        fb_helper = &fbdev_cma->fb_helper;
 
-       ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+       ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
        if (ret)
                goto err_free;
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
        if (ret)
                goto err_client_put;
 
+       drm_client_add(&fb_helper->client);
+
        return fbdev_cma;
 
 err_client_put:
index 4b0dd20..515a7ae 100644 (file)
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
 {
        int c, o;
        struct drm_connector *connector;
-       const struct drm_connector_helper_funcs *connector_funcs;
        int my_score, best_score, score;
        struct drm_fb_helper_crtc **crtcs, *crtc;
        struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
        if (drm_has_preferred_mode(fb_helper_conn, width, height))
                my_score++;
 
-       connector_funcs = connector->helper_private;
-
        /*
         * select a crtc for this connector and then attempt to configure
         * remaining connectors
@@ -3221,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
        if (!fb_helper)
                return -ENOMEM;
 
-       ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+       ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
        if (ret) {
                kfree(fb_helper);
                return ret;
        }
 
+       drm_client_add(&fb_helper->client);
+
        fb_helper->preferred_bpp = preferred_bpp;
 
        drm_fbdev_client_hotplug(&fb_helper->client);
index b54fb78..b82da96 100644 (file)
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        lessee_priv->is_master = 1;
        lessee_priv->authenticated = 1;
 
-       /* Hook up the fd */
-       fd_install(fd, lessee_file);
-
        /* Pass fd back to userspace */
        DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
        cl->fd = fd;
        cl->lessee_id = lessee->lessee_id;
 
+       /* Hook up the fd */
+       fd_install(fd, lessee_file);
+
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
        return 0;
 
index b902361..1d9a9d2 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>
 
-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>
 
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
        if (panel->connector)
                return -EBUSY;
 
-       panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-       if (!panel->link) {
-               dev_err(panel->dev, "failed to link panel to %s\n",
-                       dev_name(connector->dev->dev));
-               return -EINVAL;
-       }
-
        panel->connector = connector;
        panel->drm = connector->dev;
 
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
-       device_link_del(panel->link);
-
        panel->connector = NULL;
        panel->drm = NULL;
 
index adb3cb2..759278f 100644 (file)
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
        int ret;
 
+       WARN_ON(*fence);
+
        *fence = drm_syncobj_fence_get(syncobj);
        if (*fence)
                return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 
        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                for (i = 0; i < count; ++i) {
+                       if (entries[i].fence)
+                               continue;
+
                        drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                                              &entries[i].fence,
                                                              &entries[i].syncobj_cb,
index 9b2720b..83c1f46 100644 (file)
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;
 
-       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
        if (!dev->platform_data) {
                struct device_node *core_node;
 
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;
-               pdev = platform_device_register_simple("etnaviv", -1,
-                                                      NULL, 0);
-               if (IS_ERR(pdev)) {
-                       ret = PTR_ERR(pdev);
+
+               pdev = platform_device_alloc("etnaviv", -1);
+               if (!pdev) {
+                       ret = -ENOMEM;
+                       of_node_put(np);
+                       goto unregister_platform_driver;
+               }
+               pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+               /*
+                * Apply the same DMA configuration to the virtual etnaviv
+                * device as the GPU we found. This assumes that all Vivante
+                * GPUs in the system share the same DMA constraints.
+                */
+               of_dma_configure(&pdev->dev, np, true);
+
+               ret = platform_device_add(pdev);
+               if (ret) {
+                       platform_device_put(pdev);
                        of_node_put(np);
                        goto unregister_platform_driver;
                }
+
                etnaviv_drm = pdev;
                of_node_put(np);
                break;
index 87f6b56..797d9ee 100644 (file)
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
                                        unsigned long start, unsigned long size)
 {
-       struct iommu_domain *domain;
-       int ret;
-
-       domain = iommu_domain_alloc(priv->dma_dev->bus);
-       if (!domain)
-               return -ENOMEM;
-
-       ret = iommu_get_dma_cookie(domain);
-       if (ret)
-               goto free_domain;
-
-       ret = iommu_dma_init_domain(domain, start, size, NULL);
-       if (ret)
-               goto put_cookie;
-
-       priv->mapping = domain;
+       priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
        return 0;
-
-put_cookie:
-       iommu_put_dma_cookie(domain);
-free_domain:
-       iommu_domain_free(domain);
-       return ret;
 }
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
-       struct iommu_domain *domain = priv->mapping;
-
-       iommu_put_dma_cookie(domain);
-       iommu_domain_free(domain);
        priv->mapping = NULL;
 }
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
        struct iommu_domain *domain = priv->mapping;
 
-       return iommu_attach_device(domain, dev);
+       if (dev != priv->dma_dev)
+               return iommu_attach_device(domain, dev);
+       return 0;
 }
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
        struct iommu_domain *domain = priv->mapping;
 
-       iommu_detach_device(domain, dev);
+       if (dev != priv->dma_dev)
+               iommu_detach_device(domain, dev);
 }
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
index 5d2f0d5..250b5e0 100644 (file)
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
                        break;
                }
                /* TDA9950 executes all retries for us */
-               tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+               if (tx_status != CEC_TX_STATUS_OK)
+                       tx_status |= CEC_TX_STATUS_MAX_RETRIES;
                cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
                                  nack_cnt, 0, err_cnt);
                break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
        /* Wait up to .5s for it to signal non-busy */
        do {
                csr = tda9950_read(client, REG_CSR);
-               if (!(csr & CSR_BUSY) || --timeout)
+               if (!(csr & CSR_BUSY) || !--timeout)
                        break;
                msleep(10);
        } while (1);
index 6e3f566..51ed99a 100644 (file)
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;
 
-               switch (info->drm_format_mod << 10) {
-               case PLANE_CTL_TILED_LINEAR:
+               switch (info->drm_format_mod) {
+               case DRM_FORMAT_MOD_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
-               case PLANE_CTL_TILED_X:
+               case I915_FORMAT_MOD_X_TILED:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
-               case PLANE_CTL_TILED_Y:
+               case I915_FORMAT_MOD_Y_TILED:
+               case I915_FORMAT_MOD_Yf_TILED:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
-                       gvt_dbg_core("not supported tiling mode\n");
+                       gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+                                    info->drm_format_mod);
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;
-               info->drm_format_mod = p.tiled;
+
+               switch (p.tiled) {
+               case PLANE_CTL_TILED_LINEAR:
+                       info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+                       break;
+               case PLANE_CTL_TILED_X:
+                       info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+                       break;
+               case PLANE_CTL_TILED_Y:
+                       info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+                       break;
+               case PLANE_CTL_TILED_YF:
+                       info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+                       break;
+               default:
+                       gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+               }
+
                info->size = (((p.stride * p.height * p.bpp) / 8) +
-                               (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+                             (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
index face664..481896f 100644 (file)
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        if (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) {
-               plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
-               _PLANE_CTL_TILED_SHIFT;
+               plane->tiled = val & PLANE_CTL_TILED_MASK;
                fmt = skl_format_to_drm(
                        val & PLANE_CTL_FORMAT_MASK,
                        val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
                return  -EINVAL;
        }
 
-       plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+       plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
                (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) ?
index cb055f3..60c1550 100644 (file)
@@ -101,7 +101,7 @@ struct intel_gvt;
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
        u8      enabled;        /* plane is enabled */
-       u8      tiled;          /* X-tiled */
+       u32     tiled;          /* tiling mode: linear, X-tiled, Y tiled, etc */
        u8      bpp;            /* bits per pixel */
        u32     hw_format;      /* format field in the PRI_CTL register */
        u32     drm_format;     /* format in DRM definition */
index 7a58ca5..94c1089 100644 (file)
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+               unsigned int offset, void *p_data, unsigned int bytes)
+{
+       write_vreg(vgpu, offset, p_data, bytes);
+
+       if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+               vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+       else
+               vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+       return 0;
+}
+
 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -1525,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
        u32 v = *(u32 *)p_data;
        u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
-       vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+       switch (offset) {
+       case _PHY_CTL_FAMILY_EDP:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+               break;
+       case _PHY_CTL_FAMILY_DDI:
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+               vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+               break;
+       }
 
        vgpu_vreg(vgpu, offset) = v;
 
@@ -2812,6 +2831,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
                skl_power_well_ctl_write);
 
+       MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
        MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
        MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
        MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3008,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
                NULL, gen9_trtte_write);
        MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-       MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
        MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
        MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
@@ -3025,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
        MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
-               NULL, NULL);
+                NULL, NULL);
+       MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+                NULL, NULL);
 
        MMIO_D(_MMIO(0x4ab8), D_KBL);
        MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
@@ -3189,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
        MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
 
        MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+       MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
 
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
index a45f46d..9ad89e3 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;
 
-       if (kthread)
+       if (kthread) {
+               if (!mmget_not_zero(kvm->mm))
+                       return -EFAULT;
                use_mm(kvm->mm);
+       }
 
        idx = srcu_read_lock(&kvm->srcu);
        ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                      kvm_read_guest(kvm, gpa, buf, len);
        srcu_read_unlock(&kvm->srcu, idx);
 
-       if (kthread)
+       if (kthread) {
                unuse_mm(kvm->mm);
+               mmput(kvm->mm);
+       }
 
        return ret;
 }
@@ -1827,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 {
        struct kvmgt_guest_info *info;
        struct kvm *kvm;
+       int idx;
+       bool ret;
 
        if (!handle_valid(handle))
                return false;
@@ -1834,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
        info = (struct kvmgt_guest_info *)handle;
        kvm = info->kvm;
 
-       return kvm_is_visible_gfn(kvm, gfn);
+       idx = srcu_read_lock(&kvm->srcu);
+       ret = kvm_is_visible_gfn(kvm, gfn);
+       srcu_read_unlock(&kvm->srcu, idx);
 
+       return ret;
 }
 
 struct intel_gvt_mpt kvmgt_mpt = {
index 9943660..9bb9a85 100644 (file)
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 
                /* set the bit 0:2(Core C-State ) to C0 */
                vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+               if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+                       vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+                                   ~(BIT(0) | BIT(1));
+                       vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+                                   ~PHY_POWER_GOOD;
+                       vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+                                   ~PHY_POWER_GOOD;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+                                   ~BIT(30);
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+                                   ~BIT(30);
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+                                   ~BXT_PHY_LANE_ENABLED;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+                                   BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                                   BXT_PHY_LANE_POWERDOWN_ACK;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+                                   ~BXT_PHY_LANE_ENABLED;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+                                   BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                                   BXT_PHY_LANE_POWERDOWN_ACK;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+                                   ~BXT_PHY_LANE_ENABLED;
+                       vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+                                   BXT_PHY_CMNLANE_POWERDOWN_ACK |
+                                   BXT_PHY_LANE_POWERDOWN_ACK;
+               }
        } else {
 #define GVT_GEN8_MMIO_RESET_OFFSET             (0x44200)
                /* only reset the engine related, so starting with 0x44200
index 42e1e6b..e872f48 100644 (file)
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
         * performace for batch mmio read/write, so we need
         * handle forcewake mannually.
         */
-       intel_runtime_pm_get(dev_priv);
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
        switch_mmio(pre, next, ring_id);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       intel_runtime_pm_put(dev_priv);
 }
 
 /**
index fa75a2e..b0d3a43 100644 (file)
@@ -42,8 +42,6 @@
 #define DEVICE_TYPE_EFP3   0x20
 #define DEVICE_TYPE_EFP4   0x10
 
-#define DEV_SIZE       38
-
 struct opregion_header {
        u8 signature[16];
        u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
        u16 size; /* data size */
 } __packed;
 
+/* For supporting windows guest with opregion, here hardcode the emulated
+ * bdb header version as '186', and the corresponding child_device_config
+ * length should be '33' but not '38'.
+ */
 struct efp_child_device_config {
        u16 handle;
        u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
        u8 mipi_bridge_type; /* 171 */
        u16 device_class_ext;
        u8 dvo_function;
-       u8 dp_usb_type_c:1; /* 195 */
-       u8 skip6:7;
-       u8 dp_usb_type_c_2x_gpio_index; /* 195 */
-       u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
-       u8 iboost_dp:4; /* 196 */
-       u8 iboost_hdmi:4; /* 196 */
 } __packed;
 
 struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
        v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
        strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
-       v->bdb_header.version = 186; /* child_dev_size = 38 */
+       v->bdb_header.version = 186; /* child_dev_size = 33 */
        v->bdb_header.header_size = sizeof(v->bdb_header);
 
        v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
 
        /* child device */
        num_child = 4; /* each port has one child */
+       v->general_definitions.child_dev_size =
+               sizeof(struct efp_child_device_config);
        v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
        /* size will include child devices */
        v->general_definitions_header.size =
-               sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
-       v->general_definitions.child_dev_size = DEV_SIZE;
+               sizeof(struct bdb_general_definitions) +
+                       num_child * v->general_definitions.child_dev_size;
 
        /* portA */
        v->child0.handle = DEVICE_TYPE_EFP1;
index 09d7bb7..c32e7d5 100644 (file)
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
        return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME  2
+
 struct vgpu_sched_data {
        struct list_head lru_list;
        struct intel_vgpu *vgpu;
        bool active;
-
+       bool pri_sched;
+       ktime_t pri_time;
        ktime_t sched_in_time;
        ktime_t sched_time;
        ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
                if (!vgpu_has_pending_workload(vgpu_data->vgpu))
                        continue;
 
+               if (vgpu_data->pri_sched) {
+                       if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+                               vgpu = vgpu_data->vgpu;
+                               break;
+                       } else
+                               vgpu_data->pri_sched = false;
+               }
+
                /* Return the vGPU only if it has time slice left */
                if (vgpu_data->left_ts > 0) {
                        vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct vgpu_sched_data *vgpu_data;
        struct intel_vgpu *vgpu = NULL;
+
        /* no active vgpu or has already had a target */
        if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
                goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
        vgpu = find_busy_vgpu(sched_data);
        if (vgpu) {
                scheduler->next_vgpu = vgpu;
-
-               /* Move the last used vGPU to the tail of lru_list */
                vgpu_data = vgpu->sched_data;
-               list_del_init(&vgpu_data->lru_list);
-               list_add_tail(&vgpu_data->lru_list,
-                               &sched_data->lru_runq_head);
+               if (!vgpu_data->pri_sched) {
+                       /* Move the last used vGPU to the tail of lru_list */
+                       list_del_init(&vgpu_data->lru_list);
+                       list_add_tail(&vgpu_data->lru_list,
+                                     &sched_data->lru_runq_head);
+               }
        } else {
                scheduler->next_vgpu = gvt->idle_vgpu;
        }
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
        struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       ktime_t now;
 
        if (!list_empty(&vgpu_data->lru_list))
                return;
 
-       list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+       now = ktime_get();
+       vgpu_data->pri_time = ktime_add(now,
+                                       ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+       vgpu_data->pri_sched = true;
+
+       list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
        if (!hrtimer_active(&sched_data->timer))
                hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
@@ -426,6 +446,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                &vgpu->gvt->scheduler;
        int ring_id;
        struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        if (!vgpu_data->active)
                return;
@@ -444,6 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                scheduler->current_vgpu = NULL;
        }
 
+       intel_runtime_pm_get(dev_priv);
        spin_lock_bh(&scheduler->mmio_context_lock);
        for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
                if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
                }
        }
        spin_unlock_bh(&scheduler->mmio_context_lock);
+       intel_runtime_pm_put(dev_priv);
        mutex_unlock(&vgpu->gvt->sched_lock);
 }
index a4e8e3c..c628be0 100644 (file)
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_clean_submission(vgpu);
        intel_vgpu_clean_display(vgpu);
        intel_vgpu_clean_opregion(vgpu);
+       intel_vgpu_reset_ggtt(vgpu, true);
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
index f7f2aa7..a262a64 100644 (file)
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
        return true;
 }
 
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+       unsigned long page;
+
+       if (dst->page_count >= dst->num_pages)
+               return ERR_PTR(-ENOSPC);
+
+       page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
                         void *src,
                         struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
 
        do {
                if (zstream->avail_out == 0) {
-                       unsigned long page;
-
-                       page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-                       if (!page)
-                               return -ENOMEM;
+                       zstream->next_out = compress_next_page(dst);
+                       if (IS_ERR(zstream->next_out))
+                               return PTR_ERR(zstream->next_out);
 
-                       dst->pages[dst->page_count++] = (void *)page;
-
-                       zstream->next_out = (void *)page;
                        zstream->avail_out = PAGE_SIZE;
                }
 
-               if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+               if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
                        return -EIO;
        } while (zstream->avail_in);
 
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
        return 0;
 }
 
-static void compress_fini(struct compress *c,
+static int compress_flush(struct compress *c,
                          struct drm_i915_error_object *dst)
 {
        struct z_stream_s *zstream = &c->zstream;
 
-       if (dst) {
-               zlib_deflate(zstream, Z_FINISH);
-               dst->unused = zstream->avail_out;
-       }
+       do {
+               switch (zlib_deflate(zstream, Z_FINISH)) {
+               case Z_OK: /* more space requested */
+                       zstream->next_out = compress_next_page(dst);
+                       if (IS_ERR(zstream->next_out))
+                               return PTR_ERR(zstream->next_out);
+
+                       zstream->avail_out = PAGE_SIZE;
+                       break;
+
+               case Z_STREAM_END:
+                       goto end;
+
+               default: /* any error */
+                       return -EIO;
+               }
+       } while (1);
+
+end:
+       memset(zstream->next_out, 0, zstream->avail_out);
+       dst->unused = zstream->avail_out;
+       return 0;
+}
+
+static void compress_fini(struct compress *c,
+                         struct drm_i915_error_object *dst)
+{
+       struct z_stream_s *zstream = &c->zstream;
 
        zlib_deflateEnd(zstream);
        kfree(zstream->workspace);
-
        if (c->tmp)
                free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
        return 0;
 }
 
+static int compress_flush(struct compress *c,
+                         struct drm_i915_error_object *dst)
+{
+       return 0;
+}
+
 static void compress_fini(struct compress *c,
                          struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
        unsigned long num_pages;
        struct sgt_iter iter;
        dma_addr_t dma;
+       int ret;
 
        if (!vma)
                return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
        dst->gtt_offset = vma->node.start;
        dst->gtt_size = vma->node.size;
+       dst->num_pages = num_pages;
        dst->page_count = 0;
        dst->unused = 0;
 
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
                return NULL;
        }
 
+       ret = -EINVAL;
        for_each_sgt_dma(dma, iter, vma->pages) {
                void __iomem *s;
-               int ret;
 
                ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
                s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
                ret = compress_page(&compress, (void  __force *)s, dst);
                io_mapping_unmap_atomic(s);
-
                if (ret)
-                       goto unwind;
+                       break;
        }
-       goto out;
 
-unwind:
-       while (dst->page_count--)
-               free_page((unsigned long)dst->pages[dst->page_count]);
-       kfree(dst);
-       dst = NULL;
+       if (ret || compress_flush(&compress, dst)) {
+               while (dst->page_count--)
+                       free_page((unsigned long)dst->pages[dst->page_count]);
+               kfree(dst);
+               dst = NULL;
+       }
 
-out:
        compress_fini(&compress, dst);
        ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
        return dst;
index f893a4e..8710fb1 100644 (file)
@@ -135,6 +135,7 @@ struct i915_gpu_state {
                struct drm_i915_error_object {
                        u64 gtt_offset;
                        u64 gtt_size;
+                       int num_pages;
                        int page_count;
                        int unused;
                        u32 *pages[0];
index 90628a4..2987796 100644 (file)
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
        spin_unlock(&i915->irq_lock);
 }
 
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-                     u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
        void __iomem * const regs = dev_priv->regs;
+       u32 iir;
 
        if (!(master_ctl & GEN11_GU_MISC_IRQ))
-               return;
+               return 0;
+
+       iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+       if (likely(iir))
+               raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
 
-       *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-       if (likely(*iir))
-               raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+       return iir;
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-                         const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-       if (!(master_ctl & GEN11_GU_MISC_IRQ))
-               return;
-
-       if (unlikely(!iir)) {
-               DRM_ERROR("GU_MISC iir blank!\n");
-               return;
-       }
-
        if (iir & GEN11_GU_MISC_GSE)
                intel_opregion_asle_intr(dev_priv);
-       else
-               DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
                enable_rpm_wakeref_asserts(i915);
        }
 
-       gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+       gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
        /* Acknowledge and enable interrupts. */
        raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
-       gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+       gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
        return IRQ_HANDLED;
 }
index 6a4d138..1df3ce1 100644 (file)
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
        GEN10_FEATURES, \
        GEN(11), \
        .ddb_size = 2048, \
-       .has_csr = 0, \
        .has_logical_ring_elsq = 1
 
 static const struct intel_device_info intel_icelake_11_info = {
index 08ec744..9e63cd4 100644 (file)
@@ -10422,7 +10422,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
 #define  DSC_INITIAL_DEC_DELAY(dec_delay)       ((dec_delay) << 16)
 #define  DSC_INITIAL_XMIT_DELAY(xmit_delay)     ((xmit_delay) << 0)
@@ -10437,7 +10437,7 @@ enum skl_power_gate {
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
 #define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
-                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
+                                                          _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB, \
                                                           _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
 #define  DSC_SCALE_DEC_INTINT(scale_dec)       ((scale_dec) << 16)
 #define  DSC_SCALE_INC_INT(scale_inc)          ((scale_inc) << 0)
index 11d834f..98358b4 100644 (file)
@@ -199,7 +199,6 @@ vma_create(struct drm_i915_gem_object *obj,
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
-               i915_ppgtt_get(i915_vm_to_ppgtt(vm));
                list_add_tail(&vma->obj_link, &obj->vma_list);
        }
 
@@ -807,9 +806,6 @@ static void __i915_vma_destroy(struct i915_vma *vma)
        if (vma->obj)
                rb_erase(&vma->obj_node, &vma->obj->vma_tree);
 
-       if (!i915_vma_is_ggtt(vma))
-               i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
        rbtree_postorder_for_each_entry_safe(iter, n, &vma->active, node) {
                GEM_BUG_ON(i915_gem_active_isset(&iter->base));
                kfree(iter);
index b725835..769f3f5 100644 (file)
@@ -962,9 +962,6 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
 {
        int ret;
 
-       if (INTEL_INFO(dev_priv)->num_pipes == 0)
-               return;
-
        ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
        if (ret < 0) {
                DRM_ERROR("failed to add audio component (%d)\n", ret);
index 8761513..c9af348 100644 (file)
@@ -2708,7 +2708,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
        if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
                intel_dp_stop_link_train(intel_dp);
 
-       intel_ddi_enable_pipe_clock(crtc_state);
+       if (!is_mst)
+               intel_ddi_enable_pipe_clock(crtc_state);
 }
 
 static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -2810,14 +2811,14 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 
-       intel_ddi_disable_pipe_clock(old_crtc_state);
-
-       /*
-        * Power down sink before disabling the port, otherwise we end
-        * up getting interrupts from the sink on detecting link loss.
-        */
-       if (!is_mst)
+       if (!is_mst) {
+               intel_ddi_disable_pipe_clock(old_crtc_state);
+               /*
+                * Power down sink before disabling the port, otherwise we end
+                * up getting interrupts from the sink on detecting link loss.
+                */
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+       }
 
        intel_disable_ddi_buf(encoder);
 
index ed3fa1c..d295109 100644 (file)
@@ -2988,6 +2988,7 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
        int w = drm_rect_width(&plane_state->base.src) >> 16;
        int h = drm_rect_height(&plane_state->base.src) >> 16;
        int dst_x = plane_state->base.dst.x1;
+       int dst_w = drm_rect_width(&plane_state->base.dst);
        int pipe_src_w = crtc_state->pipe_src_w;
        int max_width = skl_max_plane_width(fb, 0, rotation);
        int max_height = 4096;
@@ -3009,10 +3010,10 @@ static int skl_check_main_surface(const struct intel_crtc_state *crtc_state,
         * screen may cause FIFO underflow and display corruption.
         */
        if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
-           (dst_x + w < 4 || dst_x > pipe_src_w - 4)) {
+           (dst_x + dst_w < 4 || dst_x > pipe_src_w - 4)) {
                DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
-                             dst_x + w < 4 ? "end" : "start",
-                             dst_x + w < 4 ? dst_x + w : dst_x,
+                             dst_x + dst_w < 4 ? "end" : "start",
+                             dst_x + dst_w < 4 ? dst_x + dst_w : dst_x,
                              4, pipe_src_w - 4);
                return -ERANGE;
        }
@@ -5078,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
                mutex_lock(&dev_priv->pcu_lock);
                WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
                mutex_unlock(&dev_priv->pcu_lock);
-               /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+               /*
+                * Wait for PCODE to finish disabling IPS. The BSpec specified
+                * 42ms timeout value leads to occasional timeouts so use 100ms
+                * instead.
+                */
                if (intel_wait_for_register(dev_priv,
                                            IPS_CTL, IPS_ENABLE, 0,
-                                           42))
+                                           100))
                        DRM_ERROR("Timed out waiting for IPS disable\n");
        } else {
                I915_WRITE(IPS_CTL, 0);
index cd0f649..1193202 100644 (file)
@@ -4160,18 +4160,6 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
        return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
 }
 
-/*
- * If display is now connected check links status,
- * there has been known issues of link loss triggering
- * long pulse.
- *
- * Some sinks (eg. ASUS PB287Q) seem to perform some
- * weird HPD ping pong during modesets. So we can apparently
- * end up with HPD going low during a modeset, and then
- * going back up soon after. And once that happens we must
- * retrain the link to get a picture. That's in case no
- * userspace component reacted to intermittent HPD dip.
- */
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx)
 {
@@ -4661,7 +4649,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 }
 
 static int
-intel_dp_long_pulse(struct intel_connector *connector)
+intel_dp_long_pulse(struct intel_connector *connector,
+                   struct drm_modeset_acquire_ctx *ctx)
 {
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
@@ -4720,6 +4709,22 @@ intel_dp_long_pulse(struct intel_connector *connector)
                 */
                status = connector_status_disconnected;
                goto out;
+       } else {
+               /*
+                * If display is now connected check links status,
+                * there has been known issues of link loss triggering
+                * long pulse.
+                *
+                * Some sinks (eg. ASUS PB287Q) seem to perform some
+                * weird HPD ping pong during modesets. So we can apparently
+                * end up with HPD going low during a modeset, and then
+                * going back up soon after. And once that happens we must
+                * retrain the link to get a picture. That's in case no
+                * userspace component reacted to intermittent HPD dip.
+                */
+               struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+               intel_dp_retrain_link(encoder, ctx);
        }
 
        /*
@@ -4781,7 +4786,7 @@ intel_dp_detect(struct drm_connector *connector,
                                return ret;
                }
 
-               status = intel_dp_long_pulse(intel_dp->attached_connector);
+               status = intel_dp_long_pulse(intel_dp->attached_connector, ctx);
        }
 
        intel_dp->detect_done = false;
index 7e3e016..4ecd653 100644 (file)
@@ -166,6 +166,8 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
        struct intel_connector *connector =
                to_intel_connector(old_conn_state->connector);
 
+       intel_ddi_disable_pipe_clock(old_crtc_state);
+
        /* this can fail */
        drm_dp_check_act_status(&intel_dp->mst_mgr);
        /* and this can also fail */
@@ -252,6 +254,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
        I915_WRITE(DP_TP_STATUS(port), temp);
 
        ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+
+       intel_ddi_enable_pipe_clock(pipe_config);
 }
 
 static void intel_mst_enable_dp(struct intel_encoder *encoder,
index a907640..192972a 100644 (file)
@@ -943,8 +943,12 @@ static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
 
        ret = i2c_transfer(adapter, &msg, 1);
        if (ret == 1)
-               return 0;
-       return ret >= 0 ? -EIO : ret;
+               ret = 0;
+       else if (ret >= 0)
+               ret = -EIO;
+
+       kfree(write_buf);
+       return ret;
 }
 
 static
index 5dae16c..3e085c5 100644 (file)
@@ -74,7 +74,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
        DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
                      lspcon_mode_name(mode));
 
-       wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 100);
+       wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
        if (current_mode != mode)
                DRM_ERROR("LSPCON mode hasn't settled\n");
 
index c2f10d8..443dfae 100644 (file)
@@ -181,8 +181,9 @@ struct intel_overlay {
        u32 brightness, contrast, saturation;
        u32 old_xscale, old_yscale;
        /* register access */
-       u32 flip_addr;
        struct drm_i915_gem_object *reg_bo;
+       struct overlay_registers __iomem *regs;
+       u32 flip_addr;
        /* flip handling */
        struct i915_gem_active last_flip;
 };
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
                                  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
 }
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
-                                        overlay->flip_addr,
-                                        PAGE_SIZE);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
-                                    struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap(regs);
-}
-
 static void intel_overlay_submit_request(struct intel_overlay *overlay,
                                         struct i915_request *rq,
                                         i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      struct drm_i915_gem_object *new_bo,
                                      struct put_image_params *params)
 {
-       int ret, tmp_width;
-       struct overlay_registers __iomem *regs;
-       bool scale_changed = false;
+       struct overlay_registers __iomem *regs = overlay->regs;
        struct drm_i915_private *dev_priv = overlay->i915;
        u32 swidth, swidthsw, sheight, ostride;
        enum pipe pipe = overlay->crtc->pipe;
+       bool scale_changed = false;
        struct i915_vma *vma;
+       int ret, tmp_width;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        if (!overlay->active) {
                u32 oconfig;
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unpin;
-               }
+
                oconfig = OCONF_CC_OUT_8BIT;
                if (IS_GEN4(dev_priv))
                        oconfig |= OCONF_CSC_MODE_BT709;
                oconfig |= pipe == 0 ?
                        OCONF_PIPE_A : OCONF_PIPE_B;
                iowrite32(oconfig, &regs->OCONFIG);
-               intel_overlay_unmap_regs(overlay, regs);
 
                ret = intel_overlay_on(overlay);
                if (ret != 0)
                        goto out_unpin;
        }
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs) {
-               ret = -ENOMEM;
-               goto out_unpin;
-       }
-
        iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
        iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
 
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 
        iowrite32(overlay_cmd_reg(params), &regs->OCMD);
 
-       intel_overlay_unmap_regs(overlay, regs);
-
        ret = intel_overlay_continue(overlay, vma, scale_changed);
        if (ret)
                goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
 int intel_overlay_switch_off(struct intel_overlay *overlay)
 {
        struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
        if (ret != 0)
                return ret;
 
-       regs = intel_overlay_map_regs(overlay);
-       iowrite32(0, &regs->OCMD);
-       intel_overlay_unmap_regs(overlay, regs);
+       iowrite32(0, &overlay->regs->OCMD);
 
        return intel_overlay_off(overlay);
 }
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
        struct drm_intel_overlay_attrs *attrs = data;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_overlay *overlay;
-       struct overlay_registers __iomem *regs;
        int ret;
 
        overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
                overlay->contrast   = attrs->contrast;
                overlay->saturation = attrs->saturation;
 
-               regs = intel_overlay_map_regs(overlay);
-               if (!regs) {
-                       ret = -ENOMEM;
-                       goto out_unlock;
-               }
-
-               update_reg_attrs(overlay, regs);
-
-               intel_overlay_unmap_regs(overlay, regs);
+               update_reg_attrs(overlay, overlay->regs);
 
                if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
                        if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
        return ret;
 }
 
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int err;
+
+       obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+       if (obj == NULL)
+               obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+       if (IS_ERR(vma)) {
+               err = PTR_ERR(vma);
+               goto err_put_bo;
+       }
+
+       if (use_phys)
+               overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+       else
+               overlay->flip_addr = i915_ggtt_offset(vma);
+       overlay->regs = i915_vma_pin_iomap(vma);
+       i915_vma_unpin(vma);
+
+       if (IS_ERR(overlay->regs)) {
+               err = PTR_ERR(overlay->regs);
+               goto err_put_bo;
+       }
+
+       overlay->reg_bo = obj;
+       return 0;
+
+err_put_bo:
+       i915_gem_object_put(obj);
+       return err;
+}
+
 void intel_setup_overlay(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay;
-       struct drm_i915_gem_object *reg_bo;
-       struct overlay_registers __iomem *regs;
-       struct i915_vma *vma = NULL;
        int ret;
 
        if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
        if (!overlay)
                return;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       if (WARN_ON(dev_priv->overlay))
-               goto out_free;
-
        overlay->i915 = dev_priv;
 
-       reg_bo = NULL;
-       if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
-       if (reg_bo == NULL)
-               reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
-       if (IS_ERR(reg_bo))
-               goto out_free;
-       overlay->reg_bo = reg_bo;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
-               ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
-               if (ret) {
-                       DRM_ERROR("failed to attach phys overlay regs\n");
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = reg_bo->phys_handle->busaddr;
-       } else {
-               vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
-                                              0, PAGE_SIZE, PIN_MAPPABLE);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin overlay register bo\n");
-                       ret = PTR_ERR(vma);
-                       goto out_free_bo;
-               }
-               overlay->flip_addr = i915_ggtt_offset(vma);
-
-               ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
-               if (ret) {
-                       DRM_ERROR("failed to move overlay register bo into the GTT\n");
-                       goto out_unpin_bo;
-               }
-       }
-
-       /* init all values */
        overlay->color_key = 0x0101fe;
        overlay->color_key_enabled = true;
        overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 
        init_request_active(&overlay->last_flip, NULL);
 
-       regs = intel_overlay_map_regs(overlay);
-       if (!regs)
-               goto out_unpin_bo;
+       mutex_lock(&dev_priv->drm.struct_mutex);
+
+       ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+       if (ret)
+               goto out_free;
+
+       ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+       if (ret)
+               goto out_reg_bo;
 
-       memset_io(regs, 0, sizeof(struct overlay_registers));
-       update_polyphase_filter(regs);
-       update_reg_attrs(overlay, regs);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       intel_overlay_unmap_regs(overlay, regs);
+       memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+       update_polyphase_filter(overlay->regs);
+       update_reg_attrs(overlay, overlay->regs);
 
        dev_priv->overlay = overlay;
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       DRM_INFO("initialized overlay support\n");
+       DRM_INFO("Initialized overlay support.\n");
        return;
 
-out_unpin_bo:
-       if (vma)
-               i915_vma_unpin(vma);
-out_free_bo:
-       i915_gem_object_put(reg_bo);
+out_reg_bo:
+       i915_gem_object_put(overlay->reg_bo);
 out_free:
        mutex_unlock(&dev_priv->drm.struct_mutex);
        kfree(overlay);
-       return;
 }
 
 void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
 {
-       if (!dev_priv->overlay)
+       struct intel_overlay *overlay;
+
+       overlay = fetch_and_zero(&dev_priv->overlay);
+       if (!overlay)
                return;
 
-       /* The bo's should be free'd by the generic code already.
+       /*
+        * The bo's should be free'd by the generic code already.
         * Furthermore modesetting teardown happens beforehand so the
-        * hardware should be off already */
-       WARN_ON(dev_priv->overlay->active);
+        * hardware should be off already.
+        */
+       WARN_ON(overlay->active);
+
+       i915_gem_object_put(overlay->reg_bo);
 
-       i915_gem_object_put(dev_priv->overlay->reg_bo);
-       kfree(dev_priv->overlay);
+       kfree(overlay);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
        u32 isr;
 };
 
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
-       struct drm_i915_private *dev_priv = overlay->i915;
-       struct overlay_registers __iomem *regs;
-
-       if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
-               /* Cast to make sparse happy, but it's wc memory anyway, so
-                * equivalent to the wc io mapping on X86. */
-               regs = (struct overlay_registers __iomem *)
-                       overlay->reg_bo->phys_handle->vaddr;
-       else
-               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
-                                               overlay->flip_addr);
-
-       return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
-                                       struct overlay_registers __iomem *regs)
-{
-       if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
-               io_mapping_unmap_atomic(regs);
-}
-
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
 {
        struct intel_overlay *overlay = dev_priv->overlay;
        struct intel_overlay_error_state *error;
-       struct overlay_registers __iomem *regs;
 
        if (!overlay || !overlay->active)
                return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
        error->isr = I915_READ(ISR);
        error->base = overlay->flip_addr;
 
-       regs = intel_overlay_map_regs_atomic(overlay);
-       if (!regs)
-               goto err;
-
-       memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
-       intel_overlay_unmap_regs_atomic(overlay, regs);
+       memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
 
        return error;
-
-err:
-       kfree(error);
-       return NULL;
 }
 
 void
index 978782a..28d1911 100644 (file)
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
        writel(0x0, comp->regs + DISP_REG_OVL_RST);
 }
 
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
+       return 4;
+}
+
 static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
 {
        unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
 
 static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
 {
+       /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
+        * is defined in mediatek HW data sheet.
+        * The alphabet order in XXX is no relation to data
+        * arrangement in memory.
+        */
        switch (fmt) {
        default:
        case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
        .stop = mtk_ovl_stop,
        .enable_vblank = mtk_ovl_enable_vblank,
        .disable_vblank = mtk_ovl_disable_vblank,
+       .layer_nr = mtk_ovl_layer_nr,
        .layer_on = mtk_ovl_layer_on,
        .layer_off = mtk_ovl_layer_off,
        .layer_config = mtk_ovl_layer_config,
index 585943c..b0a5cff 100644 (file)
 #define RDMA_REG_UPDATE_INT                            BIT(0)
 #define DISP_REG_RDMA_GLOBAL_CON               0x0010
 #define RDMA_ENGINE_EN                                 BIT(0)
+#define RDMA_MODE_MEMORY                               BIT(1)
 #define DISP_REG_RDMA_SIZE_CON_0               0x0014
+#define RDMA_MATRIX_ENABLE                             BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL                                GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB               (6 << 20)
 #define DISP_REG_RDMA_SIZE_CON_1               0x0018
 #define DISP_REG_RDMA_TARGET_LINE              0x001c
+#define DISP_RDMA_MEM_CON                      0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565                   (0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888                   (0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888                 (0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888                 (0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY                     (0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV                     (0x005 << 4)
+#define MEM_MODE_INPUT_SWAP                            BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH                        0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0            0x0030
 #define DISP_REG_RDMA_FIFO_CON                 0x0040
 #define RDMA_FIFO_UNDERFLOW_EN                         BIT(31)
 #define RDMA_FIFO_PSEUDO_SIZE(bytes)                   (((bytes) / 16) << 16)
 #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes)                ((bytes) / 16)
 #define RDMA_FIFO_SIZE(rdma)                   ((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR               0x0f00
+
+#define RDMA_MEM_GMC                           0x40402020
 
 struct mtk_disp_rdma_data {
        unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
        writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
 }
 
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+                                    unsigned int fmt)
+{
+       /* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
+        * is defined in mediatek HW data sheet.
+        * The alphabet order in XXX is no relation to data
+        * arrangement in memory.
+        */
+       switch (fmt) {
+       default:
+       case DRM_FORMAT_RGB565:
+               return MEM_MODE_INPUT_FORMAT_RGB565;
+       case DRM_FORMAT_BGR565:
+               return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_RGB888:
+               return MEM_MODE_INPUT_FORMAT_RGB888;
+       case DRM_FORMAT_BGR888:
+               return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_RGBX8888:
+       case DRM_FORMAT_RGBA8888:
+               return MEM_MODE_INPUT_FORMAT_ARGB8888;
+       case DRM_FORMAT_BGRX8888:
+       case DRM_FORMAT_BGRA8888:
+               return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               return MEM_MODE_INPUT_FORMAT_RGBA8888;
+       case DRM_FORMAT_XBGR8888:
+       case DRM_FORMAT_ABGR8888:
+               return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+       case DRM_FORMAT_UYVY:
+               return MEM_MODE_INPUT_FORMAT_UYVY;
+       case DRM_FORMAT_YUYV:
+               return MEM_MODE_INPUT_FORMAT_YUYV;
+       }
+}
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+       return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+                                 struct mtk_plane_state *state)
+{
+       struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+       struct mtk_plane_pending_state *pending = &state->pending;
+       unsigned int addr = pending->addr;
+       unsigned int pitch = pending->pitch & 0xffff;
+       unsigned int fmt = pending->format;
+       unsigned int con;
+
+       con = rdma_fmt_convert(rdma, fmt);
+       writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+       if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+               rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+                                RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+               rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+                                RDMA_MATRIX_INT_MTX_SEL,
+                                RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+       } else {
+               rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+                                RDMA_MATRIX_ENABLE, 0);
+       }
+
+       writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+       writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+       writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+       rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+                        RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
+
 static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
        .config = mtk_rdma_config,
        .start = mtk_rdma_start,
        .stop = mtk_rdma_stop,
        .enable_vblank = mtk_rdma_enable_vblank,
        .disable_vblank = mtk_rdma_disable_vblank,
+       .layer_nr = mtk_rdma_layer_nr,
+       .layer_config = mtk_rdma_layer_config,
 };
 
 static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
index 2d6aa15..0b976df 100644 (file)
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
        bool                            pending_needs_vblank;
        struct drm_pending_vblank_event *event;
 
-       struct drm_plane                planes[OVL_LAYER_NR];
+       struct drm_plane                *planes;
+       unsigned int                    layer_nr;
        bool                            pending_planes;
 
        void __iomem                    *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 
-       mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+       mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
 
        return 0;
 }
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 
-       mtk_ddp_comp_disable_vblank(ovl);
+       mtk_ddp_comp_disable_vblank(comp);
 }
 
 static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        }
 
        /* Initially configure all planes */
-       for (i = 0; i < OVL_LAYER_NR; i++) {
+       for (i = 0; i < mtk_crtc->layer_nr; i++) {
                struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
        unsigned int i;
 
        /*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
         * queue update module registers on vblank.
         */
        if (state->pending_config) {
-               mtk_ddp_comp_config(ovl, state->pending_width,
+               mtk_ddp_comp_config(comp, state->pending_width,
                                    state->pending_height,
                                    state->pending_vrefresh, 0);
 
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
        }
 
        if (mtk_crtc->pending_planes) {
-               for (i = 0; i < OVL_LAYER_NR; i++) {
+               for (i = 0; i < mtk_crtc->layer_nr; i++) {
                        struct drm_plane *plane = &mtk_crtc->planes[i];
                        struct mtk_plane_state *plane_state;
 
                        plane_state = to_mtk_plane_state(plane->state);
 
                        if (plane_state->pending.config) {
-                               mtk_ddp_comp_layer_config(ovl, i, plane_state);
+                               mtk_ddp_comp_layer_config(comp, i, plane_state);
                                plane_state->pending.config = false;
                        }
                }
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_crtc_state *old_state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
        int ret;
 
        DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
 
-       ret = mtk_smi_larb_get(ovl->larb_dev);
+       ret = mtk_smi_larb_get(comp->larb_dev);
        if (ret) {
                DRM_ERROR("Failed to get larb: %d\n", ret);
                return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
 
        ret = mtk_crtc_ddp_hw_init(mtk_crtc);
        if (ret) {
-               mtk_smi_larb_put(ovl->larb_dev);
+               mtk_smi_larb_put(comp->larb_dev);
                return;
        }
 
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_state)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-       struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+       struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
        int i;
 
        DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
                return;
 
        /* Set all pending plane state to disabled */
-       for (i = 0; i < OVL_LAYER_NR; i++) {
+       for (i = 0; i < mtk_crtc->layer_nr; i++) {
                struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 
        drm_crtc_vblank_off(crtc);
        mtk_crtc_ddp_hw_fini(mtk_crtc);
-       mtk_smi_larb_put(ovl->larb_dev);
+       mtk_smi_larb_put(comp->larb_dev);
 
        mtk_crtc->enabled = false;
 }
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 
        if (mtk_crtc->event)
                mtk_crtc->pending_needs_vblank = true;
-       for (i = 0; i < OVL_LAYER_NR; i++) {
+       for (i = 0; i < mtk_crtc->layer_nr; i++) {
                struct drm_plane *plane = &mtk_crtc->planes[i];
                struct mtk_plane_state *plane_state;
 
@@ -516,7 +517,7 @@ err_cleanup_crtc:
        return ret;
 }
 
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
 {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
        struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
                mtk_crtc->ddp_comp[i] = comp;
        }
 
-       for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+       mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+       mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+                                       sizeof(struct drm_plane),
+                                       GFP_KERNEL);
+
+       for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
                type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
                                (zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
                                                DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
        }
 
        ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
-                               &mtk_crtc->planes[1], pipe);
+                               mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+                               NULL, pipe);
        if (ret < 0)
                goto unprepare;
        drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
index 9d9410c..091adb2 100644 (file)
 #include "mtk_drm_ddp_comp.h"
 #include "mtk_drm_plane.h"
 
-#define OVL_LAYER_NR   4
 #define MTK_LUT_SIZE   512
 #define MTK_MAX_BPC    10
 #define MTK_MIN_BPC    3
 
 void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
                        const enum mtk_ddp_comp_id *path,
                        unsigned int path_len);
index 87e4191..546b3e3 100644 (file)
 #define OVL1_MOUT_EN_COLOR1            0x1
 #define GAMMA_MOUT_EN_RDMA1            0x1
 #define RDMA0_SOUT_DPI0                        0x2
+#define RDMA0_SOUT_DPI1                        0x3
+#define RDMA0_SOUT_DSI1                        0x1
 #define RDMA0_SOUT_DSI2                        0x4
 #define RDMA0_SOUT_DSI3                        0x5
 #define RDMA1_SOUT_DPI0                        0x2
 #define DPI0_SEL_IN_RDMA2              0x3
 #define DPI1_SEL_IN_RDMA1              (0x1 << 8)
 #define DPI1_SEL_IN_RDMA2              (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1              0x1
+#define DSI0_SEL_IN_RDMA2              0x4
 #define DSI1_SEL_IN_RDMA1              0x1
 #define DSI1_SEL_IN_RDMA2              0x4
 #define DSI2_SEL_IN_RDMA1              (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
                *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
                value = RDMA0_SOUT_DPI0;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DPI1;
+       } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+               value = RDMA0_SOUT_DSI1;
        } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
                *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
                value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
                *addr = DISP_REG_CONFIG_DPI_SEL_IN;
                value = DPI1_SEL_IN_RDMA1;
+       } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+               *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI0_SEL_IN_RDMA1;
        } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
                *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
                value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
        } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
                *addr = DISP_REG_CONFIG_DPI_SEL_IN;
                value = DPI1_SEL_IN_RDMA2;
-       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
                *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+               value = DSI0_SEL_IN_RDMA2;
+       } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+               *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
                value = DSI1_SEL_IN_RDMA2;
        } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
                *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
index 7413ffe..8399229 100644 (file)
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
        void (*stop)(struct mtk_ddp_comp *comp);
        void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
        void (*disable_vblank)(struct mtk_ddp_comp *comp);
+       unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
        void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
        void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
        void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
                comp->funcs->disable_vblank(comp);
 }
 
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+       if (comp->funcs && comp->funcs->layer_nr)
+               return comp->funcs->layer_nr(comp);
+
+       return 0;
+}
+
 static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
                                         unsigned int idx)
 {
index 3972111..47ec604 100644 (file)
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
 err_deinit:
        mtk_drm_kms_deinit(drm);
 err_free:
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
        return ret;
 }
 
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
        struct mtk_drm_private *private = dev_get_drvdata(dev);
 
        drm_dev_unregister(private->drm);
-       drm_dev_unref(private->drm);
+       drm_dev_put(private->drm);
        private->drm = NULL;
 }
 
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
 
        drm_dev_unregister(drm);
        mtk_drm_kms_deinit(drm);
-       drm_dev_unref(drm);
+       drm_dev_put(drm);
 
        component_master_del(&pdev->dev, &mtk_drm_ops);
        pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
 {
        struct mtk_drm_private *private = dev_get_drvdata(dev);
        struct drm_device *drm = private->drm;
+       int ret;
 
-       drm_kms_helper_poll_disable(drm);
-
-       private->suspend_state = drm_atomic_helper_suspend(drm);
-       if (IS_ERR(private->suspend_state)) {
-               drm_kms_helper_poll_enable(drm);
-               return PTR_ERR(private->suspend_state);
-       }
-
+       ret = drm_mode_config_helper_suspend(drm);
        DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
-       return 0;
+
+       return ret;
 }
 
 static int mtk_drm_sys_resume(struct device *dev)
 {
        struct mtk_drm_private *private = dev_get_drvdata(dev);
        struct drm_device *drm = private->drm;
+       int ret;
 
-       drm_atomic_helper_resume(drm, private->suspend_state);
-       drm_kms_helper_poll_enable(drm);
-
+       ret = drm_mode_config_helper_resume(drm);
        DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
-       return 0;
+
+       return ret;
 }
 #endif
 
index 8412119..5691dfa 100644 (file)
@@ -1123,17 +1123,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
        int ret;
 
        if (dpcd >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+               /* Even if we're enabling MST, start with disabling the
+                * branching unit to clear any sink-side MST topology state
+                * that wasn't set by us
+                */
+               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
                if (ret < 0)
                        return ret;
 
-               dpcd &= ~DP_MST_EN;
-               if (state)
-                       dpcd |= DP_MST_EN;
-
-               ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
-               if (ret < 0)
-                       return ret;
+               if (state) {
+                       /* Now, start initializing */
+                       ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
+                                                DP_MST_EN);
+                       if (ret < 0)
+                               return ret;
+               }
        }
 
        return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1146,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
 int
 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
 {
-       int ret, state = 0;
+       struct drm_dp_aux *aux;
+       int ret;
+       bool old_state, new_state;
+       u8 mstm_ctrl;
 
        if (!mstm)
                return 0;
 
-       if (dpcd[0] >= 0x12) {
-               ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+       mutex_lock(&mstm->mgr.lock);
+
+       old_state = mstm->mgr.mst_state;
+       new_state = old_state;
+       aux = mstm->mgr.aux;
+
+       if (old_state) {
+               /* Just check that the MST hub is still as we expect it */
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
+               if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
+                       DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
+                       new_state = false;
+               }
+       } else if (dpcd[0] >= 0x12) {
+               ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
                if (ret < 0)
-                       return ret;
+                       goto probe_error;
 
                if (!(dpcd[1] & DP_MST_CAP))
                        dpcd[0] = 0x11;
                else
-                       state = allow;
+                       new_state = allow;
+       }
+
+       if (new_state == old_state) {
+               mutex_unlock(&mstm->mgr.lock);
+               return new_state;
        }
 
-       ret = nv50_mstm_enable(mstm, dpcd[0], state);
+       ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
        if (ret)
-               return ret;
+               goto probe_error;
+
+       mutex_unlock(&mstm->mgr.lock);
 
-       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+       ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
        if (ret)
                return nv50_mstm_enable(mstm, dpcd[0], 0);
 
-       return mstm->mgr.mst_state;
+       return new_state;
+
+probe_error:
+       mutex_unlock(&mstm->mgr.lock);
+       return ret;
 }
 
 static void
@@ -2074,7 +2105,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
 static const struct drm_mode_config_funcs
 nv50_disp_func = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
        .atomic_check = nv50_disp_atomic_check,
        .atomic_commit = nv50_disp_atomic_commit,
        .atomic_state_alloc = nv50_disp_atomic_state_alloc,
index 51932c7..247f72c 100644 (file)
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
 nouveau_connector_ddc_detect(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
-       struct nouveau_connector *nv_connector = nouveau_connector(connector);
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
-       struct nouveau_encoder *nv_encoder = NULL;
+       struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
        struct drm_encoder *encoder;
-       int i, panel = -ENODEV;
-
-       /* eDP panels need powering on by us (if the VBIOS doesn't default it
-        * to on) before doing any AUX channel transactions.  LVDS panel power
-        * is handled by the SOR itself, and not required for LVDS DDC.
-        */
-       if (nv_connector->type == DCB_CONNECTOR_eDP) {
-               panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
-               if (panel == 0) {
-                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
-                       msleep(300);
-               }
-       }
+       int i, ret;
+       bool switcheroo_ddc = false;
 
        drm_connector_for_each_possible_encoder(connector, encoder, i) {
                nv_encoder = nouveau_encoder(encoder);
 
-               if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-                       int ret = nouveau_dp_detect(nv_encoder);
+               switch (nv_encoder->dcb->type) {
+               case DCB_OUTPUT_DP:
+                       ret = nouveau_dp_detect(nv_encoder);
                        if (ret == NOUVEAU_DP_MST)
                                return NULL;
-                       if (ret == NOUVEAU_DP_SST)
-                               break;
-               } else
-               if ((vga_switcheroo_handler_flags() &
-                    VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
-                   nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
-                   nv_encoder->i2c) {
-                       int ret;
-                       vga_switcheroo_lock_ddc(dev->pdev);
-                       ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
-                       vga_switcheroo_unlock_ddc(dev->pdev);
-                       if (ret)
+                       else if (ret == NOUVEAU_DP_SST)
+                               found = nv_encoder;
+
+                       break;
+               case DCB_OUTPUT_LVDS:
+                       switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
+                                           VGA_SWITCHEROO_CAN_SWITCH_DDC);
+               /* fall-through */
+               default:
+                       if (!nv_encoder->i2c)
                                break;
-               } else
-               if (nv_encoder->i2c) {
+
+                       if (switcheroo_ddc)
+                               vga_switcheroo_lock_ddc(dev->pdev);
                        if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
-                               break;
+                               found = nv_encoder;
+                       if (switcheroo_ddc)
+                               vga_switcheroo_unlock_ddc(dev->pdev);
+
+                       break;
                }
+               if (found)
+                       break;
        }
 
-       /* eDP panel not detected, restore panel power GPIO to previous
-        * state to avoid confusing the SOR for other output types.
-        */
-       if (!nv_encoder && panel == 0)
-               nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
-
-       return nv_encoder;
+       return found;
 }
 
 static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
                nv_connector->edid = NULL;
        }
 
-       /* Outputs are only polled while runtime active, so acquiring a
-        * runtime PM ref here is unnecessary (and would deadlock upon
-        * runtime suspend because it waits for polling to finish).
+       /* Outputs are only polled while runtime active, so resuming the
+        * device here is unnecessary (and would deadlock upon runtime suspend
+        * because it waits for polling to finish). We do however, want to
+        * prevent the autosuspend timer from elapsing during this operation
+        * if possible.
         */
-       if (!drm_kms_helper_is_poll_worker()) {
-               ret = pm_runtime_get_sync(connector->dev->dev);
+       if (drm_kms_helper_is_poll_worker()) {
+               pm_runtime_get_noresume(dev->dev);
+       } else {
+               ret = pm_runtime_get_sync(dev->dev);
                if (ret < 0 && ret != -EACCES)
                        return conn_status;
        }
@@ -638,10 +628,8 @@ detect_analog:
 
  out:
 
-       if (!drm_kms_helper_is_poll_worker()) {
-               pm_runtime_mark_last_busy(connector->dev->dev);
-               pm_runtime_put_autosuspend(connector->dev->dev);
-       }
+       pm_runtime_mark_last_busy(dev->dev);
+       pm_runtime_put_autosuspend(dev->dev);
 
        return conn_status;
 }
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
        const struct nvif_notify_conn_rep_v0 *rep = notify->data;
        const char *name = connector->name;
        struct nouveau_encoder *nv_encoder;
+       int ret;
+
+       ret = pm_runtime_get(drm->dev->dev);
+       if (ret == 0) {
+               /* We can't block here if there's a pending PM request
+                * running, as we'll deadlock nouveau_display_fini() when it
+                * calls nvif_put() on our nvif_notify struct. So, simply
+                * defer the hotplug event until the device finishes resuming
+                */
+               NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
+                        name);
+               schedule_work(&drm->hpd_work);
+
+               pm_runtime_put_noidle(drm->dev->dev);
+               return NVIF_NOTIFY_KEEP;
+       } else if (ret != 1 && ret != -EACCES) {
+               NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
+                       name, ret);
+               return NVIF_NOTIFY_DROP;
+       }
 
        if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
                NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
                drm_helper_hpd_irq_event(connector->dev);
        }
 
+       pm_runtime_mark_last_busy(drm->dev->dev);
+       pm_runtime_put_autosuspend(drm->dev->dev);
        return NVIF_NOTIFY_KEEP;
 }
 
index 139368b..540c0cb 100644 (file)
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
 
 static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
        .fb_create = nouveau_user_framebuffer_create,
-       .output_poll_changed = drm_fb_helper_output_poll_changed,
+       .output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
 
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
        pm_runtime_get_sync(drm->dev->dev);
 
        drm_helper_hpd_irq_event(drm->dev);
-       /* enable polling for external displays */
-       drm_kms_helper_poll_enable(drm->dev);
 
        pm_runtime_mark_last_busy(drm->dev->dev);
        pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
 {
        struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
        struct acpi_bus_event *info = data;
+       int ret;
 
        if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
                if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
-                       /*
-                        * This may be the only indication we receive of a
-                        * connector hotplug on a runtime suspended GPU,
-                        * schedule hpd_work to check.
-                        */
-                       schedule_work(&drm->hpd_work);
+                       ret = pm_runtime_get(drm->dev->dev);
+                       if (ret == 1 || ret == -EACCES) {
+                               /* If the GPU is already awake, or in a state
+                                * where we can't wake it up, it can handle
+                                * its own hotplug events.
+                                */
+                               pm_runtime_put_autosuspend(drm->dev->dev);
+                       } else if (ret == 0) {
+                               /* This may be the only indication we receive
+                                * of a connector hotplug on a runtime
+                                * suspended GPU, schedule hpd_work to check.
+                                */
+                               NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+                               schedule_work(&drm->hpd_work);
+                               pm_runtime_put_noidle(drm->dev->dev);
+                       } else {
+                               NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+                                       ret);
+                       }
 
                        /* acpi-video should not generate keypresses for this */
                        return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       /* enable connector detection and polling for connectors without HPD
+        * support
+        */
+       drm_kms_helper_poll_enable(dev);
+
        /* enable hotplug interrupts */
        drm_connector_list_iter_begin(dev, &conn_iter);
        nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
 }
 
 void
-nouveau_display_fini(struct drm_device *dev, bool suspend)
+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
 {
        struct nouveau_display *disp = nouveau_display(dev);
        struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
        }
        drm_connector_list_iter_end(&conn_iter);
 
+       if (!runtime)
+               cancel_work_sync(&drm->hpd_work);
+
        drm_kms_helper_poll_disable(dev);
        disp->fini(dev);
 }
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
                        }
                }
 
-               nouveau_display_fini(dev, true);
+               nouveau_display_fini(dev, true, runtime);
                return 0;
        }
 
-       nouveau_display_fini(dev, true);
+       nouveau_display_fini(dev, true, runtime);
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_framebuffer *nouveau_fb;
index 54aa7c3..ff92b54 100644 (file)
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
 int  nouveau_display_create(struct drm_device *dev);
 void nouveau_display_destroy(struct drm_device *dev);
 int  nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev, bool suspend);
+void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
 int  nouveau_display_suspend(struct drm_device *dev, bool runtime);
 void nouveau_display_resume(struct drm_device *dev, bool runtime);
 int  nouveau_display_vblank_enable(struct drm_device *, unsigned int);
index c7ec86d..74d2283 100644 (file)
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                mutex_unlock(&drm->master.lock);
        }
        if (ret) {
-               NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
                goto done;
        }
 
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
-               NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->device.object, mmus);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MMU class\n");
+               NV_PRINTK(err, cli, "No supported MMU class\n");
                goto done;
        }
 
        ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
        if (ret) {
-               NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, vmms);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported VMM class\n");
+               NV_PRINTK(err, cli, "No supported VMM class\n");
                goto done;
        }
 
        ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
        if (ret) {
-               NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+               NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
                goto done;
        }
 
        ret = nvif_mclass(&cli->mmu.object, mems);
        if (ret < 0) {
-               NV_ERROR(drm, "No supported MEM class\n");
+               NV_PRINTK(err, cli, "No supported MEM class\n");
                goto done;
        }
 
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
                pm_runtime_allow(dev->dev);
                pm_runtime_mark_last_busy(dev->dev);
                pm_runtime_put(dev->dev);
-       } else {
-               /* enable polling for external displays */
-               drm_kms_helper_poll_enable(dev);
        }
+
        return 0;
 
 fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
        nouveau_debugfs_fini(drm);
 
        if (dev->mode_config.num_crtc)
-               nouveau_display_fini(dev, false);
+               nouveau_display_fini(dev, false, false);
        nouveau_display_destroy(dev);
 
        nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
                return -EBUSY;
        }
 
-       drm_kms_helper_poll_disable(drm_dev);
        nouveau_switcheroo_optimus_dsm();
        ret = nouveau_do_suspend(drm_dev, true);
        pci_save_state(pdev);
index 844498c..0f64c0a 100644 (file)
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
        console_unlock();
 
        if (state == FBINFO_STATE_RUNNING) {
+               nouveau_fbcon_hotplug_resume(drm->fbcon);
                pm_runtime_mark_last_busy(drm->dev->dev);
                pm_runtime_put_sync(drm->dev->dev);
        }
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
        schedule_work(&drm->fbcon_work);
 }
 
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+       struct nouveau_drm *drm = nouveau_drm(dev);
+       struct nouveau_fbdev *fbcon = drm->fbcon;
+       int ret;
+
+       if (!fbcon)
+               return;
+
+       mutex_lock(&fbcon->hotplug_lock);
+
+       ret = pm_runtime_get(dev->dev);
+       if (ret == 1 || ret == -EACCES) {
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+
+               pm_runtime_mark_last_busy(dev->dev);
+               pm_runtime_put_autosuspend(dev->dev);
+       } else if (ret == 0) {
+               /* If the GPU was already in the process of suspending before
+                * this event happened, then we can't block here as we'll
+                * deadlock the runtime pmops since they wait for us to
+                * finish. So, just defer this event for when we runtime
+                * resume again. It will be handled by fbcon_work.
+                */
+               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+               fbcon->hotplug_waiting = true;
+               pm_runtime_put_noidle(drm->dev->dev);
+       } else {
+               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+                        ret);
+       }
+
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+       struct nouveau_drm *drm;
+
+       if (!fbcon)
+               return;
+       drm = nouveau_drm(fbcon->helper.dev);
+
+       mutex_lock(&fbcon->hotplug_lock);
+       if (fbcon->hotplug_waiting) {
+               fbcon->hotplug_waiting = false;
+
+               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+               drm_fb_helper_hotplug_event(&fbcon->helper);
+       }
+       mutex_unlock(&fbcon->hotplug_lock);
+}
+
 int
 nouveau_fbcon_init(struct drm_device *dev)
 {
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
 
        drm->fbcon = fbcon;
        INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+       mutex_init(&fbcon->hotplug_lock);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index a6f192e..db9d520 100644 (file)
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
        struct nvif_object gdi;
        struct nvif_object blit;
        struct nvif_object twod;
+
+       struct mutex hotplug_lock;
+       bool hotplug_waiting;
 };
 
 void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
 void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
 void nouveau_fbcon_accel_restore(struct drm_device *dev);
 
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
 extern int nouveau_nofbaccel;
 
 #endif /* __NV50_FBCON_H__ */
index 3da5a43..8f1ce48 100644 (file)
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
                pr_err("VGA switcheroo: switched nouveau on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                nouveau_pmops_resume(&pdev->dev);
-               drm_kms_helper_poll_enable(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                pr_err("VGA switcheroo: switched nouveau off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-               drm_kms_helper_poll_disable(dev);
                nouveau_switcheroo_optimus_dsm();
                nouveau_pmops_suspend(&pdev->dev);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
index 32fa94a..cbd33e8 100644 (file)
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
        struct nvkm_outp *outp, *outt, *pair;
        struct nvkm_conn *conn;
        struct nvkm_head *head;
+       struct nvkm_ior *ior;
        struct nvbios_connE connE;
        struct dcb_output dcbE;
        u8  hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
                        return ret;
        }
 
+       /* Enforce identity-mapped SOR assignment for panels, which have
+        * certain bits (i.e. backlight controls) wired to a specific SOR.
+        */
+       list_for_each_entry(outp, &disp->outp, head) {
+               if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+                   outp->conn->info.type == DCB_CONNECTOR_eDP) {
+                       ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+                       if (!WARN_ON(!ior))
+                               ior->identity = true;
+                       outp->identity = true;
+               }
+       }
+
        i = 0;
        list_for_each_entry(head, &disp->head, head)
                i = max(i, head->id + 1);
index 7c5bed2..5f301e6 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
+#include <subdev/gpio.h>
 #include <subdev/i2c.h>
 
 #include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
 }
 
 static void
-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
 {
        struct nvkm_dp *dp = nvkm_dp(outp);
 
-       /* Prevent link from being retrained if sink sends an IRQ. */
-       atomic_set(&dp->lt.done, 0);
-       ior->dp.nr = 0;
-
        /* Execute DisableLT script from DP Info Table. */
        nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
                init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
        );
 }
 
+static void
+nvkm_dp_release(struct nvkm_outp *outp)
+{
+       struct nvkm_dp *dp = nvkm_dp(outp);
+
+       /* Prevent link from being retrained if sink sends an IRQ. */
+       atomic_set(&dp->lt.done, 0);
+       dp->outp.ior->dp.nr = 0;
+}
+
 static int
 nvkm_dp_acquire(struct nvkm_outp *outp)
 {
@@ -491,7 +498,7 @@ done:
        return ret;
 }
 
-static void
+static bool
 nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 {
        struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
 
                if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
                                sizeof(dp->dpcd)))
-                       return;
+                       return true;
        }
 
        if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
        }
 
        atomic_set(&dp->lt.done, 0);
+       return false;
 }
 
 static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
 static void
 nvkm_dp_init(struct nvkm_outp *outp)
 {
+       struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
        struct nvkm_dp *dp = nvkm_dp(outp);
+
        nvkm_notify_put(&dp->outp.conn->hpd);
-       nvkm_dp_enable(dp, true);
+
+       /* eDP panels need powering on by us (if the VBIOS doesn't default it
+        * to on) before doing any AUX channel transactions.  LVDS panel power
+        * is handled by the SOR itself, and not required for LVDS DDC.
+        */
+       if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
+               int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+               if (power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+
+               /* We delay here unconditionally, even if already powered,
+                * because some laptop panels have a significant resume
+                * delay before the panel begins responding.
+                *
+                * This is likely a bit of a hack, but no better idea for
+                * handling this at the moment.
+                */
+               msleep(300);
+
+               /* If the eDP panel can't be detected, we need to restore
+                * the panel power GPIO to avoid breaking another output.
+                */
+               if (!nvkm_dp_enable(dp, true) && power == 0)
+                       nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
+       } else {
+               nvkm_dp_enable(dp, true);
+       }
+
        nvkm_notify_get(&dp->hpd);
 }
 
@@ -576,6 +613,7 @@ nvkm_dp_func = {
        .fini = nvkm_dp_fini,
        .acquire = nvkm_dp_acquire,
        .release = nvkm_dp_release,
+       .disable = nvkm_dp_disable,
 };
 
 static int
index e0b4e0c..1991121 100644 (file)
@@ -16,6 +16,7 @@ struct nvkm_ior {
        char name[8];
 
        struct list_head head;
+       bool identity;
 
        struct nvkm_ior_state {
                struct nvkm_outp *outp;
index f89c7b9..def005d 100644 (file)
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
        nv50_disp_super_ied_off(head, ior, 2);
 
        /* If we're shutting down the OR's only active head, execute
-        * the output path's release function.
+        * the output path's disable function.
         */
        if (ior->arm.head == (1 << head->id)) {
-               if ((outp = ior->arm.outp) && outp->func->release)
-                       outp->func->release(outp, ior);
+               if ((outp = ior->arm.outp) && outp->func->disable)
+                       outp->func->disable(outp, ior);
        }
 }
 
index be9e7f8..c62030c 100644 (file)
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
        if (ior) {
                outp->acquired &= ~user;
                if (!outp->acquired) {
+                       if (outp->func->release && outp->ior)
+                               outp->func->release(outp);
                        outp->ior->asy.outp = NULL;
                        outp->ior = NULL;
                }
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
        if (proto == UNKNOWN)
                return -ENOSYS;
 
+       /* Deal with panels requiring identity-mapped SOR assignment. */
+       if (outp->identity) {
+               ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
+               if (WARN_ON(!ior))
+                       return -ENOSPC;
+               return nvkm_outp_acquire_ior(outp, user, ior);
+       }
+
        /* First preference is to reuse the OR that is currently armed
         * on HW, if any, in order to prevent unnecessary switching.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->arm.outp == outp)
+               if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
 
        /* Failing that, a completely unused OR is the next best thing. */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&
+               if (!ior->identity &&
+                   !ior->asy.outp && ior->type == type && !ior->arm.outp &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
         * but will be released during the next modeset.
         */
        list_for_each_entry(ior, &outp->disp->ior, head) {
-               if (!ior->asy.outp && ior->type == type &&
+               if (!ior->identity && !ior->asy.outp && ior->type == type &&
                    (ior->func->route.set || ior->id == __ffs(outp->info.or)))
                        return nvkm_outp_acquire_ior(outp, user, ior);
        }
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
        outp->index = index;
        outp->info = *dcbE;
        outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
-       outp->or = ffs(outp->info.or) - 1;
 
        OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
                       "edid %x bus %d head %x",
index ea84d7d..6c8aa5c 100644 (file)
@@ -13,10 +13,10 @@ struct nvkm_outp {
        struct dcb_output info;
 
        struct nvkm_i2c_bus *i2c;
-       int or;
 
        struct list_head head;
        struct nvkm_conn *conn;
+       bool identity;
 
        /* Assembly state. */
 #define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
        void (*init)(struct nvkm_outp *);
        void (*fini)(struct nvkm_outp *);
        int (*acquire)(struct nvkm_outp *);
-       void (*release)(struct nvkm_outp *, struct nvkm_ior *);
+       void (*release)(struct nvkm_outp *);
+       void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
 };
 
 #define OUTP_MSG(o,l,f,a...) do {                                              \
index b80618e..17235e9 100644 (file)
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
        struct nvkm_bios *bios = subdev->device->bios;
        struct nvbios_pmuR pmu;
 
-       if (!nvbios_pmuRm(bios, type, &pmu)) {
-               nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
+       if (!nvbios_pmuRm(bios, type, &pmu))
                return -EINVAL;
-       }
 
        if (!post)
                return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                return -EINVAL;
        }
 
+       /* Upload DEVINIT application from VBIOS onto PMU. */
        ret = pmu_load(init, 0x04, post, &exec, &args);
-       if (ret)
+       if (ret) {
+               nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
                return ret;
+       }
 
-       /* upload first chunk of init data */
+       /* Upload tables required by opcodes in boot scripts. */
        if (post) {
-               // devinit tables
                u32 pmu = pmu_args(init, args + 0x08, 0x08);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
                pmu_data(init, pmu, img, len);
        }
 
-       /* upload second chunk of init data */
+       /* Upload boot scripts. */
        if (post) {
-               // devinit boot scripts
                u32 pmu = pmu_args(init, args + 0x08, 0x10);
                u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
                u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
                pmu_data(init, pmu, img, len);
        }
 
-       /* execute init tables */
+       /* Execute DEVINIT. */
        if (post) {
                nvkm_wr32(device, 0x10a040, 0x00005000);
                pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
                        return -ETIMEDOUT;
        }
 
-       /* load and execute some other ucode image (bios therm?) */
-       return pmu_load(init, 0x01, post, NULL, NULL);
+       /* Optional: Execute PRE_OS application on PMU, which should at
+        * least take care of fans until a full PMU has been loaded.
+        */
+       pmu_load(init, 0x01, post, NULL, NULL);
+       return 0;
 }
 
 static const struct nvkm_devinit_func
index de269eb..7459def 100644 (file)
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
 void
 nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
 {
-       if (vmm->func->part && inst) {
+       if (inst && vmm->func->part) {
                mutex_lock(&vmm->mutex);
                vmm->func->part(vmm, inst);
                mutex_unlock(&vmm->mutex);
index a534b22..5fa0441 100644 (file)
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id vexpress_muxfpga_match[] = {
-       { .compatible = "arm,vexpress-muxfpga", }
+       { .compatible = "arm,vexpress-muxfpga", },
+       {}
 };
 
 static struct platform_driver vexpress_muxfpga_driver = {
index dd19d67..8b0cd08 100644 (file)
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
        { .compatible = "allwinner,sun8i-a33-display-engine" },
        { .compatible = "allwinner,sun8i-a83t-display-engine" },
        { .compatible = "allwinner,sun8i-h3-display-engine" },
-       { .compatible = "allwinner,sun8i-r40-display-engine" },
        { .compatible = "allwinner,sun8i-v3s-display-engine" },
        { .compatible = "allwinner,sun9i-a80-display-engine" },
        { }
index 82502b3..a564b5d 100644 (file)
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
 
 static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
        .has_phy_clk = true,
-       .has_second_pll = true,
        .phy_init = &sun8i_hdmi_phy_init_h3,
        .phy_disable = &sun8i_hdmi_phy_disable_h3,
        .phy_config = &sun8i_hdmi_phy_config_h3,
index fc37136..cb65b0e 100644 (file)
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
        .vi_num         = 1,
 };
 
-static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
-       .ccsc           = 0,
-       .mod_rate       = 297000000,
-       .scaler_mask    = 0xf,
-       .ui_num         = 3,
-       .vi_num         = 1,
-};
-
-static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
-       .ccsc           = 1,
-       .mod_rate       = 297000000,
-       .scaler_mask    = 0x3,
-       .ui_num         = 1,
-       .vi_num         = 1,
-};
-
 static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
        .vi_num = 2,
        .ui_num = 1,
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
                .data = &sun8i_h3_mixer0_cfg,
        },
        {
-               .compatible = "allwinner,sun8i-r40-de2-mixer-0",
-               .data = &sun8i_r40_mixer0_cfg,
-       },
-       {
-               .compatible = "allwinner,sun8i-r40-de2-mixer-1",
-               .data = &sun8i_r40_mixer1_cfg,
-       },
-       {
                .compatible = "allwinner,sun8i-v3s-de2-mixer",
                .data = &sun8i_v3s_mixer_cfg,
        },
index 55fe398..d5240b7 100644 (file)
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
 
 /* sun4i_drv uses this list to check if a device node is a TCON TOP */
 const struct of_device_id sun8i_tcon_top_of_table[] = {
-       { .compatible = "allwinner,sun8i-r40-tcon-top" },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
index dbb62f6..dd9ffde 100644 (file)
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
 {
        drm_fb_helper_unregister_fbi(&ufbdev->helper);
        drm_fb_helper_fini(&ufbdev->helper);
-       drm_framebuffer_unregister_private(&ufbdev->ufb.base);
-       drm_framebuffer_cleanup(&ufbdev->ufb.base);
-       drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+       if (ufbdev->ufb.obj) {
+               drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+               drm_framebuffer_cleanup(&ufbdev->ufb.base);
+               drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+       }
 }
 
 int udl_fbdev_init(struct drm_device *dev)
index cfb50fe..a3275fa 100644 (file)
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
        vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
                                                       vc4_state->crtc_h);
 
+       vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+                              vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
        if (num_planes > 1) {
                vc4_state->is_yuv = true;
 
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
                        vc4_get_scaling_mode(vc4_state->src_h[1],
                                             vc4_state->crtc_h);
 
-               /* YUV conversion requires that scaling be enabled,
-                * even on a plane that's otherwise 1:1.  Choose TPZ
-                * for simplicity.
+               /* YUV conversion requires that horizontal scaling be enabled,
+                * even on a plane that's otherwise 1:1. Looks like only PPF
+                * works in that case, so let's pick that one.
                 */
-               if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
-                       vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
-               if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
-                       vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+               if (vc4_state->is_unity)
+                       vc4_state->x_scaling[0] = VC4_SCALING_PPF;
        } else {
                vc4_state->x_scaling[1] = VC4_SCALING_NONE;
                vc4_state->y_scaling[1] = VC4_SCALING_NONE;
        }
 
-       vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
-                              vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
-                              vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
-                              vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
        /* No configuring scaling on the cursor plane, since it gets
           non-vblank-synced updates, and scaling requires requires
           LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
        }
 
-       if (!vc4_state->is_unity) {
+       if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+           vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+           vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+           vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
                /* LBM Base Address. */
                if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
                    vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
index 1f13457..f0ab6b2 100644 (file)
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
        struct vmw_buffer_object *vbo =
                container_of(bo, struct vmw_buffer_object, base);
-       struct ttm_operation_ctx ctx = { interruptible, true };
+       struct ttm_operation_ctx ctx = { interruptible, false };
        int ret;
 
        if (vbo->pin_count > 0)
index 23beff5..6a712a8 100644 (file)
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
                                        struct drm_rect *rects)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
-       struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_rect bounding_box = {0};
        u64 total_pixels = 0, pixel_mem, bb_mem;
        int i;
 
        for (i = 0; i < num_rects; i++) {
                /*
-                * Currently this check is limiting the topology within max
-                * texture/screentarget size. This should change in future when
-                * user-space support multiple fb with topology.
+                * For STDU only individual screen (screen target) is limited by
+                * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
                 */
-               if (rects[i].x1 < 0 ||  rects[i].y1 < 0 ||
-                   rects[i].x2 > mode_config->max_width ||
-                   rects[i].y2 > mode_config->max_height) {
-                       DRM_ERROR("Invalid GUI layout.\n");
+               if (dev_priv->active_display_unit == vmw_du_screen_target &&
+                   (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+                    drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+                       DRM_ERROR("Screen size not supported.\n");
                        return -EINVAL;
                }
 
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
                struct drm_connector_state *conn_state;
                struct vmw_connector_state *vmw_conn_state;
 
-               if (!new_crtc_state->enable && old_crtc_state->enable) {
+               if (!new_crtc_state->enable) {
                        rects[i].x1 = 0;
                        rects[i].y1 = 0;
                        rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
        if (dev_priv->assume_16bpp)
                assumed_bpp = 2;
 
+       max_width  = min(max_width,  dev_priv->texture_max_width);
+       max_height = min(max_height, dev_priv->texture_max_height);
+
+       /*
+        * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
+        * HEIGHT registers.
+        */
        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width  = min(max_width,  dev_priv->stdu_max_width);
-               max_width  = min(max_width,  dev_priv->texture_max_width);
-
                max_height = min(max_height, dev_priv->stdu_max_height);
-               max_height = min(max_height, dev_priv->texture_max_height);
        }
 
        /* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
        struct vmw_private *dev_priv = vmw_priv(dev);
+       struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_vmw_update_layout_arg *arg =
                (struct drm_vmw_update_layout_arg *)data;
        void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                drm_rects[i].y1 = curr_rect.y;
                drm_rects[i].x2 = curr_rect.x + curr_rect.w;
                drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+
+               /*
+                * Currently this check is limiting the topology within
+                * mode_config->max (which actually is max texture size
+                * supported by virtual device). This limit is here to address
+                * window managers that create a big framebuffer for whole
+                * topology.
+                */
+               if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
+                   drm_rects[i].x2 > mode_config->max_width ||
+                   drm_rects[i].y2 > mode_config->max_height) {
+                       DRM_ERROR("Invalid GUI layout.\n");
+                       ret = -EINVAL;
+                       goto out_free;
+               }
        }
 
        ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
index 93f6b96..f30e839 100644 (file)
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
 
        dev_priv->active_display_unit = vmw_du_screen_target;
 
-       if (dev_priv->capabilities & SVGA_CAP_3D) {
-               /*
-                * For 3D VMs, display (scanout) buffer size is the smaller of
-                * max texture and max STDU
-                */
-               uint32_t max_width, max_height;
-
-               max_width = min(dev_priv->texture_max_width,
-                               dev_priv->stdu_max_width);
-               max_height = min(dev_priv->texture_max_height,
-                                dev_priv->stdu_max_height);
-
-               dev->mode_config.max_width = max_width;
-               dev->mode_config.max_height = max_height;
-       } else {
-               /*
-                * Given various display aspect ratios, there's no way to
-                * estimate these using prim_bb_mem.  So just set these to
-                * something arbitrarily large and we will reject any layout
-                * that doesn't fit prim_bb_mem later
-                */
-               dev->mode_config.max_width = 8192;
-               dev->mode_config.max_height = 8192;
-       }
-
        vmw_kms_create_implicit_placement_property(dev_priv, false);
 
        for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
index e125233..80a01cd 100644 (file)
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        *srf_out = NULL;
 
        if (for_scanout) {
-               uint32_t max_width, max_height;
-
                if (!svga3dsurface_is_screen_target_format(format)) {
                        DRM_ERROR("Invalid Screen Target surface format.");
                        return -EINVAL;
                }
 
-               max_width = min(dev_priv->texture_max_width,
-                               dev_priv->stdu_max_width);
-               max_height = min(dev_priv->texture_max_height,
-                                dev_priv->stdu_max_height);
-
-               if (size.width > max_width || size.height > max_height) {
+               if (size.width > dev_priv->texture_max_width ||
+                   size.height > dev_priv->texture_max_height) {
                        DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
                                  size.width, size.height,
-                                 max_width, max_height);
+                                 dev_priv->texture_max_width,
+                                 dev_priv->texture_max_height);
                        return -EINVAL;
                }
        } else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
        if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
                srf->res.backup_size += sizeof(SVGA3dDXSOState);
 
+       /*
+        * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
+        * size greater than STDU max width/height. This is really a workaround
+        * to support creation of big framebuffer requested by some user-space
+        * for whole topology. That big framebuffer won't really be used for
+        * binding with screen target as during prepare_fb a separate surface is
+        * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
+        */
        if (dev_priv->active_display_unit == vmw_du_screen_target &&
-           for_scanout)
+           for_scanout && size.width <= dev_priv->stdu_max_width &&
+           size.height <= dev_priv->stdu_max_height)
                srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
 
        /*
index a96bf46..cf2a185 100644 (file)
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
                        return;
 
                client->id = ret | ID_BIT_AUDIO;
+               if (client->ops->gpu_bound)
+                       client->ops->gpu_bound(client->pdev, ret);
        }
 
        vga_switcheroo_debugfs_init(&vgasr_priv);
index 25b7bd5..1cb4199 100644 (file)
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
-       if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+       if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+                       usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
                /* The fn key on Apple USB keyboards */
                set_bit(EV_REP, hi->input->evbit);
                hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
                .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
                .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
+       { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+               .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
                .driver_data = APPLE_HAS_FN },
        { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
index 3da354a..44564f6 100644 (file)
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
        parser = vzalloc(sizeof(struct hid_parser));
        if (!parser) {
                ret = -ENOMEM;
-               goto err;
+               goto alloc_err;
        }
 
        parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
                                hid_err(device, "unbalanced delimiter at end of report description\n");
                                goto err;
                        }
+                       kfree(parser->collection_stack);
                        vfree(parser);
                        device->status |= HID_STAT_PARSED;
                        return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
 
        hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
 err:
+       kfree(parser->collection_stack);
+alloc_err:
        vfree(parser);
        hid_close_report(device);
        return ret;
index 79bdf0c..bc49909 100644 (file)
@@ -88,6 +88,7 @@
 #define USB_DEVICE_ID_ANTON_TOUCH_PAD  0x3101
 
 #define USB_VENDOR_ID_APPLE            0x05ac
+#define BT_VENDOR_ID_APPLE             0x004c
 #define USB_DEVICE_ID_APPLE_MIGHTYMOUSE        0x0304
 #define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD      0x030e
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO   0x0256
 #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS   0x0257
 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI   0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI   0x026c
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI   0x0290
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO    0x0291
 #define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS    0x0292
 #define I2C_VENDOR_ID_HANTICK          0x0911
 #define I2C_PRODUCT_ID_HANTICK_5288    0x5288
 
-#define I2C_VENDOR_ID_RAYD             0x2386
-#define I2C_PRODUCT_ID_RAYD_3118       0x3118
-
 #define USB_VENDOR_ID_HANWANG          0x0b57
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST     0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST      0x8fff
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
 #define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION    0x0ccd
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_RAT9      0x0cfa
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
 #define USB_DEVICE_ID_SIS817_TOUCH     0x0817
 #define USB_DEVICE_ID_SIS_TS           0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH    0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH    0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE                 0x1223
 #define        USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER       0x3F07
index 4e94ea3..a481eaf 100644 (file)
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
        input_dev->dev.parent = &hid->dev;
 
        hidinput->input = input_dev;
+       hidinput->application = application;
        list_add_tail(&hidinput->list, &hid->inputs);
 
        INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
        struct hid_input *hidinput;
 
        list_for_each_entry(hidinput, &hid->inputs, list) {
-               if (hidinput->report &&
-                   hidinput->report->application == report->application)
+               if (hidinput->application == report->application)
                        return hidinput;
        }
 
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
                        input_unregister_device(hidinput->input);
                else
                        input_free_device(hidinput->input);
+               kfree(hidinput->name);
                kfree(hidinput);
        }
 
index 40fbb7c..da954f3 100644 (file)
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                                     struct hid_usage *usage,
                                     enum latency_mode latency,
                                     bool surface_switch,
-                                    bool button_switch)
+                                    bool button_switch,
+                                    bool *inputmode_found)
 {
        struct mt_device *td = hid_get_drvdata(hdev);
        struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
 
        switch (usage->hid) {
        case HID_DG_INPUTMODE:
+               /*
+                * Some elan panels wrongly declare 2 input mode features,
+                * and silently ignore when we set the value in the second
+                * field. Skip the second feature and hope for the best.
+                */
+               if (*inputmode_found)
+                       return false;
+
                if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
                        report_len = hid_report_len(report);
                        buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
                }
 
                field->value[index] = td->inputmode_value;
+               *inputmode_found = true;
                return true;
 
        case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
        struct hid_usage *usage;
        int i, j;
        bool update_report;
+       bool inputmode_found = false;
 
        rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
        list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
                                                             usage,
                                                             latency,
                                                             surface_switch,
-                                                            button_switch))
+                                                            button_switch,
+                                                            &inputmode_found))
                                        update_report = true;
                        }
                }
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
         */
        hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
 
+       if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+               hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
        timer_setup(&td->release_timer, mt_expired_timeout, 0);
 
        ret = hid_parse(hdev);
index 39e6426..683861f 100644 (file)
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 50af72b..2b63487 100644 (file)
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
 }
 EXPORT_SYMBOL_GPL(sensor_hub_device_close);
 
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+               unsigned int *rsize)
+{
+       /*
+        * Checks if the report descriptor of Thinkpad Helix 2 has a logical
+        * minimum for magnetic flux axis greater than the maximum.
+        */
+       if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+               *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+               rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+               rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+               rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+               /* Sets negative logical minimum for mag x, y and z */
+               rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+               rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+               rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+               rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+       }
+
+       return rdesc;
+}
+
 static int sensor_hub_probe(struct hid_device *hdev,
                                const struct hid_device_id *id)
 {
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
        .probe = sensor_hub_probe,
        .remove = sensor_hub_remove,
        .raw_event = sensor_hub_raw_event,
+       .report_fixup = sensor_hub_report_fixup,
 #ifdef CONFIG_PM
        .suspend = sensor_hub_suspend,
        .resume = sensor_hub_resume,
index 2ce194a..4e3592e 100644 (file)
@@ -47,7 +47,7 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR      BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
 
 /* flags */
 #define I2C_HID_STARTED                0
@@ -169,11 +169,8 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-       { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
-       { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
-               I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+               I2C_HID_QUIRK_NO_RUNTIME_PM },
        { 0, 0 }
 };
 
@@ -1107,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       pm_runtime_put(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_put(&client->dev);
+
        return 0;
 
 err_mem_free:
@@ -1132,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       pm_runtime_get_sync(&client->dev);
+       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+               pm_runtime_get_sync(&client->dev);
        pm_runtime_disable(&client->dev);
        pm_runtime_set_suspended(&client->dev);
        pm_runtime_put_noidle(&client->dev);
@@ -1235,19 +1235,15 @@ static int i2c_hid_resume(struct device *dev)
        pm_runtime_enable(dev);
 
        enable_irq(client->irq);
-       ret = i2c_hid_hwreset(client);
-       if (ret)
-               return ret;
 
-       /* RAYDIUM device (2386:3118) need to re-send report descr cmd
-        * after resume, after this it will be back normal.
-        * otherwise it issues too many incomplete reports.
+       /* Instead of resetting device, simply powers the device on. This
+        * solves "incomplete reports" on Raydium devices 2386:3118 and
+        * 2386:4B33 and fixes various SIS touchscreens no longer sending
+        * data after a suspend/resume.
         */
-       if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
-               ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
-               if (ret)
-                       return ret;
-       }
+       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+       if (ret)
+               return ret;
 
        if (hid->driver && hid->driver->reset_resume) {
                ret = hid->driver->reset_resume(hid);
index 97869b7..08a8327 100644 (file)
@@ -29,6 +29,8 @@
 #define CNL_Ax_DEVICE_ID       0x9DFC
 #define GLK_Ax_DEVICE_ID       0x31A2
 #define CNL_H_DEVICE_ID                0xA37C
+#define ICL_MOBILE_DEVICE_ID   0x34FC
+#define SPT_H_DEVICE_ID                0xA135
 
 #define        REVISION_ID_CHT_A0      0x6
 #define        REVISION_ID_CHT_Ax_SI   0x0
index 050f987..256b301 100644 (file)
@@ -38,6 +38,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
        {0, }
 };
 MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
index ced0418..f4d08c8 100644 (file)
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
                                        __u32 version)
 {
        int ret = 0;
+       unsigned int cur_cpu;
        struct vmbus_channel_initiate_contact *msg;
        unsigned long flags;
 
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
         * the CPU attempting to connect may not be CPU 0.
         */
        if (version >= VERSION_WIN8_1) {
-               msg->target_vcpu =
-                       hv_cpu_number_to_vp_number(smp_processor_id());
-               vmbus_connection.connect_cpu = smp_processor_id();
+               cur_cpu = get_cpu();
+               msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+               vmbus_connection.connect_cpu = cur_cpu;
+               put_cpu();
        } else {
                msg->target_vcpu = 0;
                vmbus_connection.connect_cpu = 0;
index b1b548a..c71cc85 100644 (file)
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
        if (!attribute->show)
                return -EIO;
 
+       if (chan->state != CHANNEL_OPENED_STATE)
+               return -EINVAL;
+
        return attribute->show(chan, buf);
 }
 
index 90837f7..f4c7516 100644 (file)
@@ -302,14 +302,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
        return clamp_val(reg, 0, 1023) & (0xff << 2);
 }
 
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
 {
-       u16 val;
+       int val1, val2;
 
-       val = i2c_smbus_read_byte_data(client, reg);
-       val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+       val1 = i2c_smbus_read_byte_data(client, reg);
+       if (val1 < 0)
+               return val1;
+       val2 = i2c_smbus_read_byte_data(client, reg + 1);
+       if (val2 < 0)
+               return val2;
 
-       return val;
+       return val1 | (val2 << 8);
 }
 
 static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
@@ -962,13 +966,14 @@ static ssize_t show_pwmfreq(struct device *dev, struct device_attribute *attr,
 {
        struct adt7475_data *data = adt7475_update_device(dev);
        struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
-       int i = clamp_val(data->range[sattr->index] & 0xf, 0,
-                         ARRAY_SIZE(pwmfreq_table) - 1);
+       int idx;
 
        if (IS_ERR(data))
                return PTR_ERR(data);
+       idx = clamp_val(data->range[sattr->index] & 0xf, 0,
+                       ARRAY_SIZE(pwmfreq_table) - 1);
 
-       return sprintf(buf, "%d\n", pwmfreq_table[i]);
+       return sprintf(buf, "%d\n", pwmfreq_table[idx]);
 }
 
 static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
@@ -1004,6 +1009,10 @@ static ssize_t pwm_use_point2_pwm_at_crit_show(struct device *dev,
                                        char *buf)
 {
        struct adt7475_data *data = adt7475_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", !!(data->config4 & CONFIG4_MAXDUTY));
 }
 
index e9e6aea..71d3445 100644 (file)
@@ -17,7 +17,7 @@
  * Bi-directional Current/Power Monitor with I2C Interface
  * Datasheet: http://www.ti.com/product/ina230
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  * Thanks to Jan Volkering
  *
  * This program is free software; you can redistribute it and/or modify
@@ -329,6 +329,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
        return 0;
 }
 
+static ssize_t ina2xx_show_shunt(struct device *dev,
+                             struct device_attribute *da,
+                             char *buf)
+{
+       struct ina2xx_data *data = dev_get_drvdata(dev);
+
+       return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
 static ssize_t ina2xx_store_shunt(struct device *dev,
                                  struct device_attribute *da,
                                  const char *buf, size_t count)
@@ -403,7 +412,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
 
 /* shunt resistance */
 static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
-                         ina2xx_show_value, ina2xx_store_shunt,
+                         ina2xx_show_shunt, ina2xx_store_shunt,
                          INA2XX_CALIBRATION);
 
 /* update interval (ina226 only) */
index c6bd61e..78603b7 100644 (file)
@@ -63,6 +63,7 @@
 #include <linux/bitops.h>
 #include <linux/dmi.h>
 #include <linux/io.h>
+#include <linux/nospec.h>
 #include "lm75.h"
 
 #define USE_ALTERNATE
@@ -206,8 +207,6 @@ superio_exit(int ioreg)
 
 #define NUM_FAN                7
 
-#define TEMP_SOURCE_VIRTUAL    0x1f
-
 /* Common and NCT6775 specific data */
 
 /* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -298,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
 
 static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
 static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
-static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
-static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
+static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
+       0x641, 0x642, 0x643, 0x644 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
 
 static const u16 NCT6775_REG_TEMP[] = {
        0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -372,6 +372,7 @@ static const char *const nct6775_temp_label[] = {
 };
 
 #define NCT6775_TEMP_MASK      0x001ffffe
+#define NCT6775_VIRT_TEMP_MASK 0x00000000
 
 static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
        [13] = 0x661,
@@ -424,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
 
 static const u16 NCT6776_REG_FAN_MIN[] = {
        0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
-static const u16 NCT6776_REG_FAN_PULSES[] = {
-       0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
+       0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
 
 static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
        0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -460,6 +461,7 @@ static const char *const nct6776_temp_label[] = {
 };
 
 #define NCT6776_TEMP_MASK      0x007ffffe
+#define NCT6776_VIRT_TEMP_MASK 0x00000000
 
 static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
        [14] = 0x401,
@@ -500,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
        30, 31 };                       /* intrusion0, intrusion1 */
 
 static const u16 NCT6779_REG_FAN[] = {
-       0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 };
-static const u16 NCT6779_REG_FAN_PULSES[] = {
-       0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+       0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
+static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
+       0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
 
 static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
        0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -558,7 +560,9 @@ static const char *const nct6779_temp_label[] = {
 };
 
 #define NCT6779_TEMP_MASK      0x07ffff7e
+#define NCT6779_VIRT_TEMP_MASK 0x00000000
 #define NCT6791_TEMP_MASK      0x87ffff7e
+#define NCT6791_VIRT_TEMP_MASK 0x80000000
 
 static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
        = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -637,6 +641,7 @@ static const char *const nct6792_temp_label[] = {
 };
 
 #define NCT6792_TEMP_MASK      0x9fffff7e
+#define NCT6792_VIRT_TEMP_MASK 0x80000000
 
 static const char *const nct6793_temp_label[] = {
        "",
@@ -674,6 +679,7 @@ static const char *const nct6793_temp_label[] = {
 };
 
 #define NCT6793_TEMP_MASK      0xbfff037e
+#define NCT6793_VIRT_TEMP_MASK 0x80000000
 
 static const char *const nct6795_temp_label[] = {
        "",
@@ -711,6 +717,7 @@ static const char *const nct6795_temp_label[] = {
 };
 
 #define NCT6795_TEMP_MASK      0xbfffff7e
+#define NCT6795_VIRT_TEMP_MASK 0x80000000
 
 static const char *const nct6796_temp_label[] = {
        "",
@@ -723,8 +730,8 @@ static const char *const nct6796_temp_label[] = {
        "AUXTIN4",
        "SMBUSMASTER 0",
        "SMBUSMASTER 1",
-       "",
-       "",
+       "Virtual_TEMP",
+       "Virtual_TEMP",
        "",
        "",
        "",
@@ -747,7 +754,8 @@ static const char *const nct6796_temp_label[] = {
        "Virtual_TEMP"
 };
 
-#define NCT6796_TEMP_MASK      0xbfff03fe
+#define NCT6796_TEMP_MASK      0xbfff0ffe
+#define NCT6796_VIRT_TEMP_MASK 0x80000c00
 
 /* NCT6102D/NCT6106D specific data */
 
@@ -778,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
 
 static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
 static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
-static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
-static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
 
 static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
 static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -916,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
        return 1350000U / (reg << divreg);
 }
 
+static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
+{
+       return reg;
+}
+
 static u16 fan_to_reg(u32 fan, unsigned int divreg)
 {
        if (!fan)
@@ -968,6 +981,7 @@ struct nct6775_data {
        u16 reg_temp_config[NUM_TEMP];
        const char * const *temp_label;
        u32 temp_mask;
+       u32 virt_temp_mask;
 
        u16 REG_CONFIG;
        u16 REG_VBAT;
@@ -1275,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
        case nct6795:
        case nct6796:
                return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
-                 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
+                 (reg & 0xfff0) == 0x4c0 ||
                  reg == 0x402 ||
                  reg == 0x63a || reg == 0x63c || reg == 0x63e ||
                  reg == 0x640 || reg == 0x642 || reg == 0x64a ||
-                 reg == 0x64c || reg == 0x660 ||
+                 reg == 0x64c ||
                  reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
                  reg == 0x7b || reg == 0x7d;
        }
@@ -1557,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev)
                reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
                data->pwm_weight_temp_sel[i] = reg & 0x1f;
                /* If weight is disabled, report weight source as 0 */
-               if (j == 1 && !(reg & 0x80))
+               if (!(reg & 0x80))
                        data->pwm_weight_temp_sel[i] = 0;
 
                /* Weight temp data */
@@ -1681,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
                        if (data->has_fan_min & BIT(i))
                                data->fan_min[i] = nct6775_read_value(data,
                                           data->REG_FAN_MIN[i]);
-                       data->fan_pulses[i] =
-                         (nct6775_read_value(data, data->REG_FAN_PULSES[i])
-                               >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+
+                       if (data->REG_FAN_PULSES[i]) {
+                               data->fan_pulses[i] =
+                                 (nct6775_read_value(data,
+                                                     data->REG_FAN_PULSES[i])
+                                  >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+                       }
 
                        nct6775_select_fan_div(dev, data, i, reg);
                }
@@ -2689,6 +2707,7 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
                return err;
        if (val > NUM_TEMP)
                return -EINVAL;
+       val = array_index_nospec(val, NUM_TEMP + 1);
        if (val && (!(data->have_temp & BIT(val - 1)) ||
                    !data->temp_src[val - 1]))
                return -EINVAL;
@@ -3637,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6776_temp_label;
                data->temp_mask = NCT6776_TEMP_MASK;
+               data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
 
                data->REG_VBAT = NCT6106_REG_VBAT;
                data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3715,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6775_temp_label;
                data->temp_mask = NCT6775_TEMP_MASK;
+               data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3787,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6776_temp_label;
                data->temp_mask = NCT6776_TEMP_MASK;
+               data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3851,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->ALARM_BITS = NCT6779_ALARM_BITS;
                data->BEEP_BITS = NCT6779_BEEP_BITS;
 
-               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg = fan_from_reg_rpm;
                data->fan_from_reg_min = fan_from_reg13;
                data->target_temp_mask = 0xff;
                data->tolerance_mask = 0x07;
@@ -3859,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev)
 
                data->temp_label = nct6779_temp_label;
                data->temp_mask = NCT6779_TEMP_MASK;
+               data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
 
                data->REG_CONFIG = NCT6775_REG_CONFIG;
                data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3931,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev)
                data->ALARM_BITS = NCT6791_ALARM_BITS;
                data->BEEP_BITS = NCT6779_BEEP_BITS;
 
-               data->fan_from_reg = fan_from_reg13;
+               data->fan_from_reg = fan_from_reg_rpm;
                data->fan_from_reg_min = fan_from_reg13;
                data->target_temp_mask = 0xff;
                data->tolerance_mask = 0x07;
@@ -3942,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev)
                case nct6791:
                        data->temp_label = nct6779_temp_label;
                        data->temp_mask = NCT6791_TEMP_MASK;
+                       data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
                        break;
                case nct6792:
                        data->temp_label = nct6792_temp_label;
                        data->temp_mask = NCT6792_TEMP_MASK;
+                       data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
                        break;
                case nct6793:
                        data->temp_label = nct6793_temp_label;
                        data->temp_mask = NCT6793_TEMP_MASK;
+                       data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
                        break;
                case nct6795:
                        data->temp_label = nct6795_temp_label;
                        data->temp_mask = NCT6795_TEMP_MASK;
+                       data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
                        break;
                case nct6796:
                        data->temp_label = nct6796_temp_label;
                        data->temp_mask = NCT6796_TEMP_MASK;
+                       data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
                        break;
                }
 
@@ -4141,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev)
                 * for each fan reflects a different temperature, and there
                 * are no duplicates.
                 */
-               if (src != TEMP_SOURCE_VIRTUAL) {
+               if (!(data->virt_temp_mask & BIT(src))) {
                        if (mask & BIT(src))
                                continue;
                        mask |= BIT(src);
index fb4e4a6..be5ba46 100644 (file)
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
 MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
 MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
index da962aa..fc6b7f8 100644 (file)
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
                        th->thdev[i] = NULL;
                }
 
-               th->num_thdevs = lowest;
+               if (lowest >= 0)
+                       th->num_thdevs = lowest;
        }
 
        if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
                                .flags  = IORESOURCE_MEM,
                        },
                        {
-                               .start  = TH_MMIO_SW,
+                               .start  = 1, /* use resource[1] */
                                .end    = 0,
                                .flags  = IORESOURCE_MEM,
                        },
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
        struct intel_th_device *thdev;
        struct resource res[3];
        unsigned int req = 0;
+       bool is64bit = false;
        int r, err;
 
        thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
 
        thdev->drvdata = th->drvdata;
 
+       for (r = 0; r < th->num_resources; r++)
+               if (th->resource[r].flags & IORESOURCE_MEM_64) {
+                       is64bit = true;
+                       break;
+               }
+
        memcpy(res, subdev->res,
               sizeof(struct resource) * subdev->nres);
 
        for (r = 0; r < subdev->nres; r++) {
                struct resource *devres = th->resource;
-               int bar = TH_MMIO_CONFIG;
+               int bar = 0; /* cut subdevices' MMIO from resource[0] */
 
                /*
                 * Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
                 */
                if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
                        bar = res[r].start;
+                       if (is64bit)
+                               bar *= 2;
                        res[r].start = 0;
                        res[r].end = resource_size(&devres[bar]) - 1;
                }
index c2e55e5..1cf6290 100644 (file)
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Ice Lake PCH */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        { 0 },
 };
 
index 6ec65ad..c33dcfb 100644 (file)
@@ -110,8 +110,8 @@ static int sclhi(struct i2c_algo_bit_data *adap)
        }
 #ifdef DEBUG
        if (jiffies != start && i2c_debug >= 3)
-               pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go "
-                        "high\n", jiffies - start);
+               pr_debug("i2c-algo-bit: needed %ld jiffies for SCL to go high\n",
+                        jiffies - start);
 #endif
 
 done:
@@ -171,8 +171,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
                setsda(adap, sb);
                udelay((adap->udelay + 1) / 2);
                if (sclhi(adap) < 0) { /* timed out */
-                       bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
-                               "timeout at bit #%d\n", (int)c, i);
+                       bit_dbg(1, &i2c_adap->dev,
+                               "i2c_outb: 0x%02x, timeout at bit #%d\n",
+                               (int)c, i);
                        return -ETIMEDOUT;
                }
                /* FIXME do arbitration here:
@@ -185,8 +186,8 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
        }
        sdahi(adap);
        if (sclhi(adap) < 0) { /* timeout */
-               bit_dbg(1, &i2c_adap->dev, "i2c_outb: 0x%02x, "
-                       "timeout at ack\n", (int)c);
+               bit_dbg(1, &i2c_adap->dev,
+                       "i2c_outb: 0x%02x, timeout at ack\n", (int)c);
                return -ETIMEDOUT;
        }
 
@@ -215,8 +216,9 @@ static int i2c_inb(struct i2c_adapter *i2c_adap)
        sdahi(adap);
        for (i = 0; i < 8; i++) {
                if (sclhi(adap) < 0) { /* timeout */
-                       bit_dbg(1, &i2c_adap->dev, "i2c_inb: timeout at bit "
-                               "#%d\n", 7 - i);
+                       bit_dbg(1, &i2c_adap->dev,
+                               "i2c_inb: timeout at bit #%d\n",
+                               7 - i);
                        return -ETIMEDOUT;
                }
                indata *= 2;
@@ -265,8 +267,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!scl) {
-               printk(KERN_WARNING "%s: SCL unexpected low "
-                      "while pulling SDA low!\n", name);
+               printk(KERN_WARNING
+                      "%s: SCL unexpected low while pulling SDA low!\n",
+                      name);
                goto bailout;
        }
 
@@ -278,8 +281,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!scl) {
-               printk(KERN_WARNING "%s: SCL unexpected low "
-                      "while pulling SDA high!\n", name);
+               printk(KERN_WARNING
+                      "%s: SCL unexpected low while pulling SDA high!\n",
+                      name);
                goto bailout;
        }
 
@@ -291,8 +295,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!sda) {
-               printk(KERN_WARNING "%s: SDA unexpected low "
-                      "while pulling SCL low!\n", name);
+               printk(KERN_WARNING
+                      "%s: SDA unexpected low while pulling SCL low!\n",
+                      name);
                goto bailout;
        }
 
@@ -304,8 +309,9 @@ static int test_bus(struct i2c_adapter *i2c_adap)
                goto bailout;
        }
        if (!sda) {
-               printk(KERN_WARNING "%s: SDA unexpected low "
-                      "while pulling SCL high!\n", name);
+               printk(KERN_WARNING
+                      "%s: SDA unexpected low while pulling SCL high!\n",
+                      name);
                goto bailout;
        }
 
@@ -352,8 +358,8 @@ static int try_address(struct i2c_adapter *i2c_adap,
                i2c_start(adap);
        }
        if (i && ret)
-               bit_dbg(1, &i2c_adap->dev, "Used %d tries to %s client at "
-                       "0x%02x: %s\n", i + 1,
+               bit_dbg(1, &i2c_adap->dev,
+                       "Used %d tries to %s client at 0x%02x: %s\n", i + 1,
                        addr & 1 ? "read from" : "write to", addr >> 1,
                        ret == 1 ? "success" : "failed, timeout?");
        return ret;
@@ -442,8 +448,9 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
                        if (inval <= 0 || inval > I2C_SMBUS_BLOCK_MAX) {
                                if (!(flags & I2C_M_NO_RD_ACK))
                                        acknak(i2c_adap, 0);
-                               dev_err(&i2c_adap->dev, "readbytes: invalid "
-                                       "block length (%d)\n", inval);
+                               dev_err(&i2c_adap->dev,
+                                       "readbytes: invalid block length (%d)\n",
+                                       inval);
                                return -EPROTO;
                        }
                        /* The original count value accounts for the extra
@@ -506,8 +513,8 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
                        return -ENXIO;
                }
                if (flags & I2C_M_RD) {
-                       bit_dbg(3, &i2c_adap->dev, "emitting repeated "
-                               "start condition\n");
+                       bit_dbg(3, &i2c_adap->dev,
+                               "emitting repeated start condition\n");
                        i2c_repstart(adap);
                        /* okay, now switch into reading mode */
                        addr |= 0x01;
@@ -564,8 +571,8 @@ static int bit_xfer(struct i2c_adapter *i2c_adap,
                        }
                        ret = bit_doAddress(i2c_adap, pmsg);
                        if ((ret != 0) && !nak_ok) {
-                               bit_dbg(1, &i2c_adap->dev, "NAK from "
-                                       "device addr 0x%02x msg #%d\n",
+                               bit_dbg(1, &i2c_adap->dev,
+                                       "NAK from device addr 0x%02x msg #%d\n",
                                        msgs[i].addr, i);
                                goto bailout;
                        }
index e18442b..18cc324 100644 (file)
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
 
 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 {
-       u32 ic_clk = i2c_dw_clk_rate(dev);
        const char *mode_str, *fp_str = "";
        u32 comp_param1;
        u32 sda_falling_time, scl_falling_time;
        struct i2c_timings *t = &dev->timings;
+       u32 ic_clk;
        int ret;
 
        ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 
        /* Calculate SCL timing parameters for standard mode if not set */
        if (!dev->ss_hcnt || !dev->ss_lcnt) {
+               ic_clk = i2c_dw_clk_rate(dev);
                dev->ss_hcnt =
                        i2c_dw_scl_hcnt(ic_clk,
                                        4000,   /* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
         * needed also in high speed mode.
         */
        if (!dev->fs_hcnt || !dev->fs_lcnt) {
+               ic_clk = i2c_dw_clk_rate(dev);
                dev->fs_hcnt =
                        i2c_dw_scl_hcnt(ic_clk,
                                        600,    /* tHD;STA = tHIGH = 0.6 us */
@@ -708,7 +710,6 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
        i2c_set_adapdata(adap, dev);
 
        if (dev->pm_disabled) {
-               dev_pm_syscore_device(dev->dev, true);
                irq_flags = IRQF_NO_SUSPEND;
        } else {
                irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
index 1a8d2da..b5750fd 100644 (file)
@@ -434,6 +434,9 @@ static int dw_i2c_plat_suspend(struct device *dev)
 {
        struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
+       if (i_dev->pm_disabled)
+               return 0;
+
        i_dev->disable(i_dev);
        i2c_dw_prepare_clk(i_dev, false);
 
@@ -444,7 +447,9 @@ static int dw_i2c_plat_resume(struct device *dev)
 {
        struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
 
-       i2c_dw_prepare_clk(i_dev, true);
+       if (!i_dev->pm_disabled)
+               i2c_dw_prepare_clk(i_dev, true);
+
        i_dev->init(i_dev);
 
        return 0;
index 941c223..c91e145 100644 (file)
 
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
+#define SBREG_SMBCTRL_DNV      0xcf000c
 
 /* Host status bits for SMBPCISTS */
 #define SMBPCISTS_INTS         BIT(3)
@@ -1399,7 +1400,11 @@ static void i801_add_tco(struct i801_priv *priv)
        spin_unlock(&p2sb_spinlock);
 
        res = &tco_res[ICH_RES_MEM_OFF];
-       res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+       if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+       else
+               res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
        res->end = res->start + 3;
        res->flags = IORESOURCE_MEM;
 
@@ -1415,6 +1420,13 @@ static void i801_add_tco(struct i801_priv *priv)
 }
 
 #ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+                                     acpi_physical_address address)
+{
+       return address >= priv->smba &&
+              address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
 static acpi_status
 i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
                     u64 *value, void *handler_context, void *region_context)
@@ -1430,7 +1442,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
         */
        mutex_lock(&priv->acpi_lock);
 
-       if (!priv->acpi_reserved) {
+       if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
                priv->acpi_reserved = true;
 
                dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
index 6d975f5..06c4c76 100644 (file)
@@ -538,7 +538,6 @@ static const struct i2c_algorithm lpi2c_imx_algo = {
 
 static const struct of_device_id lpi2c_imx_of_match[] = {
        { .compatible = "fsl,imx7ulp-lpi2c" },
-       { .compatible = "fsl,imx8dv-lpi2c" },
        { },
 };
 MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);
index 0cf1379..5c754bf 100644 (file)
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
                 * run ~75 kHz instead which should do no harm.
                 */
                dev_notice(&sch_adapter.dev,
-                       "Clock divider unitialized. Setting defaults\n");
+                       "Clock divider uninitialized. Setting defaults\n");
                outw(backbone_speed / (4 * 100), SMBHSTCLK);
        }
 
index 36732eb..9f2eb02 100644 (file)
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
        dma_addr_t rx_dma;
        enum geni_se_xfer_mode mode;
        unsigned long time_left = XFER_TIMEOUT;
+       void *dma_buf;
 
        gi2c->cur = msg;
-       mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+       mode = GENI_SE_FIFO;
+       dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+       if (dma_buf)
+               mode = GENI_SE_DMA;
+
        geni_se_select_mode(&gi2c->se, mode);
        writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
        geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
        if (mode == GENI_SE_DMA) {
                int ret;
 
-               ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+               ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
                                                                &rx_dma);
                if (ret) {
                        mode = GENI_SE_FIFO;
                        geni_se_select_mode(&gi2c->se, mode);
+                       i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
                }
        }
 
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
                if (gi2c->err)
                        geni_i2c_rx_fsm_rst(gi2c);
                geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+               i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
        }
        return gi2c->err;
 }
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
        dma_addr_t tx_dma;
        enum geni_se_xfer_mode mode;
        unsigned long time_left;
+       void *dma_buf;
 
        gi2c->cur = msg;
-       mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+       mode = GENI_SE_FIFO;
+       dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+       if (dma_buf)
+               mode = GENI_SE_DMA;
+
        geni_se_select_mode(&gi2c->se, mode);
        writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
        geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
        if (mode == GENI_SE_DMA) {
                int ret;
 
-               ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+               ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
                                                                &tx_dma);
                if (ret) {
                        mode = GENI_SE_FIFO;
                        geni_se_select_mode(&gi2c->se, mode);
+                       i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
                }
        }
 
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
                if (gi2c->err)
                        geni_i2c_tx_fsm_rst(gi2c);
                geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+               i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
        }
        return gi2c->err;
 }
index a01389b..7e9a2bb 100644 (file)
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
                        mt_params[3].type = ACPI_TYPE_INTEGER;
                        mt_params[3].integer.value = len;
                        mt_params[4].type = ACPI_TYPE_BUFFER;
+                       mt_params[4].buffer.length = len;
                        mt_params[4].buffer.pointer = data->block + 1;
                }
                break;
index 439e877..818cab1 100644 (file)
@@ -507,8 +507,6 @@ static void sh_mobile_i2c_dma_callback(void *data)
        pd->pos = pd->msg->len;
        pd->stop_after_dma = true;
 
-       i2c_release_dma_safe_msg_buf(pd->msg, pd->dma_buf);
-
        iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
 
@@ -602,8 +600,8 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
        dma_async_issue_pending(chan);
 }
 
-static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
-                   bool do_init)
+static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
+                    bool do_init)
 {
        if (do_init) {
                /* Initialize channel registers */
@@ -627,7 +625,6 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
 
        /* Enable all interrupts to begin with */
        iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
-       return 0;
 }
 
 static int poll_dte(struct sh_mobile_i2c_data *pd)
@@ -698,9 +695,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
                pd->stop_after_dma = false;
 
-               err = start_ch(pd, msg, do_start);
-               if (err)
-                       break;
+               start_ch(pd, msg, do_start);
 
                if (do_start)
                        i2c_op(pd, OP_START, 0);
@@ -709,6 +704,10 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
                timeout = wait_event_timeout(pd->wait,
                                       pd->sr & (ICSR_TACK | SW_DONE),
                                       adapter->timeout);
+
+               /* 'stop_after_dma' tells if DMA transfer was complete */
+               i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
+
                if (!timeout) {
                        dev_err(pd->dev, "Transfer request timed out\n");
                        if (pd->dma_direction != DMA_NONE)
index 9918bdd..a403e85 100644 (file)
@@ -401,11 +401,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
                if (ret)
index bb181b0..454f914 100644 (file)
@@ -248,11 +248,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
                return ret;
 
        for (msg = msgs; msg < emsg; msg++) {
-               /* If next message is read, skip the stop condition */
-               bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
-               /* but, force it if I2C_M_STOP is set */
-               if (msg->flags & I2C_M_STOP)
-                       stop = true;
+               /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+               bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
 
                ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
                if (ret)
index 9a71e50..0c51c0f 100644 (file)
@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 {
        u8 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+       unsigned long flags;
 
        /* Clear and enable Rx full interrupt. */
        xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
                rx_watermark = IIC_RX_FIFO_DEPTH;
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
 
+       local_irq_save(flags);
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
 
        xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+       local_irq_restore(flags);
+
        if (i2c->nmsgs == 1)
                /* very last, enable bus not busy as well */
                xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
index f157377..9ee9a15 100644 (file)
@@ -2293,21 +2293,22 @@ u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
 EXPORT_SYMBOL_GPL(i2c_get_dma_safe_msg_buf);
 
 /**
- * i2c_release_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
- * @msg: the message to be synced with
+ * i2c_put_dma_safe_msg_buf - release DMA safe buffer and sync with i2c_msg
  * @buf: the buffer obtained from i2c_get_dma_safe_msg_buf(). May be NULL.
+ * @msg: the message which the buffer corresponds to
+ * @xferred: bool saying if the message was transferred
  */
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf)
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred)
 {
        if (!buf || buf == msg->buf)
                return;
 
-       if (msg->flags & I2C_M_RD)
+       if (xferred && msg->flags & I2C_M_RD)
                memcpy(msg->buf, buf, msg->len);
 
        kfree(buf);
 }
-EXPORT_SYMBOL_GPL(i2c_release_dma_safe_msg_buf);
+EXPORT_SYMBOL_GPL(i2c_put_dma_safe_msg_buf);
 
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
index 7589f2a..631360b 100644 (file)
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
 
 int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
 {
-       u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
+       u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
        struct st_lsm6dsx_hw *hw = sensor->hw;
        struct st_lsm6dsx_sensor *cur_sensor;
        int i, err, data;
        __le16 wdata;
 
+       if (!hw->sip)
+               return 0;
+
        for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
                cur_sensor = iio_priv(hw->iio_devs[i]);
 
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
                                                       : cur_sensor->watermark;
 
                fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
-               sip += cur_sensor->sip;
        }
 
-       if (!sip)
-               return 0;
-
-       fifo_watermark = max_t(u16, fifo_watermark, sip);
-       fifo_watermark = (fifo_watermark / sip) * sip;
+       fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
+       fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
        fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
 
        err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
index 54e3832..c31b963 100644 (file)
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
 static const struct spi_device_id maxim_thermocouple_id[] = {
        {"max6675", MAX6675},
        {"max31855", MAX31855},
-       {"max31856", MAX31855},
        {},
 };
 MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
index 0bee1f4..3208ad6 100644 (file)
@@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
 }
 
 /**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev:    IB device whose GID entry to be deleted
+ * @port:      Port number of the IB device
+ * @table:     GID table of the IB device for a port
+ * @ix:                GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u8 port,
+                   struct ib_gid_table *table, int ix)
+{
+       struct ib_gid_table_entry *entry;
+
+       lockdep_assert_held(&table->lock);
+
+       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+                ib_dev->name, port, ix,
+                table->data_vec[ix]->attr.gid.raw);
+
+       write_lock_irq(&table->rwlock);
+       entry = table->data_vec[ix];
+       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+       /*
+        * For non RoCE protocol, GID entry slot is ready to use.
+        */
+       if (!rdma_protocol_roce(ib_dev, port))
+               table->data_vec[ix] = NULL;
+       write_unlock_irq(&table->rwlock);
+
+       put_gid_entry_locked(entry);
+}
+
+/**
  * add_modify_gid - Add or modify GID table entry
  *
  * @table:     GID table in which GID to be added or modified
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
         * this index.
         */
        if (is_gid_entry_valid(table->data_vec[attr->index]))
-               put_gid_entry(table->data_vec[attr->index]);
+               del_gid(attr->device, attr->port_num, table, attr->index);
 
        /*
         * Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ done:
        return ret;
 }
 
-/**
- * del_gid - Delete GID table entry
- *
- * @ib_dev:    IB device whose GID entry to be deleted
- * @port:      Port number of the IB device
- * @table:     GID table of the IB device for a port
- * @ix:                GID entry index to delete
- *
- */
-static void del_gid(struct ib_device *ib_dev, u8 port,
-                   struct ib_gid_table *table, int ix)
-{
-       struct ib_gid_table_entry *entry;
-
-       lockdep_assert_held(&table->lock);
-
-       pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
-                ib_dev->name, port, ix,
-                table->data_vec[ix]->attr.gid.raw);
-
-       write_lock_irq(&table->rwlock);
-       entry = table->data_vec[ix];
-       entry->state = GID_TABLE_ENTRY_PENDING_DEL;
-       /*
-        * For non RoCE protocol, GID entry slot is ready to use.
-        */
-       if (!rdma_protocol_roce(ib_dev, port))
-               table->data_vec[ix] = NULL;
-       write_unlock_irq(&table->rwlock);
-
-       put_gid_entry_locked(entry);
-}
-
 /* rwlock should be read locked, or lock should be held */
 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
index f726772..a36c949 100644 (file)
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
        dgid = (union ib_gid *) &addr->sib_addr;
        pkey = ntohs(addr->sib_pkey);
 
+       mutex_lock(&lock);
        list_for_each_entry(cur_dev, &dev_list, list) {
                for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
                        if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
                                        cma_dev = cur_dev;
                                        sgid = gid;
                                        id_priv->id.port_num = p;
+                                       goto found;
                                }
                        }
                }
        }
-
-       if (!cma_dev)
-               return -ENODEV;
+       mutex_unlock(&lock);
+       return -ENODEV;
 
 found:
        cma_attach_to_dev(id_priv, cma_dev);
-       addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
-       memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+       mutex_unlock(&lock);
+       addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+       memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
        cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
        return 0;
 }
index 6eb64c6..c4118bc 100644 (file)
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
                WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
                if (!uverbs_destroy_uobject(obj, reason))
                        ret = 0;
+               else
+                       atomic_set(&obj->usecnt, 0);
        }
        return ret;
 }
index ec8fb28..21863dd 100644 (file)
@@ -124,6 +124,8 @@ static DEFINE_MUTEX(mut);
 static DEFINE_IDR(ctx_idr);
 static DEFINE_IDR(multicast_idr);
 
+static const struct file_operations ucma_fops;
+
 static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
 {
@@ -1581,6 +1583,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;
+       if (f.file->f_op != &ucma_fops) {
+               ret = -EINVAL;
+               goto file_put;
+       }
 
        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1753,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
+                       ucma_put_ctx(ctx);
+                       wait_for_completion(&ctx->comp);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
index a21d521..e012ca8 100644 (file)
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
 
        if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
            cmd->base.cur_qp_state > IB_QPS_ERR) ||
-           cmd->base.qp_state > IB_QPS_ERR) {
+           (cmd->base.attr_mask & IB_QP_STATE &&
+           cmd->base.qp_state > IB_QPS_ERR)) {
                ret = -EINVAL;
                goto release_qp;
        }
 
-       attr->qp_state            = cmd->base.qp_state;
-       attr->cur_qp_state        = cmd->base.cur_qp_state;
-       attr->path_mtu            = cmd->base.path_mtu;
-       attr->path_mig_state      = cmd->base.path_mig_state;
-       attr->qkey                = cmd->base.qkey;
-       attr->rq_psn              = cmd->base.rq_psn;
-       attr->sq_psn              = cmd->base.sq_psn;
-       attr->dest_qp_num         = cmd->base.dest_qp_num;
-       attr->qp_access_flags     = cmd->base.qp_access_flags;
-       attr->pkey_index          = cmd->base.pkey_index;
-       attr->alt_pkey_index      = cmd->base.alt_pkey_index;
-       attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
-       attr->max_rd_atomic       = cmd->base.max_rd_atomic;
-       attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
-       attr->min_rnr_timer       = cmd->base.min_rnr_timer;
-       attr->port_num            = cmd->base.port_num;
-       attr->timeout             = cmd->base.timeout;
-       attr->retry_cnt           = cmd->base.retry_cnt;
-       attr->rnr_retry           = cmd->base.rnr_retry;
-       attr->alt_port_num        = cmd->base.alt_port_num;
-       attr->alt_timeout         = cmd->base.alt_timeout;
-       attr->rate_limit          = cmd->rate_limit;
+       if (cmd->base.attr_mask & IB_QP_STATE)
+               attr->qp_state = cmd->base.qp_state;
+       if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+               attr->cur_qp_state = cmd->base.cur_qp_state;
+       if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+               attr->path_mtu = cmd->base.path_mtu;
+       if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+               attr->path_mig_state = cmd->base.path_mig_state;
+       if (cmd->base.attr_mask & IB_QP_QKEY)
+               attr->qkey = cmd->base.qkey;
+       if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+               attr->rq_psn = cmd->base.rq_psn;
+       if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+               attr->sq_psn = cmd->base.sq_psn;
+       if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+               attr->dest_qp_num = cmd->base.dest_qp_num;
+       if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+               attr->qp_access_flags = cmd->base.qp_access_flags;
+       if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+               attr->pkey_index = cmd->base.pkey_index;
+       if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+               attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+       if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+               attr->max_rd_atomic = cmd->base.max_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+       if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+               attr->min_rnr_timer = cmd->base.min_rnr_timer;
+       if (cmd->base.attr_mask & IB_QP_PORT)
+               attr->port_num = cmd->base.port_num;
+       if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+               attr->timeout = cmd->base.timeout;
+       if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+               attr->retry_cnt = cmd->base.retry_cnt;
+       if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+               attr->rnr_retry = cmd->base.rnr_retry;
+       if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+               attr->alt_port_num = cmd->base.alt_port_num;
+               attr->alt_timeout = cmd->base.alt_timeout;
+               attr->alt_pkey_index = cmd->base.alt_pkey_index;
+       }
+       if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+               attr->rate_limit = cmd->rate_limit;
 
        if (cmd->base.attr_mask & IB_QP_AV)
                copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
index 823beca..50152c1 100644 (file)
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
+       file->ev_queue.is_closed = 1;
        spin_unlock_irq(&file->ev_queue.lock);
 
        uverbs_close_fd(filp);
@@ -1050,7 +1051,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;
 
        if (ib_uverbs_create_uapi(device, uverbs_dev))
-               goto err;
+               goto err_uapi;
 
        cdev_init(&uverbs_dev->cdev, NULL);
        uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1078,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
 
 err_class:
        device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
 err_cdev:
        cdev_del(&uverbs_dev->cdev);
+err_uapi:
        clear_bit(devnum, dev_map);
-
 err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
index 73ea6f0..be85462 100644 (file)
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
                kfree(rcu_dereference_protected(*slot, true));
                radix_tree_iter_delete(&uapi->radix, &iter, slot);
        }
+       kfree(uapi);
 }
 
 struct uverbs_api *uverbs_alloc_api(
index bbfb86e..bc2b9e0 100644 (file)
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
                                "Failed to destroy Shadow QP");
                        return rc;
                }
+               bnxt_qplib_free_qp_res(&rdev->qplib_res,
+                                      &rdev->qp1_sqp->qplib_qp);
                mutex_lock(&rdev->qp_lock);
                list_del(&rdev->qp1_sqp->list);
                atomic_dec(&rdev->qp_count);
index 20b9f31..85cd1a3 100644 (file)
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
 /* Mutex to protect the list of bnxt_re devices added */
 static DEFINE_MUTEX(bnxt_re_dev_lock);
 static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
 
 /* SR-IOV helper functions */
 
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
        if (!rdev)
                return;
 
-       bnxt_re_ib_unreg(rdev, false);
+       bnxt_re_ib_unreg(rdev);
 }
 
 static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 /* Driver registration routines used to let the networking driver (bnxt_en)
  * to know that the RoCE driver is now installed
  */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
                return -EINVAL;
 
        en_dev = rdev->en_dev;
-       /* Acquire rtnl lock if it is not invokded from netdev event */
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
                                                    BNXT_ROCE_ULP);
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
 
        en_dev = rdev->en_dev;
 
-       rtnl_lock();
        rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
                                                  &bnxt_re_ulp_ops, rdev);
-       rtnl_unlock();
        return rc;
 }
 
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
 {
        struct bnxt_en_dev *en_dev;
        int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
 
        en_dev = rdev->en_dev;
 
-       if (lock_wait)
-               rtnl_lock();
 
        rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
 
-       if (lock_wait)
-               rtnl_unlock();
        return rc;
 }
 
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
 
        num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
 
-       rtnl_lock();
        num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
                                                         rdev->msix_entries,
                                                         num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
        }
        rdev->num_msix = num_msix_got;
 done:
-       rtnl_unlock();
        return rc;
 }
 
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
        fw_msg->timeout = timeout;
 }
 
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
-                                bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_ring_free_input req = {0};
        struct hwrm_ring_free_output resp;
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
        req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
        if (rc)
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW ring:%d :%#x", req.ring_id, rc);
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
        req.enables = 0;
        req.page_tbl_addr =  cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
        if (!rc)
                *fw_ring_id = le16_to_cpu(resp.ring_id);
 
-       rtnl_unlock();
        return rc;
 }
 
 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
-                                     u32 fw_stats_ctx_id, bool lock_wait)
+                                     u32 fw_stats_ctx_id)
 {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_stat_ctx_free_input req = {0};
        struct bnxt_fw_msg fw_msg;
-       bool do_unlock = false;
        int rc = -EINVAL;
 
        if (!en_dev)
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       if (lock_wait) {
-               rtnl_lock();
-               do_unlock = true;
-       }
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
        req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
                dev_err(rdev_to_dev(rdev),
                        "Failed to free HW stats context %#x", rc);
 
-       if (do_unlock)
-               rtnl_unlock();
        return rc;
 }
 
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
                return rc;
 
        memset(&fw_msg, 0, sizeof(fw_msg));
-       rtnl_lock();
 
        bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
        req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
        if (!rc)
                *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
 
-       rtnl_unlock();
        return rc;
 }
 
@@ -929,19 +897,19 @@ fail:
        return rc;
 }
 
-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
 {
        int i;
 
        for (i = 0; i < rdev->num_msix - 1; i++) {
-               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
 }
 
-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 {
-       bnxt_re_free_nq_res(rdev, lock_wait);
+       bnxt_re_free_nq_res(rdev);
 
        if (rdev->qplib_res.dpi_tbl.max) {
                bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
        return 0;
 }
 
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
 {
        int i, rc;
 
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
                cancel_delayed_work(&rdev->worker);
 
        bnxt_re_cleanup_res(rdev);
-       bnxt_re_free_res(rdev, lock_wait);
+       bnxt_re_free_res(rdev);
 
        if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
                rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to deinitialize RCFW: %#x", rc);
-               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
-                                          lock_wait);
+               bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
                bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
                bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
-               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+               bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
                bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
-               rc = bnxt_re_free_msix(rdev, lock_wait);
+               rc = bnxt_re_free_msix(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to free MSI-X vectors: %#x", rc);
        }
        if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
-               rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+               rc = bnxt_re_unregister_netdev(rdev);
                if (rc)
                        dev_warn(rdev_to_dev(rdev),
                                 "Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
        int i, j, rc;
 
+       bool locked;
+
+       /* Acquire rtnl lock throughout this function */
+       rtnl_lock();
+       locked = true;
+
        /* Registered a new RoCE device instance to netdev */
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
        }
 
+       rtnl_unlock();
+       locked = false;
+
        /* Register ib dev */
        rc = bnxt_re_register_ib(rdev);
        if (rc) {
                pr_err("Failed to register with IB: %#x\n", rc);
                goto fail;
        }
+       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        dev_info(rdev_to_dev(rdev), "Device registered successfully");
        for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
                rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
                        goto fail;
                }
        }
-       set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
        ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
                         &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 
        return 0;
 free_sctx:
-       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+       bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
        bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
 disable_rcfw:
        bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
-       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+       bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
 free_rcfw:
        bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 fail:
-       bnxt_re_ib_unreg(rdev, true);
+       if (!locked)
+               rtnl_lock();
+       bnxt_re_ib_unreg(rdev);
+       rtnl_unlock();
+
        return rc;
 }
 
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
                 */
                if (atomic_read(&rdev->sched_count) > 0)
                        goto exit;
-               bnxt_re_ib_unreg(rdev, false);
+               bnxt_re_ib_unreg(rdev);
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
                break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
                 */
                flush_workqueue(bnxt_re_wq);
                bnxt_re_dev_stop(rdev);
-               bnxt_re_ib_unreg(rdev, true);
+               /* Acquire the rtnl_lock as the L2 resources are freed here */
+               rtnl_lock();
+               bnxt_re_ib_unreg(rdev);
+               rtnl_unlock();
                bnxt_re_remove_one(rdev);
                bnxt_re_dev_unreg(rdev);
        }
index e426b99..6ad0d46 100644 (file)
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
                                       struct bnxt_qplib_qp *qp)
 {
        struct bnxt_qplib_q *rq = &qp->rq;
-       struct bnxt_qplib_q *sq = &qp->rq;
+       struct bnxt_qplib_q *sq = &qp->sq;
        int rc = 0;
 
        if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
index b3203af..347fe18 100644 (file)
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
        schp = to_c4iw_cq(qhp->ibqp.send_cq);
 
        if (qhp->ibqp.uobject) {
+
+               /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+               if (qhp->wq.flushed)
+                       return;
+
+               qhp->wq.flushed = 1;
                t4_set_wq_in_error(&qhp->wq, 0);
                t4_set_cq_in_error(&rchp->cq);
                spin_lock_irqsave(&rchp->comp_handler_lock, flag);
index 2c19bf7..e1668bc 100644 (file)
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        struct hfi1_devdata *dd = ppd->dd;
        struct send_context *sc;
        int i;
+       int sc_flags;
 
        if (flags & FREEZE_SELF)
                write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
        /* notify all SDMA engines that they are going into a freeze */
        sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
 
+       sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+                                             SCF_LINK_DOWN : 0);
        /* do halt pre-handling on all enabled send contexts */
        for (i = 0; i < dd->num_send_contexts; i++) {
                sc = dd->send_contexts[i].sc;
                if (sc && (sc->flags & SCF_ENABLED))
-                       sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+                       sc_stop(sc, sc_flags);
        }
 
        /* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
                add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 
                handle_linkup_change(dd, 1);
+               pio_kernel_linkup(dd);
 
                /*
                 * After link up, a new link width will have been set.
index eec8375..6c967dd 100644 (file)
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
                }
 
        /*
-        * A secondary bus reset (SBR) issues a hot reset to our device.
-        * The following routine does a 1s wait after the reset is dropped
-        * per PCI Trhfa (recovery time).  PCIe 3.0 section 6.6.1 -
-        * Conventional Reset, paragraph 3, line 35 also says that a 1s
-        * delay after a reset is required.  Per spec requirements,
-        * the link is either working or not after that point.
+        * This is an end around to do an SBR during probe time. A new API needs
+        * to be implemented to have a cleaner interface but this fixes the
+        * current brokenness
         */
-       return pci_reset_bus(dev);
+       return pci_bridge_secondary_bus_reset(dev->bus->self);
 }
 
 /*
index c2c1cba..7520576 100644 (file)
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
        unsigned long flags;
        int write = 1;  /* write sendctrl back */
        int flush = 0;  /* re-read sendctrl to make sure it is flushed */
+       int i;
 
        spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
                reg |= SEND_CTRL_SEND_ENABLE_SMASK;
        /* Fall through */
        case PSC_DATA_VL_ENABLE:
+               mask = 0;
+               for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+                       if (!dd->vld[i].mtu)
+                               mask |= BIT_ULL(i);
                /* Disallow sending on VLs not enabled */
-               mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-                               SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+               mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+                       SEND_CTRL_UNSUPPORTED_VL_SHIFT;
                reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
                break;
        case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
 void sc_disable(struct send_context *sc)
 {
        u64 reg;
-       unsigned long flags;
        struct pio_buf *pbuf;
 
        if (!sc)
                return;
 
        /* do all steps, even if already disabled */
-       spin_lock_irqsave(&sc->alloc_lock, flags);
+       spin_lock_irq(&sc->alloc_lock);
        reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
        reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
        sc->flags &= ~SCF_ENABLED;
        sc_wait_for_packet_egress(sc, 1);
        write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
-       spin_unlock_irqrestore(&sc->alloc_lock, flags);
 
        /*
         * Flush any waiters.  Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
         * proceed with the flush.
         */
        udelay(1);
-       spin_lock_irqsave(&sc->release_lock, flags);
+       spin_lock(&sc->release_lock);
        if (sc->sr) {   /* this context has a shadow ring */
                while (sc->sr_tail != sc->sr_head) {
                        pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
                                sc->sr_tail = 0;
                }
        }
-       spin_unlock_irqrestore(&sc->release_lock, flags);
+       spin_unlock(&sc->release_lock);
+       spin_unlock_irq(&sc->alloc_lock);
 }
 
 /* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
                sc = dd->send_contexts[i].sc;
                if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
                        continue;
+               if (sc->flags & SCF_LINK_DOWN)
+                       continue;
 
                sc_enable(sc);  /* will clear the sc frozen flag */
        }
 }
 
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid device data
+ *
+ * When the link goes down, the freeze path is taken.  However, a link down
+ * event is different from a freeze because if the send context is re-enabled
+ * whoever is sending data will start sending data again, which will hang
+ * any QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurs and takes this
+ * path for link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+       struct send_context *sc;
+       int i;
+
+       for (i = 0; i < dd->num_send_contexts; i++) {
+               sc = dd->send_contexts[i].sc;
+               if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+                       continue;
+
+               sc_enable(sc);  /* will clear the sc link down flag */
+       }
+}
+
 /*
  * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
  * Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
 {
        unsigned long flags;
 
-       /* mark the context */
-       sc->flags |= flag;
-
        /* stop buffer allocations */
        spin_lock_irqsave(&sc->alloc_lock, flags);
+       /* mark the context */
+       sc->flags |= flag;
        sc->flags &= ~SCF_ENABLED;
        spin_unlock_irqrestore(&sc->alloc_lock, flags);
        wake_up(&sc->halt_wait);
index 058b08f..aaf372c 100644 (file)
@@ -139,6 +139,7 @@ struct send_context {
 #define SCF_IN_FREE 0x02
 #define SCF_HALTED  0x04
 #define SCF_FROZEN  0x08
+#define SCF_LINK_DOWN 0x10
 
 struct send_context_info {
        struct send_context *sc;        /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
 void pio_reset_all(struct hfi1_devdata *dd);
 void pio_freeze(struct hfi1_devdata *dd);
 void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
 
 /* global PIO send control operations */
 #define PSC_GLOBAL_ENABLE 0
index a3a7b33..5c88706 100644 (file)
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
                        if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
                                if (++req->iov_idx == req->data_iovs) {
                                        ret = -EFAULT;
-                                       goto free_txreq;
+                                       goto free_tx;
                                }
                                iovec = &req->iovs[req->iov_idx];
                                WARN_ON(iovec->offset);
index 13374c7..a7c586a 100644 (file)
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        struct hfi1_pportdata *ppd;
        struct hfi1_devdata *dd;
        u8 sc5;
+       u8 sl;
 
        if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
            !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        /* test the mapping for validity */
        ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
        ppd = ppd_from_ibp(ibp);
-       sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
        dd = dd_from_ppd(ppd);
+
+       sl = rdma_ah_get_sl(ah_attr);
+       if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+               return -EINVAL;
+
+       sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
                return -EINVAL;
        return 0;
index ca0f1ee..0bbeaaa 100644 (file)
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
        props->page_size_cap       = dev->dev->caps.page_size_cap;
        props->max_qp              = dev->dev->quotas.qp;
        props->max_qp_wr           = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
-       props->max_send_sge        = dev->dev->caps.max_sq_sg;
-       props->max_recv_sge        = dev->dev->caps.max_rq_sg;
-       props->max_sge_rd          = MLX4_MAX_SGE_RD;
+       props->max_send_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_recv_sge =
+               min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+       props->max_sge_rd = MLX4_MAX_SGE_RD;
        props->max_cq              = dev->dev->quotas.cq;
        props->max_cqe             = dev->dev->caps.max_cqes;
        props->max_mr              = dev->dev->quotas.mpt;
index ac116d6..f2f11e6 100644 (file)
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
        struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
        struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        struct devx_obj *obj;
        int err;
 
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
-               goto obj_free;
+               goto obj_destroy;
 
        return 0;
 
+obj_destroy:
+       mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
 obj_free:
        kfree(obj);
        return err;
index ea01b8d..3d5424f 100644 (file)
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
 
        skb_queue_head_init(&skqueue);
 
+       netif_tx_lock_bh(p->dev);
        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->neigh)
                while ((skb = __skb_dequeue(&p->neigh->queue)))
                        __skb_queue_tail(&skqueue, skb);
        spin_unlock_irq(&priv->lock);
+       netif_tx_unlock_bh(p->dev);
 
        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = p->dev;
index 444d165..0b34e90 100644 (file)
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
        struct srp_target_port *target = host_to_target(scmnd->device->host);
        struct srp_rdma_ch *ch;
-       int i;
+       int i, j;
        u8 status;
 
        shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
        for (i = 0; i < target->ch_count; i++) {
                ch = &target->ch[i];
-               for (i = 0; i < target->req_ring_size; ++i) {
-                       struct srp_request *req = &ch->req_ring[i];
+               for (j = 0; j < target->req_ring_size; ++j) {
+                       struct srp_request *req = &ch->req_ring[j];
 
                        srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
                }
index 6f62da2..6caee80 100644 (file)
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
  */
 
 
-static unsigned char atakbd_keycode[0x72] = {  /* American layout */
-       [0]      = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = {  /* American layout */
        [1]      = KEY_ESC,
        [2]      = KEY_1,
        [3]      = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = {       /* American layout */
        [38]     = KEY_L,
        [39]     = KEY_SEMICOLON,
        [40]     = KEY_APOSTROPHE,
-       [41]     = KEY_BACKSLASH,       /* FIXME, '#' */
+       [41]     = KEY_GRAVE,
        [42]     = KEY_LEFTSHIFT,
-       [43]     = KEY_GRAVE,           /* FIXME: '~' */
+       [43]     = KEY_BACKSLASH,
        [44]     = KEY_Z,
        [45]     = KEY_X,
        [46]     = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = {     /* American layout */
        [66]     = KEY_F8,
        [67]     = KEY_F9,
        [68]     = KEY_F10,
-       [69]     = KEY_ESC,
-       [70]     = KEY_DELETE,
-       [71]     = KEY_KP7,
-       [72]     = KEY_KP8,
-       [73]     = KEY_KP9,
+       [71]     = KEY_HOME,
+       [72]     = KEY_UP,
        [74]     = KEY_KPMINUS,
-       [75]     = KEY_KP4,
-       [76]     = KEY_KP5,
-       [77]     = KEY_KP6,
+       [75]     = KEY_LEFT,
+       [77]     = KEY_RIGHT,
        [78]     = KEY_KPPLUS,
-       [79]     = KEY_KP1,
-       [80]     = KEY_KP2,
-       [81]     = KEY_KP3,
-       [82]     = KEY_KP0,
-       [83]     = KEY_KPDOT,
-       [90]     = KEY_KPLEFTPAREN,
-       [91]     = KEY_KPRIGHTPAREN,
-       [92]     = KEY_KPASTERISK,      /* FIXME */
-       [93]     = KEY_KPASTERISK,
-       [94]     = KEY_KPPLUS,
-       [95]     = KEY_HELP,
+       [80]     = KEY_DOWN,
+       [82]     = KEY_INSERT,
+       [83]     = KEY_DELETE,
        [96]     = KEY_102ND,
-       [97]     = KEY_KPASTERISK,      /* FIXME */
-       [98]     = KEY_KPSLASH,
+       [97]     = KEY_UNDO,
+       [98]     = KEY_HELP,
        [99]     = KEY_KPLEFTPAREN,
        [100]    = KEY_KPRIGHTPAREN,
        [101]    = KEY_KPSLASH,
        [102]    = KEY_KPASTERISK,
-       [103]    = KEY_UP,
-       [104]    = KEY_KPASTERISK,      /* FIXME */
-       [105]    = KEY_LEFT,
-       [106]    = KEY_RIGHT,
-       [107]    = KEY_KPASTERISK,      /* FIXME */
-       [108]    = KEY_DOWN,
-       [109]    = KEY_KPASTERISK,      /* FIXME */
-       [110]    = KEY_KPASTERISK,      /* FIXME */
-       [111]    = KEY_KPASTERISK,      /* FIXME */
-       [112]    = KEY_KPASTERISK,      /* FIXME */
-       [113]    = KEY_KPASTERISK       /* FIXME */
+       [103]    = KEY_KP7,
+       [104]    = KEY_KP8,
+       [105]    = KEY_KP9,
+       [106]    = KEY_KP4,
+       [107]    = KEY_KP5,
+       [108]    = KEY_KP6,
+       [109]    = KEY_KP1,
+       [110]    = KEY_KP2,
+       [111]    = KEY_KP3,
+       [112]    = KEY_KP0,
+       [113]    = KEY_KPDOT,
+       [114]    = KEY_KPENTER,
 };
 
 static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
 static void atakbd_interrupt(unsigned char scancode, char down)
 {
 
-       if (scancode < 0x72) {          /* scancodes < 0xf2 are keys */
+       if (scancode < 0x73) {          /* scancodes < 0xf3 are keys */
 
                // report raw events here?
 
                scancode = atakbd_keycode[scancode];
 
-               if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
-                       input_report_key(atakbd_dev, scancode, 1);
-                       input_report_key(atakbd_dev, scancode, 0);
-                       input_sync(atakbd_dev);
-               } else {
-                       input_report_key(atakbd_dev, scancode, down);
-                       input_sync(atakbd_dev);
-               }
-       } else                          /* scancodes >= 0xf2 are mouse data, most likely */
+               input_report_key(atakbd_dev, scancode, down);
+               input_sync(atakbd_dev);
+       } else                          /* scancodes >= 0xf3 are mouse data, most likely */
                printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
 
        return;
index 96a887f..eb14ddf 100644 (file)
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
        min = abs->minimum;
        max = abs->maximum;
 
-       if ((min != 0 || max != 0) && max <= min) {
+       if ((min != 0 || max != 0) && max < min) {
                printk(KERN_DEBUG
                       "%s: invalid abs[%02x] min:%d max:%d\n",
                       UINPUT_NAME, code, min, max);
index 44f57cf..2d95e8d 100644 (file)
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 static const char * const middle_button_pnp_ids[] = {
        "LEN2131", /* ThinkPad P52 w/ NFC */
        "LEN2132", /* ThinkPad P52 */
+       "LEN2133", /* ThinkPad P72 w/ NFC */
+       "LEN2134", /* ThinkPad P72 */
        NULL
 };
 
index 80e69bb..83ac8c1 100644 (file)
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        int ret;
 
+       if (device_may_wakeup(dev))
+               return enable_irq_wake(client->irq);
+
        ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
        return ret > 0 ? 0 : ret;
 }
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
 
+       if (device_may_wakeup(dev))
+               return disable_irq_wake(client->irq);
+
        return egalax_wake_up_device(client);
 }
 
index 4e04fff..bee0dfb 100644 (file)
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
 
        /* The callers make sure that get_device_id() does not fail here */
        devid = get_device_id(dev);
+
+       /* For ACPI HID devices, we simply return the devid as such */
+       if (!dev_is_pci(dev))
+               return devid;
+
        ivrs_alias = amd_iommu_alias_table[devid];
+
        pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 
        if (ivrs_alias == pci_alias)
@@ -3063,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
                return 0;
 
        offset_mask = pte_pgsize - 1;
-       __pte       = *pte & PM_ADDR_MASK;
+       __pte       = __sme_clr(*pte & PM_ADDR_MASK);
 
        return (__pte & ~offset_mask) | (iova & offset_mask);
 }
index 5f3f10c..bedc801 100644 (file)
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
        if (dev && dev_is_pci(dev) && info->pasid_supported) {
                ret = intel_pasid_alloc_table(dev);
                if (ret) {
-                       __dmar_remove_one_dev_info(info);
-                       spin_unlock_irqrestore(&device_domain_lock, flags);
-                       return NULL;
+                       pr_warn("No pasid table for %s, pasid disabled\n",
+                               dev_name(dev));
+                       info->pasid_supported = 0;
                }
        }
        spin_unlock_irqrestore(&device_domain_lock, flags);
index 1c05ed6..1fb5e12 100644 (file)
@@ -11,7 +11,7 @@
 #define __INTEL_PASID_H
 
 #define PASID_MIN                      0x1
-#define PASID_MAX                      0x100000
+#define PASID_MAX                      0x20000
 
 struct pasid_entry {
        u64 val;
index 258115b..ad3e2b9 100644 (file)
@@ -1241,6 +1241,12 @@ err_unprepare_clocks:
 
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
+       struct rk_iommu *iommu = platform_get_drvdata(pdev);
+       int i = 0, irq;
+
+       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+               devm_free_irq(iommu->dev, irq, iommu);
+
        pm_runtime_force_suspend(&pdev->dev);
 }
 
index 316a575..c2df341 100644 (file)
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
  * The consequence of the above is that allocation is cost is low, but
  * freeing is expensive. We assumes that freeing rarely occurs.
  */
+#define ITS_MAX_LPI_NRBITS     16 /* 64K LPIs */
 
 static DEFINE_MUTEX(lpi_range_lock);
 static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
 {
        phys_addr_t paddr;
 
-       lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
+       lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+                               ITS_MAX_LPI_NRBITS);
        gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
        if (!gic_rdists->prop_page) {
                pr_err("Failed to allocate PROPBASE\n");
index 83504dd..954dad2 100644 (file)
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 
index 6116bbf..522c742 100644 (file)
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 
                closure_get(&ca->set->cl);
                INIT_WORK(&ja->discard_work, journal_discard_work);
-               schedule_work(&ja->discard_work);
+               queue_work(bch_journal_wq, &ja->discard_work);
        }
 }
 
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
                : &j->w[0];
 
        __closure_wake_up(&w->wait);
-       continue_at_nobarrier(cl, journal_write, system_wq);
+       continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
                spin_unlock(&c->journal.lock);
 
                btree_flush_write(c);
-               continue_at(cl, journal_write, system_wq);
+               continue_at(cl, journal_write, bch_journal_wq);
                return;
        }
 
index 94c756c..30ba9ae 100644 (file)
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES                (256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
                kobject_put(bcache_kobj);
        if (bcache_wq)
                destroy_workqueue(bcache_wq);
+       if (bch_journal_wq)
+               destroy_workqueue(bch_journal_wq);
+
        if (bcache_major)
                unregister_blkdev(bcache_major, "bcache");
        unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
        if (!bcache_wq)
                goto err;
 
+       bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+       if (!bch_journal_wq)
+               goto err;
+
        bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
        if (!bcache_kobj)
                goto err;
index 69dddea..5936de7 100644 (file)
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
                if (hints_valid) {
                        r = dm_array_cursor_next(&cmd->hint_cursor);
                        if (r) {
-                               DMERR("dm_array_cursor_next for hint failed");
-                               goto out;
+                               dm_array_cursor_end(&cmd->hint_cursor);
+                               hints_valid = false;
                        }
                }
 
index a534133..b29a832 100644 (file)
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-       if (from_cblock(new_size) > from_cblock(cache->cache_size))
-               return true;
+       if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+               if (cache->sized) {
+                       DMERR("%s: unable to extend cache due to missing cache table reload",
+                             cache_device_name(cache));
+                       return false;
+               }
+       }
 
        /*
         * We can't drop a dirty block when shrinking the cache.
@@ -3479,14 +3484,13 @@ static int __init dm_cache_init(void)
        int r;
 
        migration_cache = KMEM_CACHE(dm_cache_migration, 0);
-       if (!migration_cache) {
-               dm_unregister_target(&cache_target);
+       if (!migration_cache)
                return -ENOMEM;
-       }
 
        r = dm_register_target(&cache_target);
        if (r) {
                DMERR("cache target registration failed: %d", r);
+               kmem_cache_destroy(migration_cache);
                return r;
        }
 
index f266c81..0481223 100644 (file)
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
        int err;
 
        desc->tfm = essiv->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
        shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
        int i, r;
 
        desc->tfm = lmk->hash_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
 
        /* calculate crc32 for every 32bit part and xor it */
        desc->tfm = tcw->crc32_tfm;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
        for (i = 0; i < 4; i++) {
                r = crypto_shash_init(desc);
                if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        skcipher_request_set_callback(ctx->r.req,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
 }
 
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
         * requests if driver request queue is full.
         */
        aead_request_set_callback(ctx->r.req_aead,
-           CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+           CRYPTO_TFM_REQ_MAY_BACKLOG,
            kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
 }
 
index 3788785..89ccb64 100644 (file)
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
        unsigned j, size;
 
        desc->tfm = ic->journal_mac;
-       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       desc->flags = 0;
 
        r = crypto_shash_init(desc);
        if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
 static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
 {
        int r;
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
index d10964d..172f6fa 100644 (file)
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_REMAPPED;
 }
 
+#ifdef CONFIG_DM_ZONED
 static int linear_end_io(struct dm_target *ti, struct bio *bio,
                         blk_status_t *error)
 {
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
 
        return DM_ENDIO_DONE;
 }
+#endif
 
 static void linear_status(struct dm_target *ti, status_type_t type,
                          unsigned status_flags, char *result, unsigned maxlen)
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
 static struct target_type linear_target = {
        .name   = "linear",
        .version = {1, 4, 0},
+#ifdef CONFIG_DM_ZONED
+       .end_io = linear_end_io,
        .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+#else
+       .features = DM_TARGET_PASSES_INTEGRITY,
+#endif
        .module = THIS_MODULE,
        .ctr    = linear_ctr,
        .dtr    = linear_dtr,
        .map    = linear_map,
-       .end_io = linear_end_io,
        .status = linear_status,
        .prepare_ioctl = linear_prepare_ioctl,
        .iterate_devices = linear_iterate_devices,
index d94ba6f..419362c 100644 (file)
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 }
 
 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
-                        const char *attached_handler_name, char **error)
+                        const char **attached_handler_name, char **error)
 {
        struct request_queue *q = bdev_get_queue(bdev);
        int r;
 
        if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-               if (attached_handler_name) {
+               if (*attached_handler_name) {
                        /*
                         * Clear any hw_handler_params associated with a
                         * handler that isn't already attached.
                         */
-                       if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+                       if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
                                kfree(m->hw_handler_params);
                                m->hw_handler_params = NULL;
                        }
@@ -830,7 +830,8 @@ retain:
                         * handler instead of the original table passed in.
                         */
                        kfree(m->hw_handler_name);
-                       m->hw_handler_name = attached_handler_name;
+                       m->hw_handler_name = *attached_handler_name;
+                       *attached_handler_name = NULL;
                }
        }
 
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        struct pgpath *p;
        struct multipath *m = ti->private;
        struct request_queue *q;
-       const char *attached_handler_name;
+       const char *attached_handler_name = NULL;
 
        /* we need at least a path arg */
        if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
        attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
        if (attached_handler_name || m->hw_handler_name) {
                INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-               r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+               r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
                if (r) {
                        dm_put_device(ti, p->path.dev);
                        goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 
        return p;
  bad:
+       kfree(attached_handler_name);
        free_pgpath(p);
        return ERR_PTR(r);
 }
index cae689d..c44925e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -29,9 +29,6 @@
  */
 #define        MIN_RAID456_JOURNAL_SPACE (4*2048)
 
-/* Global list of all raid sets */
-static LIST_HEAD(raid_sets);
-
 static bool devices_handle_discard_safely = false;
 
 /*
@@ -227,7 +224,6 @@ struct rs_layout {
 
 struct raid_set {
        struct dm_target *ti;
-       struct list_head list;
 
        uint32_t stripe_cache_entries;
        unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
        mddev->new_chunk_sectors = l->new_chunk_sectors;
 }
 
-/* Find any raid_set in active slot for @rs on global list */
-static struct raid_set *rs_find_active(struct raid_set *rs)
-{
-       struct raid_set *r;
-       struct mapped_device *md = dm_table_get_md(rs->ti->table);
-
-       list_for_each_entry(r, &raid_sets, list)
-               if (r != rs && dm_table_get_md(r->ti->table) == md)
-                       return r;
-
-       return NULL;
-}
-
 /* raid10 algorithms (i.e. formats) */
 #define        ALGORITHM_RAID10_DEFAULT        0
 #define        ALGORITHM_RAID10_NEAR           1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
 
        mddev_init(&rs->md);
 
-       INIT_LIST_HEAD(&rs->list);
        rs->raid_disks = raid_devs;
        rs->delta_disks = 0;
 
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);
 
-       /* Add @rs to global list. */
-       list_add(&rs->list, &raid_sets);
-
        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
        return rs;
 }
 
-/* Free all @rs allocations and remove it from global list. */
+/* Free all @rs allocations */
 static void raid_set_free(struct raid_set *rs)
 {
        int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }
 
-       list_del(&rs->list);
-
        kfree(rs);
 }
 
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
                return 0;
        }
 
-       /* HM FIXME: get InSync raid_dev? */
+       /* HM FIXME: get In_Sync raid_dev? */
        rdev = &rs->dev[0].rdev;
 
        if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
                rs_set_new(rs);
        } else if (rs_is_recovering(rs)) {
+               /* Rebuild particular devices */
+               if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+                       set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+                       rs_setup_recovery(rs, MaxSector);
+               }
                /* A recovering raid set may be resized */
                ; /* skip setup rs */
        } else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        /* Start raid set read-only and assumed clean to change in raid_resume() */
        rs->md.ro = 1;
        rs->md.in_sync = 1;
+
+       /* Keep array frozen */
        set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
 
        /* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        rs->callbacks.congested_fn = raid_is_congested;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);
 
-       /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */
+       /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
        if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
                r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
                if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
        return DM_MAPIO_SUBMITTED;
 }
 
-/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+/* Return sync state string for @state */
+enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
+static const char *sync_str(enum sync_state state)
+{
+       /* Has to be in above sync_state order! */
+       static const char *sync_strs[] = {
+               "frozen",
+               "reshape",
+               "resync",
+               "check",
+               "repair",
+               "recover",
+               "idle"
+       };
+
+       return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
+};
+
+/* Return enum sync_state for @mddev derived from @recovery flags */
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
        if (test_bit(MD_RECOVERY_FROZEN, &recovery))
-               return "frozen";
+               return st_frozen;
 
-       /* The MD sync thread can be done with io but still be running */
+       /* The MD sync thread can be done with io or be interrupted but still be running */
        if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
            (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
-                       return "reshape";
+                       return st_reshape;
 
                if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
-                               return "resync";
-                       else if (test_bit(MD_RECOVERY_CHECK, &recovery))
-                               return "check";
-                       return "repair";
+                               return st_resync;
+                       if (test_bit(MD_RECOVERY_CHECK, &recovery))
+                               return st_check;
+                       return st_repair;
                }
 
                if (test_bit(MD_RECOVERY_RECOVER, &recovery))
-                       return "recover";
+                       return st_recover;
+
+               if (mddev->reshape_position != MaxSector)
+                       return st_reshape;
        }
 
-       return "idle";
+       return st_idle;
 }
 
 /*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                                sector_t resync_max_sectors)
 {
        sector_t r;
+       enum sync_state state;
        struct mddev *mddev = &rs->md;
 
        clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
        } else {
-               if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
-                   !test_bit(MD_RECOVERY_INTR, &recovery) &&
-                   (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
-                    test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
-                    test_bit(MD_RECOVERY_RUNNING, &recovery)))
-                       r = mddev->curr_resync_completed;
-               else
+               state = decipher_sync_action(mddev, recovery);
+
+               if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
                        r = mddev->recovery_cp;
+               else
+                       r = mddev->curr_resync_completed;
 
-               if (r >= resync_max_sectors &&
-                   (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
-                    (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
-                     !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
-                     !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
+               if (state == st_idle && r >= resync_max_sectors) {
                        /*
                         * Sync complete.
                         */
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                        if (test_bit(MD_RECOVERY_RECOVER, &recovery))
                                set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+               } else if (state == st_recover)
                        /*
                         * In case we are recovering, the array is not in sync
                         * and health chars should show the recovering legs.
                         */
                        ;
-
-               } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_resync)
                        /*
                         * If "resync" is occurring, the raid set
                         * is or may be out of sync hence the health
                         * characters shall be 'a'.
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
-
-               } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
-                          !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_reshape)
                        /*
                         * If "reshape" is occurring, the raid set
                         * is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
 
-               } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+               else if (state == st_check || state == st_repair)
                        /*
                         * If "check" or "repair" is occurring, the raid set has
                         * undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
                         */
                        set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
 
-               else {
+               else {
                        struct md_rdev *rdev;
 
                        /*
                         * We are idle and recovery is needed, prevent 'A' chars race
-                        * caused by components still set to in-sync by constrcuctor.
+                        * caused by components still set to in-sync by constructor.
                         */
                        if (test_bit(MD_RECOVERY_NEEDED, &recovery))
                                set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
                progress = rs_get_progress(rs, recovery, resync_max_sectors);
                resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
                                    atomic64_read(&mddev->resync_mismatches) : 0;
-               sync_action = decipher_sync_action(&rs->md, recovery);
+               sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
 
                /* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
                for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
        struct mddev *mddev = &rs->md;
        struct md_personality *pers = mddev->pers;
 
+       /* Don't allow the sync thread to work until the table gets reloaded. */
+       set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+
        r = rs_setup_reshape(rs);
        if (r)
                return r;
 
-       /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
-       if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
-               mddev_resume(mddev);
-
        /*
         * Check any reshape constraints enforced by the personalility
         *
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
                }
        }
 
-       /* Suspend because a resume will happen in raid_resume() */
-       set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
-       mddev_suspend(mddev);
-
        /*
         * Now reshape got set up, update superblocks to
         * reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
        if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
                return 0;
 
-       if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
-               struct raid_set *rs_active = rs_find_active(rs);
-
-               if (rs_active) {
-                       /*
-                        * In case no rebuilds have been requested
-                        * and an active table slot exists, copy
-                        * current resynchonization completed and
-                        * reshape position pointers across from
-                        * suspended raid set in the active slot.
-                        *
-                        * This resumes the new mapping at current
-                        * offsets to continue recover/reshape without
-                        * necessarily redoing a raid set partially or
-                        * causing data corruption in case of a reshape.
-                        */
-                       if (rs_active->md.curr_resync_completed != MaxSector)
-                               mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
-                       if (rs_active->md.reshape_position != MaxSector)
-                               mddev->reshape_position = rs_active->md.reshape_position;
-               }
-       }
-
        /*
         * The superblocks need to be updated on disk if the
         * array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
        .name = "raid",
-       .version = {1, 13, 2},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
index 7214202..20b0776 100644 (file)
@@ -189,6 +189,12 @@ struct dm_pool_metadata {
        sector_t data_block_size;
 
        /*
+        * We reserve a section of the metadata for commit overhead.
+        * All reported space does *not* include this.
+        */
+       dm_block_t metadata_reserve;
+
+       /*
         * Set if a transaction has to be aborted but the attempt to roll back
         * to the previous (good) transaction failed.  The only pool metadata
         * operation possible in this state is the closing of the device.
@@ -816,6 +822,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
        return dm_tm_commit(pmd->tm, sblock);
 }
 
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+       int r;
+       dm_block_t total;
+       dm_block_t max_blocks = 4096; /* 16M */
+
+       r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+       if (r) {
+               DMERR("could not get size of metadata device");
+               pmd->metadata_reserve = max_blocks;
+       } else
+               pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                                               sector_t data_block_size,
                                               bool format_device)
@@ -849,6 +869,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
                return ERR_PTR(r);
        }
 
+       __set_metadata_reserve(pmd);
+
        return pmd;
 }
 
@@ -1820,6 +1842,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
        down_read(&pmd->root_lock);
        if (!pmd->fail_io)
                r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+       if (!r) {
+               if (*result < pmd->metadata_reserve)
+                       *result = 0;
+               else
+                       *result -= pmd->metadata_reserve;
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1932,8 +1961,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
        int r = -EINVAL;
 
        down_write(&pmd->root_lock);
-       if (!pmd->fail_io)
+       if (!pmd->fail_io) {
                r = __resize_space_map(pmd->metadata_sm, new_count);
+               if (!r)
+                       __set_metadata_reserve(pmd);
+       }
        up_write(&pmd->root_lock);
 
        return r;
index 7bd60a1..aaf1ad4 100644 (file)
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
 enum pool_mode {
        PM_WRITE,               /* metadata may be changed */
        PM_OUT_OF_DATA_SPACE,   /* metadata may be changed, though data may not be allocated */
+
+       /*
+        * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+        */
+       PM_OUT_OF_METADATA_SPACE,
        PM_READ_ONLY,           /* metadata may not be changed */
+
        PM_FAIL,                /* all I/O fails */
 };
 
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
 
 static void requeue_bios(struct pool *pool);
 
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+       return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+       return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+       int r;
+       const char *ooms_reason = NULL;
+       dm_block_t nr_free;
+
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+       if (r)
+               ooms_reason = "Could not get free metadata blocks";
+       else if (!nr_free)
+               ooms_reason = "No free metadata blocks";
+
+       if (ooms_reason && !is_read_only(pool)) {
+               DMERR("%s", ooms_reason);
+               set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+       }
+}
+
+static void check_for_data_space(struct pool *pool)
 {
        int r;
        dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
 {
        int r;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY)
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
                return -EINVAL;
 
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
-       else
-               check_for_space(pool);
+       else {
+               check_for_metadata_space(pool);
+               check_for_data_space(pool);
+       }
 
        return r;
 }
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
                return r;
        }
 
+       r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+       if (r) {
+               metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+               return r;
+       }
+
+       if (!free_blocks) {
+               /* Let's commit before we use up the metadata reserve. */
+               r = commit(pool);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
        case PM_OUT_OF_DATA_SPACE:
                return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
        case PM_FAIL:
                return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                error_retry_list(pool);
                break;
 
+       case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
-               if (old_mode != new_mode)
+               if (!is_read_only_pool_mode(old_mode))
                        notify_of_pool_mode_change(pool, "read-only");
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
                DMINFO("%s: growing the metadata device from %llu to %llu blocks",
                       dm_device_name(pool->pool_md),
                       sb_metadata_dev_size, metadata_dev_size);
+
+               if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+                       set_pool_mode(pool, PM_WRITE);
+
                r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
                if (r) {
                        metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
-       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+       if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
                DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
                      dm_device_name(pool->pool_md));
                return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
        dm_block_t nr_blocks_data;
        dm_block_t nr_blocks_metadata;
        dm_block_t held_root;
+       enum pool_mode mode;
        char buf[BDEVNAME_SIZE];
        char buf2[BDEVNAME_SIZE];
        struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
                else
                        DMEMIT("- ");
 
-               if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+               mode = get_pool_mode(pool);
+               if (mode == PM_OUT_OF_DATA_SPACE)
                        DMEMIT("out_of_data_space ");
-               else if (pool->pf.mode == PM_READ_ONLY)
+               else if (is_read_only_pool_mode(mode))
                        DMEMIT("ro ");
                else
                        DMEMIT("rw ");
index 12decdb..fc65f0d 100644 (file)
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
 {
        struct scatterlist sg;
 
-       sg_init_one(&sg, data, len);
-       ahash_request_set_crypt(req, &sg, NULL, len);
-
-       return crypto_wait_req(crypto_ahash_update(req), wait);
+       if (likely(!is_vmalloc_addr(data))) {
+               sg_init_one(&sg, data, len);
+               ahash_request_set_crypt(req, &sg, NULL, len);
+               return crypto_wait_req(crypto_ahash_update(req), wait);
+       } else {
+               do {
+                       int r;
+                       size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+                       flush_kernel_vmap_range((void *)data, this_step);
+                       sg_init_table(&sg, 1);
+                       sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+                       ahash_request_set_crypt(req, &sg, NULL, this_step);
+                       r = crypto_wait_req(crypto_ahash_update(req), wait);
+                       if (unlikely(r))
+                               return r;
+                       data += this_step;
+                       len -= this_step;
+               } while (len);
+               return 0;
+       }
 }
 
 /*
index 20f7e4e..45abb54 100644 (file)
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
 /*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the target device. The zone descriptors
- * must be remapped to match their position within the dm device.
- * A target may call dm_remap_zone_report after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
- * from the target device mapping to the dm device.
+ * The zone descriptors obtained with a zone report indicate zone positions
+ * within the target backing device, regardless of whether that device is a
+ * partition and of the target mapping start sector on the device or partition.
+ * The zone descriptors start sector and write pointer position must be adjusted
+ * to match their relative position within the dm device.
+ * A target may call dm_remap_zone_report() after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
+ * backing device.
  */
 void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
 {
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
        struct blk_zone *zone;
        unsigned int nr_rep = 0;
        unsigned int ofst;
+       sector_t part_offset;
        struct bio_vec bvec;
        struct bvec_iter iter;
        void *addr;
@@ -1179,6 +1182,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                return;
 
        /*
+        * bio sector was incremented by the request size on completion. Taking
+        * into account the original request sector, the target start offset on
+        * the backing device and the target mapping offset (ti->begin), compute
+        * the partition offset on the backing device. The partition offset is
+        * always 0 if the target uses a whole device.
+        */
+       part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+
+       /*
         * Remap the start sector of the reported zones. For sequential zones,
         * also remap the write pointer position.
         */
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                /* Set zones start sector */
                while (hdr->nr_zones && ofst < bvec.bv_len) {
                        zone = addr + ofst;
+                       zone->start -= part_offset;
                        if (zone->start >= start + ti->len) {
                                hdr->nr_zones = 0;
                                break;
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
                                else if (zone->cond == BLK_ZONE_COND_EMPTY)
                                        zone->wp = zone->start;
                                else
-                                       zone->wp = zone->wp + ti->begin - start;
+                                       zone->wp = zone->wp + ti->begin - start - part_offset;
                        }
                        ofst += sizeof(struct blk_zone);
                        hdr->nr_zones--;
index 94329e0..0b2af6e 100644 (file)
@@ -1276,18 +1276,18 @@ static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
 static int resync_finish(struct mddev *mddev)
 {
        struct md_cluster_info *cinfo = mddev->cluster_info;
+       int ret = 0;
 
        clear_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
-       dlm_unlock_sync(cinfo->resync_lockres);
 
        /*
         * If resync thread is interrupted so we can't say resync is finished,
         * another node will launch resync thread to continue.
         */
-       if (test_bit(MD_CLOSING, &mddev->flags))
-               return 0;
-       else
-               return resync_info_update(mddev, 0, 0);
+       if (!test_bit(MD_CLOSING, &mddev->flags))
+               ret = resync_info_update(mddev, 0, 0);
+       dlm_unlock_sync(cinfo->resync_lockres);
+       return ret;
 }
 
 static int area_resyncing(struct mddev *mddev, int direction,
index 9818980..d6f7978 100644 (file)
@@ -4529,11 +4529,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
                allow_barrier(conf);
        }
 
+       raise_barrier(conf, 0);
 read_more:
        /* Now schedule reads for blocks from sector_nr to last */
        r10_bio = raid10_alloc_init_r10buf(conf);
        r10_bio->state = 0;
-       raise_barrier(conf, sectors_done != 0);
+       raise_barrier(conf, 1);
        atomic_set(&r10_bio->remaining, 0);
        r10_bio->mddev = mddev;
        r10_bio->sector = sector_nr;
@@ -4629,6 +4630,8 @@ read_more:
        if (sector_nr <= last)
                goto read_more;
 
+       lower_barrier(conf);
+
        /* Now that we have done the whole section we can
         * update reshape_progress
         */
index a001808..bfb8114 100644 (file)
@@ -46,6 +46,11 @@ extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
 extern void ppl_quiesce(struct r5conf *conf, int quiesce);
 extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
 
+static inline bool raid5_has_log(struct r5conf *conf)
+{
+       return test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
+}
+
 static inline bool raid5_has_ppl(struct r5conf *conf)
 {
        return test_bit(MD_HAS_PPL, &conf->mddev->flags);
index 4ce0d75..e4e98f4 100644 (file)
@@ -733,7 +733,7 @@ static bool stripe_can_batch(struct stripe_head *sh)
 {
        struct r5conf *conf = sh->raid_conf;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return false;
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
                !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
@@ -7737,7 +7737,7 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
        sector_t newsize;
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        sectors &= ~((sector_t)conf->chunk_sectors - 1);
        newsize = raid5_size(mddev, sectors, mddev->raid_disks);
@@ -7788,7 +7788,7 @@ static int check_reshape(struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
 
-       if (conf->log || raid5_has_ppl(conf))
+       if (raid5_has_log(conf) || raid5_has_ppl(conf))
                return -EINVAL;
        if (mddev->delta_disks == 0 &&
            mddev->new_layout == mddev->layout &&
index b5410ae..bb41bea 100644 (file)
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
                                              V4L2_CID_AUTO_WHITE_BALANCE,
                                              0, 1, 1,
                                              V4L2_WHITE_BALANCE_AUTO);
-       if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
-               ret = PTR_ERR(mt9v111->auto_awb);
-               goto error_free_ctrls;
-       }
-
        mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
                                                   &mt9v111_ctrl_ops,
                                                   V4L2_CID_EXPOSURE_AUTO,
                                                   V4L2_EXPOSURE_MANUAL,
                                                   0, V4L2_EXPOSURE_AUTO);
-       if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
-               ret = PTR_ERR(mt9v111->auto_exp);
-               goto error_free_ctrls;
-       }
-
-       /* Initialize timings */
        mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
                                            V4L2_CID_HBLANK,
                                            MT9V111_CORE_R05_MIN_HBLANK,
                                            MT9V111_CORE_R05_MAX_HBLANK, 1,
                                            MT9V111_CORE_R05_DEF_HBLANK);
-       if (IS_ERR_OR_NULL(mt9v111->hblank)) {
-               ret = PTR_ERR(mt9v111->hblank);
-               goto error_free_ctrls;
-       }
-
        mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
                                            V4L2_CID_VBLANK,
                                            MT9V111_CORE_R06_MIN_VBLANK,
                                            MT9V111_CORE_R06_MAX_VBLANK, 1,
                                            MT9V111_CORE_R06_DEF_VBLANK);
-       if (IS_ERR_OR_NULL(mt9v111->vblank)) {
-               ret = PTR_ERR(mt9v111->vblank);
-               goto error_free_ctrls;
-       }
 
        /* PIXEL_RATE is fixed: just expose it to user space. */
        v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
                          DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
                          DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
 
+       if (mt9v111->ctrls.error) {
+               ret = mt9v111->ctrls.error;
+               goto error_free_ctrls;
+       }
        mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
 
        /* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
        mt9v111->pad.flags      = MEDIA_PAD_FL_SOURCE;
        ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
        if (ret)
-               goto error_free_ctrls;
+               goto error_free_entity;
 #endif
 
        ret = mt9v111_chip_probe(mt9v111);
        if (ret)
-               goto error_free_ctrls;
+               goto error_free_entity;
 
        ret = v4l2_async_register_subdev(&mt9v111->sd);
        if (ret)
-               goto error_free_ctrls;
+               goto error_free_entity;
 
        return 0;
 
-error_free_ctrls:
-       v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
+error_free_entity:
 #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&mt9v111->sd.entity);
 #endif
 
+error_free_ctrls:
+       v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
        mutex_destroy(&mt9v111->pwr_mutex);
        mutex_destroy(&mt9v111->stream_mutex);
 
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(sd);
 
-       v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
 #if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&sd->entity);
 #endif
 
+       v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
        mutex_destroy(&mt9v111->pwr_mutex);
        mutex_destroy(&mt9v111->stream_mutex);
 
index 94c1fe0..54fe90a 100644 (file)
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
        depends on MFD_CROS_EC
        select CEC_CORE
        select CEC_NOTIFIER
+       select CHROME_PLATFORMS
+       select CROS_EC_PROTO
        ---help---
          If you say yes here you will get support for the
          ChromeOS Embedded Controller's CEC.
index 729b318..a5ae856 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
index c832539..12bce39 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #define CAMSS_CSI_PHY_LNn_CFG2(n)              (0x004 + 0x40 * (n))
 #define CAMSS_CSI_PHY_LNn_CFG3(n)              (0x008 + 0x40 * (n))
index bcd0dfd..2e65caf 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #define CSIPHY_3PH_LNn_CFG1(n)                 (0x000 + 0x100 * (n))
 #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG    (BIT(7) | BIT(6))
index 4559f3b..008afb8 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
index 7f26902..1f33b4e 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
        else
                return -EINVAL;
 
-       ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
-                             GFP_KERNEL);
+       ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+                                  GFP_KERNEL);
        if (!ispif->line)
                return -ENOMEM;
 
index da3a9fe..174a36b 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 
 #include "camss-vfe.h"
index 4c584bf..0dca8bf 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 
 #include "camss-vfe.h"
index dcc0c30..669615f 100644 (file)
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
-                               GFP_KERNEL);
+       camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+                                    sizeof(*camss->csiphy), GFP_KERNEL);
        if (!camss->csiphy)
                return -ENOMEM;
 
-       camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
-                             GFP_KERNEL);
+       camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+                                  GFP_KERNEL);
        if (!camss->csid)
                return -ENOMEM;
 
-       camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+       camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+                                 GFP_KERNEL);
        if (!camss->vfe)
                return -ENOMEM;
 
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
 
 MODULE_DEVICE_TABLE(of, camss_dt_match);
 
-static int camss_runtime_suspend(struct device *dev)
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
 {
        return 0;
 }
 
-static int camss_runtime_resume(struct device *dev)
+static int __maybe_unused camss_runtime_resume(struct device *dev)
 {
        return 0;
 }
index 666d319..1f6c1ee 100644 (file)
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
                        if (msg[0].addr == state->af9033_i2c_addr[1])
                                reg |= 0x100000;
 
-                       ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
-                                       msg[0].len - 3);
+                       ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+                                                                &msg[0].buf[3],
+                                                                msg[0].len - 3)
+                                               : -EOPNOTSUPP;
                } else {
                        /* I2C write */
                        u8 buf[MAX_XFER_SIZE];
index 127fe6e..a3ef1f5 100644 (file)
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
        if (sev == NULL)
                return;
 
-       /*
-        * If the event has been added to the fh->subscribed list, but its
-        * add op has not completed yet elems will be 0, treat this as
-        * not being subscribed.
-        */
-       if (!sev->elems)
-               return;
-
        /* Increase event sequence number on fh. */
        fh->sequence++;
 
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        struct v4l2_subscribed_event *sev, *found_ev;
        unsigned long flags;
        unsigned i;
+       int ret = 0;
 
        if (sub->type == V4L2_EVENT_ALL)
                return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
+       sev->elems = elems;
+
+       mutex_lock(&fh->subscribe_lock);
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (!found_ev)
-               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
+               /* Already listening */
                kvfree(sev);
-               return 0; /* Already listening */
+               goto out_unlock;
        }
 
        if (sev->ops && sev->ops->add) {
-               int ret = sev->ops->add(sev, elems);
+               ret = sev->ops->add(sev, elems);
                if (ret) {
-                       sev->ops = NULL;
-                       v4l2_event_unsubscribe(fh, sub);
-                       return ret;
+                       kvfree(sev);
+                       goto out_unlock;
                }
        }
 
-       /* Mark as ready for use */
-       sev->elems = elems;
+       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+       list_add(&sev->list, &fh->subscribed);
+       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-       return 0;
+out_unlock:
+       mutex_unlock(&fh->subscribe_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
                return 0;
        }
 
+       mutex_lock(&fh->subscribe_lock);
+
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
        if (sev && sev->ops && sev->ops->del)
                sev->ops->del(sev);
 
+       mutex_unlock(&fh->subscribe_lock);
+
        kvfree(sev);
 
        return 0;
index 3895999..c91a7bd 100644 (file)
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
        INIT_LIST_HEAD(&fh->available);
        INIT_LIST_HEAD(&fh->subscribed);
        fh->sequence = -1;
+       mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
                return;
        v4l_disable_media_source(fh->vdev);
        v4l2_event_unsubscribe_all(fh);
+       mutex_destroy(&fh->subscribe_lock);
        fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
index 31112f6..475e5b3 100644 (file)
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
                        if (ret < 0)
                                goto error;
                }
-       } else {
+       } else if (pdata) {
                for (i = 0; i < pdata->num_sub_devices; i++) {
                        pdata->sub_devices[i].dev.parent = dev;
                        ret = platform_device_register(&pdata->sub_devices[i]);
index e11ab12..800986a 100644 (file)
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
 }
 
 static const struct of_device_id usbhs_child_match_table[] = {
-       { .compatible = "ti,omap-ehci", },
-       { .compatible = "ti,omap-ohci", },
+       { .compatible = "ti,ehci-omap", },
+       { .compatible = "ti,ohci-omap3", },
        { }
 };
 
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = {
                .pm             = &usbhsomap_dev_pm_ops,
                .of_match_table = usbhs_omap_dt_ids,
        },
+       .probe          = usbhs_omap_probe,
        .remove         = usbhs_omap_remove,
 };
 
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
 
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
 {
-       return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+       return platform_driver_register(&usbhs_omap_driver);
 }
 
 /*
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void)
  */
 fs_initcall_sync(omap_usbhs_drvinit);
 
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
 {
        platform_driver_unregister(&usbhs_omap_driver);
 }
index eeb7eef..38f90e1 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/sysfs.h>
+#include <linux/nospec.h>
 
 static DEFINE_MUTEX(compass_mutex);
 
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
                return ret;
        if (val >= strlen(map))
                return -EINVAL;
+       val = array_index_nospec(val, strlen(map));
        mutex_lock(&compass_mutex);
        ret = compass_command(c, map[val]);
        mutex_unlock(&compass_mutex);
index 8f82bb9..b8aaa68 100644 (file)
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
        retrc = plpar_hcall_norets(H_REG_CRQ,
                                   vdev->unit_address,
                                   queue->msg_token, PAGE_SIZE);
-       retrc = rc;
+       rc = retrc;
 
        if (rc == H_RESOURCE)
                rc = ibmvmc_reset_crq_queue(adapter);
index 7bba62a..fc3872f 100644 (file)
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
 
        cl = cldev->cl;
 
+       mutex_lock(&bus->device_lock);
        if (cl->state == MEI_FILE_UNINITIALIZED) {
-               mutex_lock(&bus->device_lock);
                ret = mei_cl_link(cl);
-               mutex_unlock(&bus->device_lock);
                if (ret)
-                       return ret;
+                       goto out;
                /* update pointers */
                cl->cldev = cldev;
        }
 
-       mutex_lock(&bus->device_lock);
        if (mei_cl_is_connected(cl)) {
                ret = 0;
                goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
        if (err < 0)
                dev_err(bus->dev, "Could not disconnect from the ME client\n");
 
-out:
        mei_cl_bus_module_put(cldev);
-
+out:
        /* Flush queues and remove any pending read */
        mei_cl_flush_queues(cl, NULL);
        mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
 
        mei_me_cl_put(cldev->me_cl);
        mei_dev_bus_put(cldev->bus);
+       mei_cl_unlink(cldev->cl);
        kfree(cldev->cl);
        kfree(cldev);
 }
 
 static const struct device_type mei_cl_device_type = {
-       .release        = mei_cl_bus_dev_release,
+       .release = mei_cl_bus_dev_release,
 };
 
 /**
index 4ab6251..ebdcf0b 100644 (file)
@@ -1767,7 +1767,7 @@ out:
                }
        }
 
-       rets = buf->size;
+       rets = len;
 err:
        cl_dbg(dev, cl, "rpm: autosuspend\n");
        pm_runtime_mark_last_busy(dev->dev);
index 09e233d..e56f3e7 100644 (file)
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
 
                props_res = (struct hbm_props_response *)mei_msg;
 
-               if (props_res->status) {
+               if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
+                       dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+                               props_res->me_addr);
+               } else if (props_res->status) {
                        dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
                                props_res->status,
                                mei_hbm_status_str(props_res->status));
                        return -EPROTO;
+               } else {
+                       mei_hbm_me_cl_add(dev, props_res);
                }
 
-               mei_hbm_me_cl_add(dev, props_res);
-
                /* request property for the next client */
                if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
                        return -EIO;
index abf9e88..f57f5de 100644 (file)
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
                        host->caps |= MMC_CAP_NEEDS_POLL;
 
                ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-                                          cd_debounce_delay_ms,
+                                          cd_debounce_delay_ms * 1000,
                                           &cd_gpio_invert);
                if (!ret)
                        dev_info(host->parent, "Got CD GPIO\n");
index 648eb67..6edffee 100644 (file)
@@ -238,10 +238,6 @@ static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
        mmc_exit_request(mq->queue, req);
 }
 
-/*
- * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
- * will not be dispatched in parallel.
- */
 static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
 {
@@ -264,7 +260,7 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        spin_lock_irq(q->queue_lock);
 
-       if (mq->recovery_needed) {
+       if (mq->recovery_needed || mq->busy) {
                spin_unlock_irq(q->queue_lock);
                return BLK_STS_RESOURCE;
        }
@@ -291,6 +287,9 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                break;
        }
 
+       /* Parallel dispatch of requests is not supported at the moment */
+       mq->busy = true;
+
        mq->in_flight[issue_type] += 1;
        get_card = (mmc_tot_in_flight(mq) == 1);
        cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);
@@ -333,9 +332,12 @@ static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                mq->in_flight[issue_type] -= 1;
                if (mmc_tot_in_flight(mq) == 0)
                        put_card = true;
+               mq->busy = false;
                spin_unlock_irq(q->queue_lock);
                if (put_card)
                        mmc_put_card(card, &mq->ctx);
+       } else {
+               WRITE_ONCE(mq->busy, false);
        }
 
        return ret;
index 17e59d5..9bf3c92 100644 (file)
@@ -81,6 +81,7 @@ struct mmc_queue {
        unsigned int            cqe_busy;
 #define MMC_CQE_DCMD_BUSY      BIT(0)
 #define MMC_CQE_QUEUE_FULL     BIT(1)
+       bool                    busy;
        bool                    use_cqe;
        bool                    recovery_needed;
        bool                    in_recovery;
index 2a83368..86803a3 100644 (file)
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
        if (debounce) {
                ret = gpiod_set_debounce(desc, debounce);
                if (ret < 0)
-                       ctx->cd_debounce_delay_ms = debounce;
+                       ctx->cd_debounce_delay_ms = debounce / 1000;
        }
 
        if (gpio_invert)
index 294de17..61e4e2a 100644 (file)
@@ -217,7 +217,7 @@ static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host,
                         * We don't really have DMA, so we need
                         * to copy from our platform driver buffer
                         */
-                       sg_copy_to_buffer(data->sg, 1, host->virt_base,
+                       sg_copy_from_buffer(data->sg, 1, host->virt_base,
                                        data->sg->length);
                }
                host->data->bytes_xfered += data->sg->length;
@@ -393,7 +393,7 @@ static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host,
                 * We don't really have DMA, so we need to copy to our
                 * platform driver buffer
                 */
-               sg_copy_from_buffer(data->sg, 1, host->virt_base,
+               sg_copy_to_buffer(data->sg, 1, host->virt_base,
                                data->sg->length);
        }
 }
index 5aa2c94..be53044 100644 (file)
@@ -1976,7 +1976,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
        do {
                value = atmci_readl(host, ATMCI_RDR);
                if (likely(offset + 4 <= sg->length)) {
-                       sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
+                       sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
 
                        offset += 4;
                        nbytes += 4;
@@ -1993,7 +1993,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
                } else {
                        unsigned int remaining = sg->length - offset;
 
-                       sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
+                       sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
                        nbytes += remaining;
 
                        flush_dcache_page(sg_page(sg));
@@ -2003,7 +2003,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)
                                goto done;
 
                        offset = 4 - remaining;
-                       sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
+                       sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
                                        offset, 0);
                        nbytes += offset;
                }
@@ -2042,7 +2042,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
 
        do {
                if (likely(offset + 4 <= sg->length)) {
-                       sg_pcopy_from_buffer(sg, 1, &value, sizeof(u32), offset);
+                       sg_pcopy_to_buffer(sg, 1, &value, sizeof(u32), offset);
                        atmci_writel(host, ATMCI_TDR, value);
 
                        offset += 4;
@@ -2059,7 +2059,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
                        unsigned int remaining = sg->length - offset;
 
                        value = 0;
-                       sg_pcopy_from_buffer(sg, 1, &value, remaining, offset);
+                       sg_pcopy_to_buffer(sg, 1, &value, remaining, offset);
                        nbytes += remaining;
 
                        host->sg = sg = sg_next(sg);
@@ -2070,7 +2070,7 @@ static void atmci_write_data_pio(struct atmel_mci *host)
                        }
 
                        offset = 4 - remaining;
-                       sg_pcopy_from_buffer(sg, 1, (u8 *)&value + remaining,
+                       sg_pcopy_to_buffer(sg, 1, (u8 *)&value + remaining,
                                        offset, 0);
                        atmci_writel(host, ATMCI_TDR, value);
                        nbytes += offset;
index 09cb896..2cfec33 100644 (file)
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
 static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
 {
        struct device_node *slot_node;
+       struct platform_device *pdev;
 
        /*
         * TODO: the MMC core framework currently does not support
         * controllers with multiple slots properly. So we only register
         * the first slot for now
         */
-       slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+       slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
        if (!slot_node) {
                dev_warn(parent, "no 'mmc-slot' sub-node found\n");
                return ERR_PTR(-ENOENT);
        }
 
-       return of_platform_device_create(slot_node, NULL, parent);
+       pdev = of_platform_device_create(slot_node, NULL, parent);
+       of_node_put(slot_node);
+
+       return pdev;
 }
 
 static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
index 071693e..68760d4 100644 (file)
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
        dma_release_channel(host->tx_chan);
        dma_release_channel(host->rx_chan);
 
+       dev_pm_clear_wake_irq(host->dev);
        pm_runtime_dont_use_autosuspend(host->dev);
        pm_runtime_put_sync(host->dev);
        pm_runtime_disable(host->dev);
index 35cc0de..ca0b439 100644 (file)
 /* DM_CM_RST */
 #define RST_DTRANRST1          BIT(9)
 #define RST_DTRANRST0          BIT(8)
-#define RST_RESERVED_BITS      GENMASK_ULL(32, 0)
+#define RST_RESERVED_BITS      GENMASK_ULL(31, 0)
 
 /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
 #define INFO1_CLEAR            0
+#define INFO1_MASK_CLEAR       GENMASK_ULL(31, 0)
 #define INFO1_DTRANEND1                BIT(17)
 #define INFO1_DTRANEND0                BIT(16)
 
 /* DM_CM_INFO2 and DM_CM_INFO2_MASK */
+#define INFO2_MASK_CLEAR       GENMASK_ULL(31, 0)
 #define INFO2_DTRANERR1                BIT(17)
 #define INFO2_DTRANERR0                BIT(16)
 
@@ -252,6 +254,12 @@ renesas_sdhi_internal_dmac_request_dma(struct tmio_mmc_host *host,
 {
        struct renesas_sdhi *priv = host_to_priv(host);
 
+       /* Disable DMAC interrupts, we don't use them */
+       renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO1_MASK,
+                                           INFO1_MASK_CLEAR);
+       renesas_sdhi_internal_dmac_dm_write(host, DM_CM_INFO2_MASK,
+                                           INFO2_MASK_CLEAR);
+
        /* Each value is set to non-zero to assume "enabling" each DMA */
        host->chan_rx = host->chan_tx = (void *)0xdeadbeaf;
 
index 890f192..5389c48 100644 (file)
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-       if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+       if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+           of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
            !soc_device_match(gen3_soc_whitelist))
                return -ENODEV;
 
index cbfafc4..270d3c9 100644 (file)
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
                                          SPI_MEM_OP_NO_ADDR,
                                          SPI_MEM_OP_NO_DUMMY,
-                                         SPI_MEM_OP_DATA_IN(len, val, 1));
+                                         SPI_MEM_OP_DATA_IN(len, NULL, 1));
+       void *scratchbuf;
        int ret;
 
+       scratchbuf = kmalloc(len, GFP_KERNEL);
+       if (!scratchbuf)
+               return -ENOMEM;
+
+       op.data.buf.in = scratchbuf;
        ret = spi_mem_exec_op(flash->spimem, &op);
        if (ret < 0)
                dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
                        code);
+       else
+               memcpy(val, scratchbuf, len);
+
+       kfree(scratchbuf);
 
        return ret;
 }
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
                                          SPI_MEM_OP_NO_ADDR,
                                          SPI_MEM_OP_NO_DUMMY,
-                                         SPI_MEM_OP_DATA_OUT(len, buf, 1));
+                                         SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+       void *scratchbuf;
+       int ret;
 
-       return spi_mem_exec_op(flash->spimem, &op);
+       scratchbuf = kmemdup(buf, len, GFP_KERNEL);
+       if (!scratchbuf)
+               return -ENOMEM;
+
+       op.data.buf.out = scratchbuf;
+       ret = spi_mem_exec_op(flash->spimem, &op);
+       kfree(scratchbuf);
+
+       return ret;
 }
 
 static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
index 52e2cb3..99c460f 100644 (file)
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
        int ret, err = 0;
 
        np = mtd_get_of_node(master);
-       if (!mtd_is_partition(master))
+       if (mtd_is_partition(master))
+               of_node_get(np);
+       else
                np = of_get_child_by_name(np, "partitions");
+
        of_property_for_each_string(np, "compatible", prop, compat) {
                parser = mtd_part_get_compatible_parser(compat);
                if (!parser)
index ca18612..b864b93 100644 (file)
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
        }
 
        iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+       /*
+        * The ->setup_dma() hook kicks DMA by using the data/command
+        * interface, which belongs to a different AXI port from the
+        * register interface.  Read back the register to avoid a race.
+        */
+       ioread32(denali->reg + DMA_ENABLE);
 
        denali_reset_irq(denali);
        denali->setup_dma(denali, dma_addr, page, write);
@@ -1338,6 +1344,11 @@ int denali_init(struct denali_nand_info *denali)
 
        denali_enable_irq(denali);
        denali_reset_banks(denali);
+       if (!denali->max_banks) {
+               /* Error out earlier if no chip is found for some reason. */
+               ret = -ENODEV;
+               goto disable_irq;
+       }
 
        denali->active_bank = DENALI_INVALID_BANK;
 
index a3f0431..427fcbc 100644 (file)
@@ -1218,7 +1218,7 @@ static int docg4_resume(struct platform_device *pdev)
        return 0;
 }
 
-static void __init init_mtd_structs(struct mtd_info *mtd)
+static void init_mtd_structs(struct mtd_info *mtd)
 {
        /* initialize mtd and nand data structures */
 
@@ -1290,7 +1290,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
 
 }
 
-static int __init read_id_reg(struct mtd_info *mtd)
+static int read_id_reg(struct mtd_info *mtd)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct docg4_priv *doc = nand_get_controller_data(nand);
index 7af4d62..bc2ef52 100644 (file)
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
        for (op_id = 0; op_id < subop->ninstrs; op_id++) {
                unsigned int offset, naddrs;
                const u8 *addrs;
-               int len = nand_subop_get_data_len(subop, op_id);
+               int len;
 
                instr = &subop->instrs[op_id];
 
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
                                nfc_op->ndcb[0] |=
                                        NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
                                        NDCB0_LEN_OVRD;
+                               len = nand_subop_get_data_len(subop, op_id);
                                nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
                        }
                        nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
                                nfc_op->ndcb[0] |=
                                        NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
                                        NDCB0_LEN_OVRD;
+                               len = nand_subop_get_data_len(subop, op_id);
                                nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
                        }
                        nfc_op->data_delay_ns = instr->delay_ns;
index 9375cef..3d27616 100644 (file)
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 case SIOCFINDIPDDPRT:
                        spin_lock_bh(&ipddp_route_lock);
                        rp = __ipddp_find_route(&rcp);
-                       if (rp)
-                               memcpy(&rcp2, rp, sizeof(rcp2));
+                       if (rp) {
+                               memset(&rcp2, 0, sizeof(rcp2));
+                               rcp2.ip    = rp->ip;
+                               rcp2.at    = rp->at;
+                               rcp2.flags = rp->flags;
+                       }
                        spin_unlock_bh(&ipddp_route_lock);
 
                        if (rp) {
index a764a83..ee28ec9 100644 (file)
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
                                  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -971,16 +972,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
        struct slave *slave = NULL;
        struct list_head *iter;
        struct ad_info ad_info;
-       struct netpoll_info *ni;
-       const struct net_device_ops *ops;
 
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                if (bond_3ad_get_active_agg_info(bond, &ad_info))
                        return;
 
        bond_for_each_slave_rcu(bond, slave, iter) {
-               ops = slave->dev->netdev_ops;
-               if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+               if (!bond_slave_is_up(slave))
                        continue;
 
                if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +990,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
                                continue;
                }
 
-               ni = rcu_dereference_bh(slave->dev->npinfo);
-               if (down_trylock(&ni->dev_lock))
-                       continue;
-               ops->ndo_poll_controller(slave->dev);
-               up(&ni->dev_lock);
+               netpoll_poll_dev(slave->dev);
        }
 }
 
@@ -1177,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       /* don't change skb->dev for link-local packets */
-       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+       /* Link-local multicast packets should be passed to the
+        * stack on the link they arrive as well as pass them to the
+        * bond-master device. These packets are mostly usable when
+        * stack receives it with the link on which they arrive
+        * (e.g. LLDP) they also must be available on master. Some of
+        * the use cases include (but are not limited to): LLDP agents
+        * that must be able to operate both on enslaved interfaces as
+        * well as on bonds themselves; linux bridges that must be able
+        * to process/pass BPDUs from attached bonds when any kind of
+        * STP version is enabled on the network.
+        */
+       if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+               struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+               if (nskb) {
+                       nskb->dev = bond->dev;
+                       nskb->queue_mapping = 0;
+                       netif_rx(nskb);
+               }
                return RX_HANDLER_PASS;
+       }
        if (bond_should_deliver_exact_match(skb, slave, bond))
                return RX_HANDLER_EXACT;
 
@@ -1276,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
                        return NULL;
                }
        }
+       INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
        return slave;
 }
 
@@ -1283,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
        struct bonding *bond = bond_get_bond_by_slave(slave);
 
+       cancel_delayed_work_sync(&slave->notify_work);
        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                kfree(SLAVE_AD_INFO(slave));
 
@@ -1304,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
        info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-                              struct netdev_bonding_info *info)
-{
-       rtnl_lock();
-       netdev_bonding_info_change(dev, info);
-       rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-       struct netdev_notify_work *w =
-               container_of(_work, struct netdev_notify_work, work.work);
+       struct slave *slave = container_of(_work, struct slave,
+                                          notify_work.work);
+
+       if (rtnl_trylock()) {
+               struct netdev_bonding_info binfo;
 
-       bond_netdev_notify(w->dev, &w->bonding_info);
-       dev_put(w->dev);
-       kfree(w);
+               bond_fill_ifslave(slave, &binfo.slave);
+               bond_fill_ifbond(slave->bond, &binfo.master);
+               netdev_bonding_info_change(slave->dev, &binfo);
+               rtnl_unlock();
+       } else {
+               queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+       }
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-       struct bonding *bond = slave->bond;
-       struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-       if (!nnw)
-               return;
-
-       dev_hold(slave->dev);
-       nnw->dev = slave->dev;
-       bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-       bond_fill_ifbond(bond, &nnw->bonding_info.master);
-       INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-       queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+       queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
index d93c790..ad534b9 100644 (file)
@@ -1107,7 +1107,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
                b53_get_vlan_entry(dev, vid, vl);
 
                vl->members |= BIT(port);
-               if (untagged)
+               if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag |= BIT(port);
                else
                        vl->untag &= ~BIT(port);
@@ -1149,7 +1149,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
                                pvid = 0;
                }
 
-               if (untagged)
+               if (untagged && !dsa_is_cpu_port(ds, port))
                        vl->untag &= ~(BIT(port));
 
                b53_set_vlan_entry(dev, vid, vl);
index 7c791c1..bef0133 100644 (file)
 #define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION          0x7000
 #define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION          BIT(7)
 #define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION           BIT(6)
-#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION            BIT(5)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION             BIT(5)
 #define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION             BIT(4)
 
 /* Offset 0x0C: ATU Data Register */
index 3074108..5200e4b 100644 (file)
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
                chip->ports[entry.portvec].atu_member_violation++;
        }
 
-       if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+       if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
                dev_err_ratelimited(chip->dev,
                                    "ATU miss violation for %pM portvec %x\n",
                                    entry.mac, entry.portvec);
index 17f12c1..7635c38 100644 (file)
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
        cqe = &admin_queue->cq.entries[head_masked];
 
        /* Go over all the completions */
-       while ((cqe->acq_common_descriptor.flags &
+       while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
                        ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit was validated
                 */
-               rmb();
+               dma_rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);
 
                head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
        mmio_read_reg |= mmio_read->seq_num &
                        ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-       /* make sure read_resp->req_id get updated before the hw can write
-        * there
-        */
-       wmb();
-
-       writel_relaxed(mmio_read_reg,
-                      ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+       writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-       mmiowb();
        for (i = 0; i < timeout; i++) {
-               if (read_resp->req_id == mmio_read->seq_num)
+               if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
                        break;
 
                udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
        aenq_common = &aenq_e->aenq_common_desc;
 
        /* Go over all the events */
-       while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-              phase) {
+       while ((READ_ONCE(aenq_common->flags) &
+               ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+               /* Make sure the phase bit (ownership) is as expected before
+                * reading the rest of the descriptor.
+                */
+               dma_rmb();
+
                pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
                         aenq_common->group, aenq_common->syndrom,
                         (u64)aenq_common->timestamp_low +
index ea149c1..1c682b7 100644 (file)
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        if (desc_phase != expected_phase)
                return NULL;
 
+       /* Make sure we read the rest of the descriptor after the phase bit
+        * has been read
+        */
+       dma_rmb();
+
        return cdesc;
 }
 
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
        if (cdesc_phase != expected_phase)
                return -EAGAIN;
 
+       dma_rmb();
        if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
                pr_err("Invalid req id %d\n", cdesc->req_id);
                return -EINVAL;
index 6fdc753..2f76572 100644 (file)
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
        return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-                                           bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
        u16 tail;
 
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
        pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
                 io_sq->qid, tail);
 
-       if (relaxed)
-               writel_relaxed(tail, io_sq->db_addr);
-       else
-               writel(tail, io_sq->db_addr);
+       writel(tail, io_sq->db_addr);
 
        return 0;
 }
index c673ac2..25621a2 100644 (file)
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
 static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
 static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
                return -ENOMEM;
        }
 
-       dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+       dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
                           DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
        rx_info->page_offset = 0;
        ena_buf = &rx_info->ena_buf;
        ena_buf->paddr = dma;
-       ena_buf->len = PAGE_SIZE;
+       ena_buf->len = ENA_PAGE_SIZE;
 
        return 0;
 }
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
                return;
        }
 
-       dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+       dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
                       DMA_FROM_DEVICE);
 
        __free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
                            rx_ring->qid, i, num);
        }
 
-       if (likely(i)) {
-               /* Add memory barrier to make sure the desc were written before
-                * issue a doorbell
-                */
-               wmb();
-               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-               mmiowb();
-       }
+       /* ena_com_write_sq_doorbell issues a wmb() */
+       if (likely(i))
+               ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 
        rx_ring->next_to_use = next_to_use;
 
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
-                              PAGE_SIZE, DMA_FROM_DEVICE);
+                              ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-                               rx_info->page_offset, len, PAGE_SIZE);
+                               rx_info->page_offset, len, ENA_PAGE_SIZE);
 
                netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
                          "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
                          "Destroy failure, restarting device\n");
                ena_dump_stats_to_dmesg(adapter);
                /* rtnl lock already obtained in dev_ioctl() layer */
-               ena_destroy_device(adapter);
+               ena_destroy_device(adapter, false);
                ena_restore_device(adapter);
        }
 
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
                tx_ring->ring_size);
 
-       /* This WMB is aimed to:
-        * 1 - perform smp barrier before reading next_to_completion
-        * 2 - make sure the desc were written before trigger DB
-        */
-       wmb();
-
        /* stop the queue when no more space available, the packet can have up
         * to sgl_size + 2. one for the meta descriptor and one for header
         * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * stop the queue but meanwhile clean_tx_irq updates
                 * next_to_completion and terminates.
                 * The queue will remain stopped forever.
-                * To solve this issue this function perform rmb, check
-                * the wakeup condition and wake up the queue if needed.
+                * To solve this issue add a mb() to make sure that
+                * netif_tx_stop_queue() write is visible before checking if
+                * there is additional space in the queue.
                 */
-               smp_rmb();
+               smp_mb();
 
                if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
                                > ENA_TX_WAKEUP_THRESH) {
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-               /* trigger the dma engine */
-               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+               /* trigger the dma engine. ena_com_write_sq_doorbell()
+                * has a mb
+                */
+               ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
                u64_stats_update_begin(&tx_ring->syncp);
                tx_ring->tx_stats.doorbells++;
                u64_stats_update_end(&tx_ring->syncp);
@@ -2193,25 +2185,6 @@ error_drop_packet:
        return NETDEV_TX_OK;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-       struct ena_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* Dont schedule NAPI if the driver is in the middle of reset
-        * or netdev is down.
-        */
-
-       if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-           test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-               return;
-
-       for (i = 0; i < adapter->num_queues; i++)
-               napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
                            struct net_device *sb_dev,
                            select_queue_fallback_t fallback)
@@ -2377,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
        .ndo_change_mtu         = ena_change_mtu,
        .ndo_set_mac_address    = NULL,
        .ndo_validate_addr      = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 
 static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2550,12 +2520,15 @@ err_disable_msix:
        return rc;
 }
 
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 {
        struct net_device *netdev = adapter->netdev;
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        bool dev_up;
 
+       if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               return;
+
        netif_carrier_off(netdev);
 
        del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2536,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
        adapter->dev_up_before_reset = dev_up;
 
-       ena_com_set_admin_running_state(ena_dev, false);
+       if (!graceful)
+               ena_com_set_admin_running_state(ena_dev, false);
 
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
@@ -2591,6 +2565,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
        adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
        clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+       clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 }
 
 static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2610,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
                }
        }
 
+       set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
        dev_err(&pdev->dev, "Device reset completed successfully\n");
 
@@ -2665,7 +2641,7 @@ static void ena_fw_reset_device(struct work_struct *work)
                return;
        }
        rtnl_lock();
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, false);
        ena_restore_device(adapter);
        rtnl_unlock();
 }
@@ -3409,30 +3385,24 @@ static void ena_remove(struct pci_dev *pdev)
                netdev->rx_cpu_rmap = NULL;
        }
 #endif /* CONFIG_RFS_ACCEL */
-
-       unregister_netdev(netdev);
        del_timer_sync(&adapter->timer_service);
 
        cancel_work_sync(&adapter->reset_task);
 
-       /* Reset the device only if the device is running. */
-       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-               ena_com_dev_reset(ena_dev, adapter->reset_reason);
+       unregister_netdev(netdev);
 
-       ena_free_mgmnt_irq(adapter);
+       /* If the device is running then we want to make sure the device will be
+        * reset to make sure no more events will be issued by the device.
+        */
+       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
-       ena_disable_msix(adapter);
+       rtnl_lock();
+       ena_destroy_device(adapter, true);
+       rtnl_unlock();
 
        free_netdev(netdev);
 
-       ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-       ena_com_abort_admin_commands(ena_dev);
-
-       ena_com_wait_for_abort_completion(ena_dev);
-
-       ena_com_admin_destroy(ena_dev);
-
        ena_com_rss_destroy(ena_dev);
 
        ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3437,7 @@ static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
                        "ignoring device reset request as the device is being suspended\n");
                clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
        }
-       ena_destroy_device(adapter);
+       ena_destroy_device(adapter, true);
        rtnl_unlock();
        return 0;
 }
index f1972b5..7c7ae56 100644 (file)
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bits long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
index 116997a..00332a1 100644 (file)
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
        int i, ret;
        unsigned long esar_base;
        unsigned char *esar;
+       const char *desc;
 
        if (dec_lance_debug && version_printed++ == 0)
                printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
         */
        switch (type) {
        case ASIC_LANCE:
-               printk("%s: IOASIC onboard LANCE", name);
+               desc = "IOASIC onboard LANCE";
                break;
        case PMAD_LANCE:
-               printk("%s: PMAD-AA", name);
+               desc = "PMAD-AA";
                break;
        case PMAX_LANCE:
-               printk("%s: PMAX onboard LANCE", name);
+               desc = "PMAX onboard LANCE";
                break;
        }
        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = esar[i * 4];
 
-       printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+       printk("%s: %s, addr = %pM, irq = %d\n",
+              name, desc, dev->dev_addr, dev->irq);
 
        dev->netdev_ops = &lance_netdev_ops;
        dev->watchdog_timeo = 5*HZ;
index 024998d..6a8e256 100644 (file)
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
 static void bmac_set_timeout(struct net_device *dev);
 static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
 static void bmac_start(struct net_device *dev);
 
 #define        DBDMA_SET(x)    ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
        spin_unlock_irqrestore(&bp->lock, flags);
 }
 
-static int
+static netdev_tx_t
 bmac_output(struct sk_buff *skb, struct net_device *dev)
 {
        struct bmac_data *bp = netdev_priv(dev);
index 0b5429d..68b9ee4 100644 (file)
@@ -78,7 +78,7 @@ struct mace_data {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static void mace_reset(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
     mp->timeout_active = 1;
 }
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
     struct mace_data *mp = netdev_priv(dev);
     volatile struct dbdma_regs __iomem *td = mp->tx_dma;
index 137cbb4..376f2c2 100644 (file)
@@ -89,7 +89,7 @@ struct mace_frame {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
 static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
  * Transmit a frame
  */
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
        struct mace_data *mp = netdev_priv(dev);
        unsigned long flags;
index b5f1f62..d1e1a0b 100644 (file)
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                }
 
                /* for single fragment packets use build_skb() */
-               if (buff->is_eop) {
+               if (buff->is_eop &&
+                   buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
                        skb = build_skb(page_address(buff->page),
-                                       buff->len + AQ_SKB_ALIGN);
+                                       AQ_CFG_RX_FRAME_MAX);
                        if (unlikely(!skb)) {
                                err = -ENOMEM;
                                goto err_exit;
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                        buff->len - ETH_HLEN,
                                        SKB_TRUESIZE(buff->len - ETH_HLEN));
 
-                       for (i = 1U, next_ = buff->next,
-                            buff_ = &self->buff_ring[next_]; true;
-                            next_ = buff_->next,
-                            buff_ = &self->buff_ring[next_], ++i) {
-                               skb_add_rx_frag(skb, i, buff_->page, 0,
-                                               buff_->len,
-                                               SKB_TRUESIZE(buff->len -
-                                               ETH_HLEN));
-                               buff_->is_cleaned = 1;
-
-                               if (buff_->is_eop)
-                                       break;
+                       if (!buff->is_eop) {
+                               for (i = 1U, next_ = buff->next,
+                                    buff_ = &self->buff_ring[next_];
+                                    true; next_ = buff_->next,
+                                    buff_ = &self->buff_ring[next_], ++i) {
+                                       skb_add_rx_frag(skb, i,
+                                                       buff_->page, 0,
+                                                       buff_->len,
+                                                       SKB_TRUESIZE(buff->len -
+                                                       ETH_HLEN));
+                                       buff_->is_cleaned = 1;
+
+                                       if (buff_->is_eop)
+                                               break;
+                               }
                        }
                }
 
index 1470457..c57238f 100644 (file)
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
        u32 reg;
 
-       /* Stop monitoring MPD interrupt */
-       intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        /* Disable RXCHK, active filters and Broadcom tag matching */
        reg = rxchk_readl(priv, RXCHK_CONTROL);
        reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
        /* Clear the MagicPacket detection logic */
        mpd_enable_set(priv, false);
 
+       reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+       if (reg & INTRL2_0_MPD)
+               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+       if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+                                 RXCHK_BRCM_TAG_MATCH_MASK;
+               netdev_info(priv->netdev,
+                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+       }
+
        netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct bcm_sysport_tx_ring *txr;
        unsigned int ring, ring_bit;
-       u32 reg;
 
        priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
                          ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
-       if (priv->irq0_stat & INTRL2_0_MPD)
-               netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-       if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-               reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-                                 RXCHK_BRCM_TAG_MATCH_MASK;
-               netdev_info(priv->netdev,
-                           "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-       }
-
        if (!priv->is_lite)
                goto out;
 
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
        /* UniMAC receive needs to be turned on */
        umac_enable_set(priv, CMD_RX_EN, 1);
 
-       /* Enable the interrupt wake-up source */
-       intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
        netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
        return 0;
index 71362b7..fcc2328 100644 (file)
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        }
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
-       struct bnx2x *bp = netdev_priv(dev);
-       int i;
-
-       for_each_eth_queue(bp, i) {
-               struct bnx2x_fastpath *fp = &bp->fp[i];
-               napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-       }
-}
-#endif
-
 static int bnx2x_validate_addr(struct net_device *dev)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_tx_timeout         = bnx2x_tx_timeout,
        .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = poll_bnx2x,
-#endif
        .ndo_setup_tc           = __bnx2x_setup_tc,
 #ifdef CONFIG_BNX2X_SRIOV
        .ndo_set_vf_mac         = bnx2x_set_vf_mac,
index 8bb1e38..e2d9254 100644 (file)
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
-                       if (unlikely(tx_pkts > bp->tx_wake_thresh))
+                       if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
                                rx_pkts = budget;
+                               raw_cons = NEXT_RAW_CMP(raw_cons);
+                               break;
+                       }
                } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
                        if (likely(budget))
                                rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                }
                raw_cons = NEXT_RAW_CMP(raw_cons);
 
-               if (rx_pkts == budget)
+               if (rx_pkts && rx_pkts == budget)
                        break;
        }
 
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
        while (1) {
                work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-               if (work_done >= budget)
+               if (work_done >= budget) {
+                       if (!budget)
+                               BNXT_CP_DB_REARM(cpr->cp_doorbell,
+                                                cpr->cp_raw_cons);
                        break;
+               }
 
                if (!bnxt_has_work(bp, cpr)) {
                        if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
        struct pci_dev *pdev = bp->pdev;
 
-       dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
-                         bp->hwrm_cmd_resp_dma_addr);
-
-       bp->hwrm_cmd_resp_addr = NULL;
+       if (bp->hwrm_cmd_resp_addr) {
+               dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+                                 bp->hwrm_cmd_resp_dma_addr);
+               bp->hwrm_cmd_resp_addr = NULL;
+       }
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
                                      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
                enables |= ring_grps ?
                           FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-               enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+               enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
 
                req->num_rx_rings = cpu_to_le16(rx_rings);
                req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -5913,12 +5921,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
        return bp->hw_resc.max_cp_rings;
 }
 
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-       bp->hw_resc.max_cp_rings = max;
+       return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
 }
 
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -6684,6 +6692,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
                hw_resc->resv_rx_rings = 0;
                hw_resc->resv_hw_ring_grps = 0;
                hw_resc->resv_vnics = 0;
+               bp->tx_nr_rings = 0;
+               bp->rx_nr_rings = 0;
        }
        return rc;
 }
@@ -7670,21 +7680,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
        bnxt_queue_sp_work(bp);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
-       struct bnxt *bp = netdev_priv(dev);
-       int i;
-
-       /* Only process tx rings/combined rings in netpoll mode. */
-       for (i = 0; i < bp->tx_nr_rings; i++) {
-               struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
-               napi_schedule(&txr->bnapi->napi);
-       }
-}
-#endif
-
 static void bnxt_timer(struct timer_list *t)
 {
        struct bnxt *bp = from_timer(bp, t, timer);
@@ -8025,7 +8020,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
        if (ether_addr_equal(addr->sa_data, dev->dev_addr))
                return 0;
 
-       rc = bnxt_approve_mac(bp, addr->sa_data);
+       rc = bnxt_approve_mac(bp, addr->sa_data, true);
        if (rc)
                return rc;
 
@@ -8518,9 +8513,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
        .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
        .ndo_set_vf_trust       = bnxt_set_vf_trust,
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = bnxt_poll_controller,
-#endif
        .ndo_setup_tc           = bnxt_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
@@ -8629,7 +8621,8 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
-       *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+       *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
+                       hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
        *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8769,20 +8762,25 @@ static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
        if (bp->tx_nr_rings)
                return 0;
 
+       bnxt_ulp_irq_stop(bp);
+       bnxt_clear_int_mode(bp);
        rc = bnxt_set_dflt_rings(bp, true);
        if (rc) {
                netdev_err(bp->dev, "Not enough rings available.\n");
-               return rc;
+               goto init_dflt_ring_err;
        }
        rc = bnxt_init_int_mode(bp);
        if (rc)
-               return rc;
+               goto init_dflt_ring_err;
+
        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
        if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
                bp->flags |= BNXT_FLAG_RFS;
                bp->dev->features |= NETIF_F_NTUPLE;
        }
-       return 0;
+init_dflt_ring_err:
+       bnxt_ulp_irq_restart(bp, rc);
+       return rc;
 }
 
 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
@@ -8819,14 +8817,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
        } else {
 #ifdef CONFIG_BNXT_SRIOV
                struct bnxt_vf_info *vf = &bp->vf;
+               bool strict_approval = true;
 
                if (is_valid_ether_addr(vf->mac_addr)) {
                        /* overwrite netdev dev_addr with admin VF MAC */
                        memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+                       /* Older PF driver or firmware may not approve this
+                        * correctly.
+                        */
+                       strict_approval = false;
                } else {
                        eth_hw_addr_random(bp->dev);
                }
-               rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+               rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
 #endif
        }
        return rc;
@@ -9055,6 +9058,7 @@ init_err_cleanup_tc:
        bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+       bnxt_free_hwrm_resources(bp);
        bnxt_cleanup_pci(bp);
 
 init_err_free:
index fefa011..bde3846 100644 (file)
@@ -1481,8 +1481,7 @@ int bnxt_hwrm_set_coal(struct bnxt *);
 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
-void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
-unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
+unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp);
 int bnxt_get_avail_msix(struct bnxt *bp, int num);
 int bnxt_reserve_rings(struct bnxt *bp);
 void bnxt_tx_disable(struct bnxt *bp);
index ddc98c3..a85d2be 100644 (file)
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
        for (i = 0; i < max_tc; i++) {
-               u8 qidx;
+               u8 qidx = bp->tc_to_qidx[i];
 
                req.enables |= cpu_to_le32(
-                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+                       QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+                       qidx);
 
                memset(&cos2bw, 0, sizeof(cos2bw));
-               qidx = bp->tc_to_qidx[i];
                cos2bw.queue_id = bp->q_info[qidx].queue_id;
                if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
                        cos2bw.tsa =
index f3b9fbc..790c684 100644 (file)
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                }
        }
 
+       if (i == ARRAY_SIZE(nvm_params))
+               return -EOPNOTSUPP;
+
        if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
                idx = bp->pf.port_id;
        else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
index 6d583bc..3962f6f 100644 (file)
@@ -451,7 +451,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
 
-       vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
@@ -549,7 +549,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
        max_stat_ctxs = hw_resc->max_stat_ctxs;
 
        /* Remaining rings are distributed equally amongs VF's for now */
-       vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+       vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) -
+                      bp->cp_nr_rings) / num_vfs;
        vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -643,7 +644,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
         */
        vfs_supported = *num_vfs;
 
-       avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
+       avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings;
        avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
        avail_cp = min_t(int, avail_cp, avail_stat);
 
@@ -1103,7 +1104,7 @@ update_vf_mac_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 {
        struct hwrm_func_vf_cfg_input req = {0};
        int rc = 0;
@@ -1121,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
        memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 mac_done:
-       if (rc) {
+       if (rc && strict) {
                rc = -EADDRNOTAVAIL;
                netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
                            mac);
+               return rc;
        }
-       return rc;
+       return 0;
 }
 #else
 
@@ -1143,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 {
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
 {
        return 0;
 }
index e9b20cd..2eed9ed 100644 (file)
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
 void bnxt_sriov_disable(struct bnxt *);
 void bnxt_hwrm_exec_fwd_req(struct bnxt *);
 void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *);
+int bnxt_approve_mac(struct bnxt *, u8 *, bool);
 #endif
index 139d96c..e1594c9 100644 (file)
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
        return 0;
 }
 
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
-                              struct bnxt_tc_actions *actions,
-                              const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+                             struct bnxt_tc_actions *actions,
+                             const struct tc_action *tc_act)
 {
-       if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+       switch (tcf_vlan_action(tc_act)) {
+       case TCA_VLAN_ACT_POP:
                actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
-       } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+               break;
+       case TCA_VLAN_ACT_PUSH:
                actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
                actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
                actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+               break;
+       default:
+               return -EOPNOTSUPP;
        }
+       return 0;
 }
 
 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -110,16 +116,14 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct tcf_exts *tc_exts)
 {
        const struct tc_action *tc_act;
-       LIST_HEAD(tc_actions);
-       int rc;
+       int i, rc;
 
        if (!tcf_exts_has_actions(tc_exts)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }
 
-       tcf_exts_to_list(tc_exts, &tc_actions);
-       list_for_each_entry(tc_act, &tc_actions, list) {
+       tcf_exts_for_each_action(i, tc_act, tc_exts) {
                /* Drop action */
                if (is_tcf_gact_shot(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
@@ -136,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 
                /* Push/pop VLAN */
                if (is_tcf_vlan(tc_act)) {
-                       bnxt_tc_parse_vlan(bp, actions, tc_act);
+                       rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+                       if (rc)
+                               return rc;
                        continue;
                }
 
index c37b284..beee612 100644 (file)
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
        edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
        return avail_msix;
 }
@@ -178,7 +177,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
 {
        struct net_device *dev = edev->net;
        struct bnxt *bp = netdev_priv(dev);
-       int max_cp_rings, msix_requested;
 
        ASSERT_RTNL();
        if (ulp_id != BNXT_ROCE_ULP)
@@ -187,9 +185,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
        if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
                return 0;
 
-       max_cp_rings = bnxt_get_max_func_cp_rings(bp);
-       msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
-       bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
        edev->ulp_tbl[ulp_id].msix_requested = 0;
        edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
        if (netif_running(dev)) {
@@ -220,21 +215,6 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
        return 0;
 }
 
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
-{
-       ASSERT_RTNL();
-       if (bnxt_ulp_registered(bp->edev, ulp_id)) {
-               struct bnxt_en_dev *edev = bp->edev;
-               unsigned int msix_req, max;
-
-               msix_req = edev->ulp_tbl[ulp_id].msix_requested;
-               max = bnxt_get_max_func_cp_rings(bp);
-               bnxt_set_max_func_cp_rings(bp, max - msix_req);
-               max = bnxt_get_max_func_stat_ctxs(bp);
-               bnxt_set_max_func_stat_ctxs(bp, max - 1);
-       }
-}
-
 static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
                         struct bnxt_fw_msg *fw_msg)
 {
index df48ac7..d9bea37 100644 (file)
@@ -90,7 +90,6 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
 
 int bnxt_get_ulp_msix_num(struct bnxt *bp);
 int bnxt_get_ulp_msix_base(struct bnxt *bp);
-void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
 void bnxt_ulp_stop(struct bnxt *bp);
 void bnxt_ulp_start(struct bnxt *bp);
 void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
index b773bc0..14b4961 100644 (file)
@@ -186,6 +186,9 @@ struct bcmgenet_mib_counters {
 #define UMAC_MAC1                      0x010
 #define UMAC_MAX_FRAME_LEN             0x014
 
+#define UMAC_MODE                      0x44
+#define  MODE_LINK_STATUS              (1 << 5)
+
 #define UMAC_EEE_CTRL                  0x064
 #define  EN_LPI_RX_PAUSE               (1 << 0)
 #define  EN_LPI_TX_PFC                 (1 << 1)
index 5333274..4241ae9 100644 (file)
@@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
 static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
                                          struct fixed_phy_status *status)
 {
-       if (dev && dev->phydev && status)
-               status->link = dev->phydev->link;
+       struct bcmgenet_priv *priv;
+       u32 reg;
+
+       if (dev && dev->phydev && status) {
+               priv = netdev_priv(dev);
+               reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+               status->link = !!(reg & MODE_LINK_STATUS);
+       }
 
        return 0;
 }
index dc09f9a..58b9744 100644 (file)
@@ -482,11 +482,6 @@ static int macb_mii_probe(struct net_device *dev)
 
        if (np) {
                if (of_phy_is_fixed_link(np)) {
-                       if (of_phy_register_fixed_link(np) < 0) {
-                               dev_err(&bp->pdev->dev,
-                                       "broken fixed-link specification\n");
-                               return -ENODEV;
-                       }
                        bp->phy_node = of_node_get(np);
                } else {
                        bp->phy_node = of_parse_phandle(np, "phy-handle", 0);
@@ -569,7 +564,7 @@ static int macb_mii_init(struct macb *bp)
 {
        struct macb_platform_data *pdata;
        struct device_node *np;
-       int err;
+       int err = -ENXIO;
 
        /* Enable management port */
        macb_writel(bp, NCR, MACB_BIT(MPE));
@@ -592,12 +587,23 @@ static int macb_mii_init(struct macb *bp)
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
 
        np = bp->pdev->dev.of_node;
-       if (pdata)
-               bp->mii_bus->phy_mask = pdata->phy_mask;
+       if (np && of_phy_is_fixed_link(np)) {
+               if (of_phy_register_fixed_link(np) < 0) {
+                       dev_err(&bp->pdev->dev,
+                               "broken fixed-link specification %pOF\n", np);
+                       goto err_out_free_mdiobus;
+               }
+
+               err = mdiobus_register(bp->mii_bus);
+       } else {
+               if (pdata)
+                       bp->mii_bus->phy_mask = pdata->phy_mask;
+
+               err = of_mdiobus_register(bp->mii_bus, np);
+       }
 
-       err = of_mdiobus_register(bp->mii_bus, np);
        if (err)
-               goto err_out_free_mdiobus;
+               goto err_out_free_fixed_link;
 
        err = macb_mii_probe(bp->dev);
        if (err)
@@ -607,6 +613,7 @@ static int macb_mii_init(struct macb *bp)
 
 err_out_unregister_bus:
        mdiobus_unregister(bp->mii_bus);
+err_out_free_fixed_link:
        if (np && of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
 err_out_free_mdiobus:
@@ -642,7 +649,7 @@ static int macb_halt_tx(struct macb *bp)
                if (!(status & MACB_BIT(TGO)))
                        return 0;
 
-               usleep_range(10, 250);
+               udelay(250);
        } while (time_before(halt_time, timeout));
 
        return -ETIMEDOUT;
@@ -2028,14 +2035,17 @@ static void macb_reset_hw(struct macb *bp)
 {
        struct macb_queue *queue;
        unsigned int q;
+       u32 ctrl = macb_readl(bp, NCR);
 
        /* Disable RX and TX (XXX: Should we halt the transmission
         * more gracefully?)
         */
-       macb_writel(bp, NCR, 0);
+       ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
 
        /* Clear the stats registers (XXX: Update stats first?) */
-       macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+       ctrl |= MACB_BIT(CLRSTAT);
+
+       macb_writel(bp, NCR, ctrl);
 
        /* Clear all status flags */
        macb_writel(bp, TSR, -1);
@@ -2150,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
                else
                        dmacfg &= ~GEM_BIT(TXCOEN);
 
+               dmacfg &= ~GEM_BIT(ADDR64);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                if (bp->hw_dma_cap & HW_DMA_CAP_64B)
                        dmacfg |= GEM_BIT(ADDR64);
@@ -2223,7 +2234,7 @@ static void macb_init_hw(struct macb *bp)
        }
 
        /* Enable TX and RX */
-       macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+       macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
 }
 
 /* The hash address register is 64 bits long and takes up two
@@ -3827,6 +3838,13 @@ static const struct macb_config at91sam9260_config = {
        .init = macb_init,
 };
 
+static const struct macb_config sama5d3macb_config = {
+       .caps = MACB_CAPS_SG_DISABLED
+             | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
 static const struct macb_config pc302gem_config = {
        .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
        .dma_burst_length = 16,
@@ -3894,6 +3912,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,gem", .data = &pc302gem_config },
        { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
        { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+       { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
        { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
        { .compatible = "cdns,emac", .data = &emac_config },
index a19172d..c34ea38 100644 (file)
@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+                       return -EINVAL;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
 
+               if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+                       return -EINVAL;
+
                /* Display qsets for all ports when offload enabled */
                if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                        q1 = 0;
@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
+               if (edata.cmd != CHELSIO_SET_QSET_NUM)
+                       return -EINVAL;
                if (edata.val < 1 ||
                        (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;
@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_LOAD_FW)
+                       return -EINVAL;
                /* Check t.len sanity ? */
                fw_data = memdup_user(useraddr + sizeof(t), t.len);
                if (IS_ERR(fw_data))
@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
+               if (m.cmd != CHELSIO_SETMTUTAB)
+                       return -EINVAL;
                if (m.nmtus != NMTUS)
                        return -EINVAL;
                if (m.mtus[0] < 81)     /* accommodate SACK */
@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
+               if (m.cmd != CHELSIO_SET_PM)
+                       return -EINVAL;
                if (!is_power_of_2(m.rx_pg_sz) ||
                        !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* not power of 2 */
@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_GET_MEM)
+                       return -EINVAL;
                if ((t.addr & 7) || (t.len & 7))
                        return -EINVAL;
                if (t.mem_id == MEM_CM)
@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
                        return -EAGAIN;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
+               if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+                       return -EINVAL;
 
                tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
index 623f73d..c116f96 100644 (file)
@@ -417,10 +417,9 @@ static void cxgb4_process_flow_actions(struct net_device *in,
                                       struct ch_filter_specification *fs)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
+       int i;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, cls->exts) {
                if (is_tcf_gact_ok(a)) {
                        fs->action = FILTER_PASS;
                } else if (is_tcf_gact_shot(a)) {
@@ -591,10 +590,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
-       LIST_HEAD(actions);
+       int i;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, cls->exts) {
                if (is_tcf_gact_ok(a)) {
                        /* Do nothing */
                } else if (is_tcf_gact_shot(a)) {
index 18eb2ae..c7d2b4d 100644 (file)
@@ -93,14 +93,13 @@ static int fill_action_fields(struct adapter *adap,
        unsigned int num_actions = 0;
        const struct tc_action *a;
        struct tcf_exts *exts;
-       LIST_HEAD(actions);
+       int i;
 
        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                /* Don't allow more than one action per rule. */
                if (num_actions)
                        return -EINVAL;
index b8f75a2..f152da1 100644 (file)
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
 };
 
 struct cpl_abort_req_rss6 {
-       WR_HDR;
        union opcode_tid ot;
        __be32 srqidx_status;
 };
index e2a7029..13dfdfc 100644 (file)
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
        return rx;
 }
 
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ep93xx_priv *ep = netdev_priv(dev);
        struct ep93xx_tdesc *txd;
index 3f8fe8f..6324e80 100644 (file)
@@ -113,7 +113,7 @@ struct net_local {
 
 /* Index to functions, as function prototypes. */
 static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t net_interrupt(int irq, void *dev_id);
 static void set_multicast_list(struct net_device *dev);
 static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
        return 0;
 }
 
-static int
+static netdev_tx_t
 net_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
        struct net_local *lp = netdev_priv(dev);
index ff92ab1..1e9d882 100644 (file)
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                                port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
                        }
                }
-               return status;
+               goto err;
        }
 
        pcie = be_get_pcie_desc(resp->func_param, desc_count,
index 74d1226..5347872 100644 (file)
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                   NETIF_F_TSO | NETIF_F_TSO6 |
                                   NETIF_F_GSO_UDP_TUNNEL;
-       netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-       netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
 
        dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
                 be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
        adapter->vxlan_port = 0;
 
        netdev->hw_enc_features = 0;
-       netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
-       netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
 }
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
        struct be_adapter *adapter = netdev_priv(netdev);
 
        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+               NETIF_F_GSO_UDP_TUNNEL |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_CTAG_TX;
        if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
index 2708297..bf9b9fd 100644 (file)
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
@@ -1273,7 +1273,7 @@ skb_done:
 
                /* Since we have freed up a buffer, the ring is no longer full
                 */
-               if (netif_queue_stopped(ndev)) {
+               if (netif_tx_queue_stopped(nq)) {
                        entries_free = fec_enet_get_free_txdesc_num(txq);
                        if (entries_free >= txq->tx_wake_threshold)
                                netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
-                       netif_wake_queue(ndev);
+                       netif_tx_wake_all_queues(ndev);
                        netif_tx_unlock_bh(ndev);
                        napi_enable(&fep->napi);
                }
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                napi_disable(&fep->napi);
                netif_tx_lock_bh(ndev);
                fec_restart(ndev);
-               netif_wake_queue(ndev);
+               netif_tx_wake_all_queues(ndev);
                netif_tx_unlock_bh(ndev);
                napi_enable(&fep->napi);
        }
index a051e58..79d03f8 100644 (file)
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
-       else
+       else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
 }
index fa5b30f..08a750f 100644 (file)
@@ -220,10 +220,10 @@ struct hnae_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
+       u32 page_offset;
+       u32 length;     /* length of the buffer */
 
-       u16 length;     /* length of the buffer */
+       u16 reuse_flag;
 
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
@@ -486,6 +486,8 @@ struct hnae_ae_ops {
                        u8 *auto_neg, u16 *speed, u8 *duplex);
        void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
        void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+       bool (*need_adjust_link)(struct hnae_handle *handle,
+                                int speed, int duplex);
        int (*set_loopback)(struct hnae_handle *handle,
                            enum hnae_loop loop_mode, int en);
        void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
index e6aad30..b52029e 100644 (file)
@@ -155,6 +155,41 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
                hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
 }
 
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+       struct dsaf_device *dsaf_dev;
+       struct hns_ppe_cb *ppe_cb;
+       struct hnae_vf_cb *vf_cb;
+       int ret;
+       int i;
+
+       for (i = 0; i < handle->q_num; i++) {
+               ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+               if (ret)
+                       return ret;
+       }
+
+       ppe_cb = hns_get_ppe_cb(handle);
+       ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+       if (ret)
+               return ret;
+
+       dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+       if (!dsaf_dev)
+               return -EINVAL;
+       ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+       if (ret)
+               return ret;
+
+       vf_cb = hns_ae_get_vf_cb(handle);
+       ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+       if (ret)
+               return ret;
+
+       mdelay(10);
+       return 0;
+}
+
 static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
 {
        int q_num = handle->q_num;
@@ -399,12 +434,41 @@ static int hns_ae_get_mac_info(struct hnae_handle *handle,
        return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
 }
 
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+                                   int duplex)
+{
+       struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+       return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
 static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
                               int duplex)
 {
        struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
 
-       hns_mac_adjust_link(mac_cb, speed, duplex);
+       switch (mac_cb->dsaf_dev->dsaf_ver) {
+       case AE_VERSION_1:
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               break;
+
+       case AE_VERSION_2:
+               /* chip need to clear all pkt inside */
+               hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+               if (hns_ae_wait_flow_down(handle)) {
+                       hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+                       break;
+               }
+
+               hns_mac_adjust_link(mac_cb, speed, duplex);
+               hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+               break;
+
+       default:
+               break;
+       }
+
+       return;
 }
 
 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
@@ -902,6 +966,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
        .get_status = hns_ae_get_link_status,
        .get_info = hns_ae_get_mac_info,
        .adjust_link = hns_ae_adjust_link,
+       .need_adjust_link = hns_ae_need_adjust_link,
        .set_loopback = hns_ae_config_loopback,
        .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
        .get_pauseparam = hns_ae_get_pauseparam,
index 5488c6e..09e4061 100644 (file)
@@ -257,6 +257,16 @@ static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
        *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
 }
 
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+                                     int duplex)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+       return (mac_cb->speed != speed) ||
+               (mac_cb->half_duplex == duplex);
+}
+
 static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
                                u32 full_duplex)
 {
@@ -309,6 +319,30 @@ static void hns_gmac_set_promisc(void *mac_drv, u8 en)
                hns_gmac_set_uc_match(mac_drv, en);
 }
 
+int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+       struct mac_driver *drv = (struct mac_driver *)mac_drv;
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+               /* bit5~bit0 is not send complete pkts */
+               if ((val & 0x3f) == 0)
+                       break;
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(drv->dev,
+                       "hns ge %d fifo was not idle.\n", drv->mac_id);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 static void hns_gmac_init(void *mac_drv)
 {
        u32 port;
@@ -690,6 +724,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->mac_disable = hns_gmac_disable;
        mac_drv->mac_free = hns_gmac_free;
        mac_drv->adjust_link = hns_gmac_adjust_link;
+       mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
        mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
        mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
        mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
@@ -717,6 +752,7 @@ void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
        mac_drv->get_strings = hns_gmac_get_strings;
        mac_drv->update_stats = hns_gmac_update_stats;
        mac_drv->set_promiscuous = hns_gmac_set_promisc;
+       mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
 
        return (void *)mac_drv;
 }
index 1c2326b..6ed6f14 100644 (file)
@@ -114,6 +114,26 @@ int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
        return 0;
 }
 
+/**
+ *hns_mac_is_adjust_link - check is need change mac speed and duplex register
+ *@mac_cb: mac device
+ *@speed: phy device speed
+ *@duplex:phy device duplex
+ *
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+       struct mac_driver *mac_ctrl_drv;
+
+       mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+       if (mac_ctrl_drv->need_adjust_link)
+               return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+                       (enum mac_speed)speed, duplex);
+       else
+               return true;
+}
+
 void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
 {
        int ret;
@@ -430,6 +450,16 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
        return 0;
 }
 
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+       struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+       if (drv->wait_fifo_clean)
+               return drv->wait_fifo_clean(drv);
+
+       return 0;
+}
+
 void hns_mac_reset(struct hns_mac_cb *mac_cb)
 {
        struct mac_driver *drv = hns_mac_get_drv(mac_cb);
@@ -998,6 +1028,20 @@ static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
                return  DSAF_MAX_PORT_NUM;
 }
 
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+       struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+       mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
 /**
  * hns_mac_init - init mac
  * @dsaf_dev: dsa fabric device struct pointer
index bbc0a98..fbc7534 100644 (file)
@@ -356,6 +356,9 @@ struct mac_driver {
        /*adjust mac mode of port,include speed and duplex*/
        int (*adjust_link)(void *mac_drv, enum mac_speed speed,
                           u32 full_duplex);
+       /* need adjust link */
+       bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+                                int duplex);
        /* config autoegotaite mode of port*/
        void (*set_an_mode)(void *mac_drv, u8 enable);
        /* config loopbank mode */
@@ -394,6 +397,7 @@ struct mac_driver {
        void (*get_info)(void *mac_drv, struct mac_info *mac_info);
 
        void (*update_stats)(void *mac_drv);
+       int (*wait_fifo_clean)(void *mac_drv);
 
        enum mac_mode mac_mode;
        u8 mac_id;
@@ -427,6 +431,7 @@ void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
 
 int hns_mac_init(struct dsaf_device *dsaf_dev);
 void mac_adjust_link(struct net_device *net_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
 void hns_mac_get_link_status(struct hns_mac_cb *mac_cb,        u32 *link_status);
 int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
 int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
@@ -463,5 +468,8 @@ int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
 int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
                       const unsigned char *addr);
 int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
 
 #endif /* _HNS_DSAF_MAC_H */
index ca50c25..e557a4e 100644 (file)
@@ -2727,6 +2727,35 @@ void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
        soft_mac_entry->index = enable ? entry_index : DSAF_INVALID_ENTRY_IDX;
 }
 
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+       u32 val, val_tmp;
+       int wait_cnt;
+
+       if (port >= DSAF_SERVICE_NW_NUM)
+               return 0;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+                       (port + DSAF_XGE_NUM) * 0x40);
+               if (val == val_tmp)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+                       val, val_tmp);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * dsaf_probe - probo dsaf dev
  * @pdev: dasf platform device
index 4507e82..0e1cd99 100644 (file)
@@ -44,6 +44,8 @@ struct hns_mac_cb;
 #define DSAF_ROCE_CREDIT_CHN   8
 #define DSAF_ROCE_CHAN_MODE    3
 
+#define HNS_MAX_WAIT_CNT 10000
+
 enum dsaf_roce_port_mode {
        DSAF_ROCE_6PORT_MODE,
        DSAF_ROCE_4PORT_MODE,
@@ -463,5 +465,6 @@ int hns_dsaf_rm_mac_addr(
 
 int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
                             u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
index d160d8c..0942e49 100644 (file)
@@ -275,6 +275,29 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
        dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_vlue & vld_msk);
 }
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+       int wait_cnt;
+       u32 val;
+
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+               if (!val)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+                       val);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  * ppe_init_hw - init ppe
  * @ppe_cb: ppe device
index 9d8e643..f670e63 100644 (file)
@@ -100,6 +100,7 @@ struct ppe_common_cb {
 
 };
 
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
 int hns_ppe_init(struct dsaf_device *dsaf_dev);
 
 void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
index 9d76e2e..5d64519 100644 (file)
@@ -66,6 +66,29 @@ void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
                        "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
 }
 
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+       u32 head, tail;
+       int wait_cnt;
+
+       tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+       wait_cnt = 0;
+       while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+               head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+               if (tail == head)
+                       break;
+
+               usleep_range(100, 200);
+       }
+
+       if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+               dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /**
  *hns_rcb_reset_ring_hw - ring reset
  *@q: ring struct pointer
index 6028164..2319b77 100644 (file)
@@ -136,6 +136,7 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
 void hns_rcb_init_hw(struct ring_pair_cb *ring);
 void hns_rcb_reset_ring_hw(struct hnae_queue *q);
 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
 u32 hns_rcb_get_rx_coalesced_frames(
        struct rcb_common_cb *rcb_common, u32 port_idx);
 u32 hns_rcb_get_tx_coalesced_frames(
index 886cbbf..74d935d 100644 (file)
 #define RCB_RING_INTMSK_TX_OVERTIME_REG                0x000C4
 #define RCB_RING_INTSTS_TX_OVERTIME_REG                0x000C8
 
+#define GMAC_FIFO_STATE_REG                    0x0000UL
 #define GMAC_DUPLEX_TYPE_REG                   0x0008UL
 #define GMAC_FD_FC_TYPE_REG                    0x000CUL
 #define GMAC_TX_WATER_LINE_REG                 0x0010UL
index 9f2b552..28e9078 100644 (file)
@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
        (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
 
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-                        int size, dma_addr_t dma, int frag_end,
-                        int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+                           int send_sz, dma_addr_t dma, int frag_end,
+                           int buf_num, enum hns_desc_type type, int mtu)
 {
        struct hnae_desc *desc = &ring->desc[ring->next_to_use];
        struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        desc_cb->type = type;
 
        desc->addr = cpu_to_le64(dma);
-       desc->tx.send_size = cpu_to_le16((u16)size);
+       desc->tx.send_size = cpu_to_le16((u16)send_sz);
 
        /* config bd buffer end */
        hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
        ring_ptr_move_fw(ring, next_to_use);
 }
 
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+                        int size, dma_addr_t dma, int frag_end,
+                        int buf_num, enum hns_desc_type type, int mtu)
+{
+       fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+                       buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
        { "HISI00C1", 0 },
        { "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
 
        /* when the frag size is bigger than hardware, split this frag */
        for (k = 0; k < frag_buf_num; k++)
-               fill_v2_desc(ring, priv,
-                            (k == frag_buf_num - 1) ?
+               fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+                               (k == frag_buf_num - 1) ?
                                        sizeoflast : BD_MAX_SEND_SIZE,
-                            dma + BD_MAX_SEND_SIZE * k,
-                            frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-                            buf_num,
-                            (type == DESC_TYPE_SKB && !k) ?
+                               dma + BD_MAX_SEND_SIZE * k,
+                               frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+                               buf_num,
+                               (type == DESC_TYPE_SKB && !k) ?
                                        DESC_TYPE_SKB : DESC_TYPE_PAGE,
-                            mtu);
+                               mtu);
 }
 
 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -406,113 +414,13 @@ out_net_tx_busy:
        return NETDEV_TX_BUSY;
 }
 
-/**
- * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
- * @data: pointer to the start of the headers
- * @max: total length of section to find headers in
- *
- * This function is meant to determine the length of headers that will
- * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
- * motivation of doing this is to only perform one pull for IPv4 TCP
- * packets so that we can do basic things like calculating the gso_size
- * based on the average data per packet.
- **/
-static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
-                                       unsigned int max_size)
-{
-       unsigned char *network;
-       u8 hlen;
-
-       /* this should never happen, but better safe than sorry */
-       if (max_size < ETH_HLEN)
-               return max_size;
-
-       /* initialize network frame pointer */
-       network = data;
-
-       /* set first protocol and move network header forward */
-       network += ETH_HLEN;
-
-       /* handle any vlan tag if present */
-       if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
-               == HNS_RX_FLAG_VLAN_PRESENT) {
-               if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
-                       return max_size;
-
-               network += VLAN_HLEN;
-       }
-
-       /* handle L3 protocols */
-       if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-               == HNS_RX_FLAG_L3ID_IPV4) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct iphdr)))
-                       return max_size;
-
-               /* access ihl as a u8 to avoid unaligned access on ia64 */
-               hlen = (network[0] & 0x0F) << 2;
-
-               /* verify hlen meets minimum size requirements */
-               if (hlen < sizeof(struct iphdr))
-                       return network - data;
-
-               /* record next protocol if header is present */
-       } else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
-               == HNS_RX_FLAG_L3ID_IPV6) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct ipv6hdr)))
-                       return max_size;
-
-               /* record next protocol */
-               hlen = sizeof(struct ipv6hdr);
-       } else {
-               return network - data;
-       }
-
-       /* relocate pointer to start of L4 header */
-       network += hlen;
-
-       /* finally sort out TCP/UDP */
-       if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-               == HNS_RX_FLAG_L4ID_TCP) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct tcphdr)))
-                       return max_size;
-
-               /* access doff as a u8 to avoid unaligned access on ia64 */
-               hlen = (network[12] & 0xF0) >> 2;
-
-               /* verify hlen meets minimum size requirements */
-               if (hlen < sizeof(struct tcphdr))
-                       return network - data;
-
-               network += hlen;
-       } else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
-               == HNS_RX_FLAG_L4ID_UDP) {
-               if ((typeof(max_size))(network - data) >
-                   (max_size - sizeof(struct udphdr)))
-                       return max_size;
-
-               network += sizeof(struct udphdr);
-       }
-
-       /* If everything has gone correctly network should be the
-        * data section of the packet and will be the end of the header.
-        * If not then it probably represents the end of the last recognized
-        * header.
-        */
-       if ((typeof(max_size))(network - data) < max_size)
-               return network - data;
-       else
-               return max_size;
-}
-
 static void hns_nic_reuse_page(struct sk_buff *skb, int i,
                               struct hnae_ring *ring, int pull_len,
                               struct hnae_desc_cb *desc_cb)
 {
        struct hnae_desc *desc;
-       int truesize, size;
+       u32 truesize;
+       int size;
        int last_offset;
        bool twobufs;
 
@@ -530,7 +438,7 @@ static void hns_nic_reuse_page(struct sk_buff *skb, int i,
        }
 
        skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
-                       size - pull_len, truesize - pull_len);
+                       size - pull_len, truesize);
 
         /* avoid re-using remote pages,flag default unreuse */
        if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
@@ -695,7 +603,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
        } else {
                ring->stats.seg_pkt_cnt++;
 
-               pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
+               pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
                memcpy(__skb_put(skb, pull_len), va,
                       ALIGN(pull_len, sizeof(long)));
 
@@ -1212,11 +1120,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
        struct hnae_handle *h = priv->ae_handle;
        int state = 1;
 
+       /* If there is no phy, do not need adjust link */
        if (ndev->phydev) {
-               h->dev->ops->adjust_link(h, ndev->phydev->speed,
-                                        ndev->phydev->duplex);
-               state = ndev->phydev->link;
+               /* When phy link down, do nothing */
+               if (ndev->phydev->link == 0)
+                       return;
+
+               if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+                                                 ndev->phydev->duplex)) {
+                       /* because Hi161X chip don't support to change gmac
+                        * speed and duplex with traffic. Delay 200ms to
+                        * make sure there is no more data in chip FIFO.
+                        */
+                       netif_carrier_off(ndev);
+                       msleep(200);
+                       h->dev->ops->adjust_link(h, ndev->phydev->speed,
+                                                ndev->phydev->duplex);
+                       netif_carrier_on(ndev);
+               }
        }
+
        state = state && h->dev->ops->get_status(h);
 
        if (state != priv->link) {
@@ -1580,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
        return phy_mii_ioctl(phy_dev, ifr, cmd);
 }
 
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-       struct hns_nic_priv *priv = netdev_priv(ndev);
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-       for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-               napi_schedule(&priv->ring_data[i].napi);
-       local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
                                    struct net_device *ndev)
 {
@@ -2047,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_set_features = hns_nic_set_features,
        .ndo_fix_features = hns_nic_fix_features,
        .ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hns_nic_poll_controller,
-#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
        .ndo_select_queue = hns_nic_select_queue,
 };
index 08f3c47..774beda 100644 (file)
@@ -243,7 +243,9 @@ static int hns_nic_set_link_ksettings(struct net_device *net_dev,
        }
 
        if (h->dev->ops->adjust_link) {
+               netif_carrier_off(net_dev);
                h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+               netif_carrier_on(net_dev);
                return 0;
        }
 
index 3554dca..955c4ab 100644 (file)
@@ -2019,7 +2019,8 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
                                struct hns3_desc_cb *desc_cb)
 {
        struct hns3_desc *desc;
-       int truesize, size;
+       u32 truesize;
+       int size;
        int last_offset;
        bool twobufs;
 
index a02a96a..cb450d7 100644 (file)
@@ -284,11 +284,11 @@ struct hns3_desc_cb {
 
        /* priv data for the desc, e.g. skb when use with ip stack*/
        void *priv;
-       u16 page_offset;
-       u16 reuse_flag;
-
+       u32 page_offset;
        u32 length;     /* length of the buffer */
 
+       u16 reuse_flag;
+
        /* desc type, used by the ring user to mark the type of the priv data */
        u16 type;
 };
index c8c7ad2..9b5a68b 100644 (file)
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
                /* Wait for link to drop */
                time = jiffies + (HZ / 10);
                do {
-                       if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+                       if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
                                break;
                        if (!in_interrupt())
                                schedule_timeout_interruptible(1);
index 09e9da1..4a8f829 100644 (file)
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
        stats->tx_errors  = nic_tx_stats->tx_dropped;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-       struct hinic_dev *nic_dev = netdev_priv(netdev);
-       int i, num_qps;
-
-       num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-       for (i = 0; i < num_qps; i++) {
-               struct hinic_txq *txq = &nic_dev->txqs[i];
-               struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-               napi_schedule(&txq->napi);
-               napi_schedule(&rxq->napi);
-       }
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
        .ndo_open = hinic_open,
        .ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
        .ndo_start_xmit = hinic_xmit_frame,
        .ndo_tx_timeout = hinic_tx_timeout,
        .ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = hinic_netpoll,
-#endif
 };
 
 static void netdev_features_init(struct net_device *netdev)
index dc98345..35f6291 100644 (file)
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
 #define RX_AREA_END    0x0fc00
 
 static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+                                    struct net_device *dev);
 static irqreturn_t ether1_interrupt(int irq, void *dev_id);
 static int ether1_close(struct net_device *dev);
 static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
-static int
+static netdev_tx_t
 ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
 {
        int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
index f00a1dc..2f7ae11 100644 (file)
@@ -347,7 +347,7 @@ static const char init_setup[] =
        0x7f /*  *multi IA */ };
 
 static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t i596_interrupt(int irq, void *dev_id);
 static int i596_close(struct net_device *dev);
 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
 }
 
 
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct i596_private *lp = netdev_priv(dev);
        struct tx_cmd *tx_cmd;
index 8bb15a8..1a86184 100644 (file)
@@ -121,7 +121,8 @@ static int     sun3_82586_probe1(struct net_device *dev,int ioaddr);
 static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
 static int     sun3_82586_open(struct net_device *dev);
 static int     sun3_82586_close(struct net_device *dev);
-static int     sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t     sun3_82586_send_packet(struct sk_buff *,
+                                             struct net_device *);
 static struct  net_device_stats *sun3_82586_get_stats(struct net_device *dev);
 static void    set_multicast_list(struct net_device *dev);
 static void    sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
  * send frame
  */
 
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
        int len,i;
 #ifndef NO_NOPCOMMANDS
index ba580bf..03f64f4 100644 (file)
@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
        return rx;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-       struct ehea_port *port = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < port->num_def_qps; i++)
-               napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
        struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_open               = ehea_open,
        .ndo_stop               = ehea_stop,
        .ndo_start_xmit         = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ehea_netpoll,
-#endif
        .ndo_get_stats64        = ehea_get_stats64,
        .ndo_set_mac_address    = ehea_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
index 354c098..129f4e9 100644 (file)
@@ -494,9 +494,6 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
        case 16384:
                ret |= EMAC_MR1_RFS_16K;
                break;
-       case 8192:
-               ret |= EMAC4_MR1_RFS_8K;
-               break;
        case 4096:
                ret |= EMAC_MR1_RFS_4K;
                break;
@@ -537,6 +534,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
        case 16384:
                ret |= EMAC4_MR1_RFS_16K;
                break;
+       case 8192:
+               ret |= EMAC4_MR1_RFS_8K;
+               break;
        case 4096:
                ret |= EMAC4_MR1_RFS_4K;
                break;
@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
                if (of_phy_is_fixed_link(np)) {
                        int res = emac_dt_mdio_probe(dev);
 
-                       if (!res) {
-                               res = of_phy_register_fixed_link(np);
-                               if (res)
-                                       mdiobus_unregister(dev->mii_bus);
+                       if (res)
+                               return res;
+
+                       res = of_phy_register_fixed_link(np);
+                       dev->phy_dev = of_phy_find_device(np);
+                       if (res || !dev->phy_dev) {
+                               mdiobus_unregister(dev->mii_bus);
+                               return res ? res : -EINVAL;
                        }
-                       return res;
+                       emac_adjust_link(dev->ndev);
+                       put_device(&dev->phy_dev->mdio.dev);
                }
                return 0;
        }
index dafdd4a..699ef94 100644 (file)
@@ -1823,11 +1823,17 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        adapter->map_id = 1;
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
-                       init_rx_pools(netdev);
-                       init_tx_pools(netdev);
+                       rc = init_rx_pools(netdev);
+                       if (rc)
+                               return rc;
+                       rc = init_tx_pools(netdev);
+                       if (rc)
+                               return rc;
 
                        release_napi(adapter);
-                       init_napi(adapter);
+                       rc = init_napi(adapter);
+                       if (rc)
+                               return rc;
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
@@ -2201,19 +2207,6 @@ restart_poll:
        return frames_processed;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-       struct ibmvnic_adapter *adapter = netdev_priv(dev);
-       int i;
-
-       replenish_pools(netdev_priv(dev));
-       for (i = 0; i < adapter->req_rx_queues; i++)
-               ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-                                    adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
        int rc, ret;
@@ -2286,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
        .ndo_set_mac_address    = ibmvnic_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ibmvnic_netpoll_controller,
-#endif
        .ndo_change_mtu         = ibmvnic_change_mtu,
        .ndo_features_check     = ibmvnic_features_check,
 };
index bdb3f8e..2569a16 100644 (file)
@@ -624,14 +624,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->tx_ring = tx_old;
                e1000_free_all_rx_resources(adapter);
                e1000_free_all_tx_resources(adapter);
-               kfree(tx_old);
-               kfree(rx_old);
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
                if (err)
                        goto err_setup;
        }
+       kfree(tx_old);
+       kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return 0;
@@ -644,7 +644,8 @@ err_setup_rx:
 err_alloc_rx:
        kfree(txdr);
 err_alloc_tx:
-       e1000_up(adapter);
+       if (netif_running(adapter->netdev))
+               e1000_up(adapter);
 err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
index a903a0b..7d42582 100644 (file)
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
 void fm10k_service_event_schedule(struct fm10k_intfc *interface);
 void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
 void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
 
 /* Netdev */
 struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
index 929f538..538a846 100644 (file)
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_udp_tunnel_del     = fm10k_udp_tunnel_del,
        .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
        .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = fm10k_netpoll,
-#endif
        .ndo_features_check     = fm10k_features_check,
 };
 
index 15071e4..c859aba 100644 (file)
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- *  fm10k_netpoll - A Polling 'interrupt' handler
- *  @netdev: network interface device structure
- *
- *  This is used by netconsole to send skbs without having to re-enable
- *  interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
-       struct fm10k_intfc *interface = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__FM10K_DOWN, interface->state))
-               return;
-
-       for (i = 0; i < interface->num_q_vectors; i++)
-               fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
 static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
                               struct fm10k_fault *fault)
index abcd096..5ff6caa 100644 (file)
@@ -2013,7 +2013,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
                i40e_add_stat_strings(&data, i40e_gstrings_pfc_stats, i);
 
-       WARN_ONCE(p - data != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
+       WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN,
                  "stat strings count mismatch!");
 }
 
index f2c622e..ac685ad 100644 (file)
@@ -5122,15 +5122,17 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
                                       u8 *bw_share)
 {
        struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
+       struct i40e_pf *pf = vsi->back;
        i40e_status ret;
        int i;
 
-       if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
+       /* There is no need to reset BW when mqprio mode is on.  */
+       if (pf->flags & I40E_FLAG_TC_MQPRIO)
                return 0;
-       if (!vsi->mqprio_qopt.qopt.hw) {
+       if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
                ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
                if (ret)
-                       dev_info(&vsi->back->pdev->dev,
+                       dev_info(&pf->pdev->dev,
                                 "Failed to reset tx rate for vsi->seid %u\n",
                                 vsi->seid);
                return ret;
@@ -5139,12 +5141,11 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
        for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
                bw_data.tc_bw_credits[i] = bw_share[i];
 
-       ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
-                                      NULL);
+       ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
        if (ret) {
-               dev_info(&vsi->back->pdev->dev,
+               dev_info(&pf->pdev->dev,
                         "AQ command Config VSI BW allocation per TC failed = %d\n",
-                        vsi->back->hw.aq.asq_last_status);
+                        pf->hw.aq.asq_last_status);
                return -EINVAL;
        }
 
index 5906c1c..fef6d89 100644 (file)
@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
        adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts.  It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
-       struct i40evf_adapter *adapter = netdev_priv(netdev);
-       int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
-               return;
-
-       for (i = 0; i < q_vectors; i++)
-               i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
 /**
  * i40evf_irq_affinity_notify - Callback for affinity changes
  * @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
        .ndo_features_check     = i40evf_features_check,
        .ndo_fix_features       = i40evf_fix_features,
        .ndo_set_features       = i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = i40evf_netpoll,
-#endif
        .ndo_setup_tc           = i40evf_setup_tc,
 };
 
index d8b5fff..868f4a1 100644 (file)
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
        for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+       for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+       for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
        u16 qoffset;
        u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
        struct list_head tmp_sync_list;         /* MAC filters to be synced */
        struct list_head tmp_unsync_list;       /* MAC filters to be unsynced */
 
-       bool irqs_ready;
-       bool current_isup;               /* Sync 'link up' logging */
-       bool stat_offsets_loaded;
+       u8 irqs_ready;
+       u8 current_isup;                 /* Sync 'link up' logging */
+       u8 stat_offsets_loaded;
 
        /* queue information */
        u8 tx_mapping_mode;              /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
        struct ice_hw_port_stats stats;
        struct ice_hw_port_stats stats_prev;
        struct ice_hw hw;
-       bool stat_prev_loaded;  /* has previous stats been loaded */
+       u8 stat_prev_loaded;    /* has previous stats been loaded */
        char int_name[ICE_INT_NAME_STR_LEN];
 };
 
index 7541ec2..a0614f4 100644 (file)
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
        /* VLAN section */
        __le16 pvid; /* VLANS include priority bits */
        u8 pvlan_reserved[2];
-       u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S        0
-#define ICE_AQ_VSI_PVLAN_MODE_M        (0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED 0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED   0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL      0x3
+       u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S 0
+#define ICE_AQ_VSI_VLAN_MODE_M (0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED  0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED    0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL       0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID   BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S        3
-#define ICE_AQ_VSI_PVLAN_EMOD_M        (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP   (0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR      (0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING  (0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S         3
+#define ICE_AQ_VSI_VLAN_EMOD_M         (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH  (0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP    (0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR       (0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING   (0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
        u8 pvlan_reserved2[3];
        /* ingress egress up sections */
        __le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M    (0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S  22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M  (0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX        7
 
        /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT          0x7
index 71d032c..661beea 100644 (file)
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
        struct ice_phy_info *phy_info;
        enum ice_status status = 0;
 
-       if (!pi)
+       if (!pi || !link_up)
                return ICE_ERR_PARAM;
 
        phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
        }
 
        /* LUT size is only valid for Global and PF table types */
-       if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-               flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-                        ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+       switch (lut_size) {
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+               break;
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
                flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
                          ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-                  (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-               flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-                         ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-                        ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-       } else {
+               break;
+       case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+               if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+                       flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+                                 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+                                ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+                       break;
+               }
+               /* fall-through */
+       default:
                status = ICE_ERR_PARAM;
                goto ice_aq_get_set_rss_lut_exit;
        }
index 7c511f1..62be72f 100644 (file)
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
        return 0;
 
 init_ctrlq_free_rq:
-       ice_shutdown_rq(hw, cq);
-       ice_shutdown_sq(hw, cq);
-       mutex_destroy(&cq->sq_lock);
-       mutex_destroy(&cq->rq_lock);
+       if (cq->rq.head) {
+               ice_shutdown_rq(hw, cq);
+               mutex_destroy(&cq->rq_lock);
+       }
+       if (cq->sq.head) {
+               ice_shutdown_sq(hw, cq);
+               mutex_destroy(&cq->sq_lock);
+       }
        return status;
 }
 
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
                return;
        }
 
-       ice_shutdown_sq(hw, cq);
-       ice_shutdown_rq(hw, cq);
-       mutex_destroy(&cq->sq_lock);
-       mutex_destroy(&cq->rq_lock);
+       if (cq->sq.head) {
+               ice_shutdown_sq(hw, cq);
+               mutex_destroy(&cq->sq_lock);
+       }
+       if (cq->rq.head) {
+               ice_shutdown_rq(hw, cq);
+               mutex_destroy(&cq->rq_lock);
+       }
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
        /* Set pending if needed, unlock and return */
-       if (pending)
+       if (pending) {
+               /* re-read HW head to calculate actual pending messages */
+               ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+       }
 clean_rq_elem_err:
        mutex_unlock(&cq->rq_lock);
 
index 1db304c..c71a9b5 100644 (file)
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
 
-       return ((np->vsi->num_txq + np->vsi->num_rxq) *
+       return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
                (sizeof(struct ice_q_stats) / sizeof(u64)));
 }
 
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                }
 
-               ice_for_each_txq(vsi, i) {
+               ice_for_each_alloc_txq(vsi, i) {
                        snprintf(p, ETH_GSTRING_LEN,
                                 "tx-queue-%u.tx_packets", i);
                        p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
                        p += ETH_GSTRING_LEN;
                }
 
-               ice_for_each_rxq(vsi, i) {
+               ice_for_each_alloc_rxq(vsi, i) {
                        snprintf(p, ETH_GSTRING_LEN,
                                 "rx-queue-%u.rx_packets", i);
                        p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
        switch (sset) {
        case ETH_SS_STATS:
+               /* The number (and order) of strings reported *must* remain
+                * constant for a given netdevice. This function must not
+                * report a different number based on run time parameters
+                * (such as the number of queues in use, or the setting of
+                * a private ethtool flag). This is due to the nature of the
+                * ethtool stats API.
+                *
+                * User space programs such as ethtool must make 3 separate
+                * ioctl requests, one for size, one for the strings, and
+                * finally one for the stats. Since these cross into
+                * user space, changes to the number or size could result in
+                * undefined memory access or incorrect string<->value
+                * correlations for statistics.
+                *
+                * Even if it appears to be safe, changes to the size or
+                * order of strings will suffer from race conditions and are
+                * not safe.
+                */
                return ICE_ALL_STATS_LEN(netdev);
        default:
                return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
        /* populate per queue stats */
        rcu_read_lock();
 
-       ice_for_each_txq(vsi, j) {
+       ice_for_each_alloc_txq(vsi, j) {
                ring = READ_ONCE(vsi->tx_rings[j]);
-               if (!ring)
-                       continue;
-               data[i++] = ring->stats.pkts;
-               data[i++] = ring->stats.bytes;
+               if (ring) {
+                       data[i++] = ring->stats.pkts;
+                       data[i++] = ring->stats.bytes;
+               } else {
+                       data[i++] = 0;
+                       data[i++] = 0;
+               }
        }
 
-       ice_for_each_rxq(vsi, j) {
+       ice_for_each_alloc_rxq(vsi, j) {
                ring = READ_ONCE(vsi->rx_rings[j]);
-               data[i++] = ring->stats.pkts;
-               data[i++] = ring->stats.bytes;
+               if (ring) {
+                       data[i++] = ring->stats.pkts;
+                       data[i++] = ring->stats.bytes;
+               } else {
+                       data[i++] = 0;
+                       data[i++] = 0;
+               }
        }
 
        rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
                goto done;
        }
 
-       for (i = 0; i < vsi->num_txq; i++) {
+       for (i = 0; i < vsi->alloc_txq; i++) {
                /* clone ring and setup updated count */
                tx_rings[i] = *vsi->tx_rings[i];
                tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ process_rx:
                goto done;
        }
 
-       for (i = 0; i < vsi->num_rxq; i++) {
+       for (i = 0; i < vsi->alloc_rxq; i++) {
                /* clone ring and setup updated count */
                rx_rings[i] = *vsi->rx_rings[i];
                rx_rings[i].count = new_rx_cnt;
index 4999048..6076fc8 100644 (file)
 #define PFINT_FW_CTL_CAUSE_ENA_S       30
 #define PFINT_FW_CTL_CAUSE_ENA_M       BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR                     0x0016CA00
-#define PFINT_OICR_HLP_RDY_S           14
-#define PFINT_OICR_HLP_RDY_M           BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S           15
-#define PFINT_OICR_CPM_RDY_M           BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S           16
 #define PFINT_OICR_ECC_ERR_M           BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S                19
 #define PFINT_OICR_GRST_M              BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S     21
 #define PFINT_OICR_PCI_EXCEPTION_M     BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S              22
-#define PFINT_OICR_GPIO_M              BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S      24
-#define PFINT_OICR_STORM_DETECT_M      BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S           26
 #define PFINT_OICR_HMC_ERR_M           BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S                28
index d23a916..068dbc7 100644 (file)
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
        u16 head;
        u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
        u64 base;
        u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7
index 5299caf..3f047bb 100644 (file)
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
                case ice_aqc_opc_get_link_status:
                        if (ice_handle_link_event(pf))
                                dev_err(&pf->pdev->dev,
-                                       "Could not handle link event");
+                                       "Could not handle link event\n");
                        break;
                default:
                        dev_dbg(&pf->pdev->dev,
@@ -917,13 +917,27 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 }
 
 /**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+       u16 ntu;
+
+       ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+       return cq->rq.next_to_clean != ntu;
+}
+
+/**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
  */
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
        struct ice_hw *hw = &pf->hw;
-       u32 val;
 
        if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
                return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
        clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-       /* re-enable Admin queue interrupt causes */
-       val = rd32(hw, PFINT_FW_CTL);
-       wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+       /* There might be a situation where new messages arrive to a control
+        * queue between processing the last message and clearing the
+        * EVENT_PENDING bit. So before exiting, check queue head again (using
+        * ice_ctrlq_pending) and process new messages if any.
+        */
+       if (ice_ctrlq_pending(hw, &hw->adminq))
+               __ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
        ice_flush(hw);
 }
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
                qcount = numq_tc;
        }
 
-       /* find higher power-of-2 of qcount */
-       pow = ilog2(qcount);
-
-       if (!is_power_of_2(qcount))
-               pow++;
+       /* find the (rounded up) power-of-2 of qcount */
+       pow = order_base_2(qcount);
 
        for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
                if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
        ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
        /* Traffic from VSI can be sent to LAN */
        ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-       /* Allow all packets untagged/tagged */
-       ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-                                      ICE_AQ_VSI_PVLAN_MODE_M) >>
-                                     ICE_AQ_VSI_PVLAN_MODE_S);
-       /* Show VLAN/UP from packets in Rx descriptors */
-       ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-                                       ICE_AQ_VSI_PVLAN_EMOD_M) >>
-                                      ICE_AQ_VSI_PVLAN_EMOD_S);
+
+       /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+        * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+        * packets untagged/tagged.
+        */
+       ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+                                 ICE_AQ_VSI_VLAN_MODE_M) >>
+                                ICE_AQ_VSI_VLAN_MODE_S);
+
        /* Have 1:1 UP mapping for both ingress/egress tables */
        table |= ICE_UP_TABLE_TRANSLATE(0, 0);
        table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -1688,15 +1704,12 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
        wr32(hw, PFINT_OICR_ENA, 0);    /* disable all */
        rd32(hw, PFINT_OICR);           /* read to clear */
 
-       val = (PFINT_OICR_HLP_RDY_M |
-              PFINT_OICR_CPM_RDY_M |
-              PFINT_OICR_ECC_ERR_M |
+       val = (PFINT_OICR_ECC_ERR_M |
               PFINT_OICR_MAL_DETECT_M |
               PFINT_OICR_GRST_M |
               PFINT_OICR_PCI_EXCEPTION_M |
-              PFINT_OICR_GPIO_M |
-              PFINT_OICR_STORM_DETECT_M |
-              PFINT_OICR_HMC_ERR_M);
+              PFINT_OICR_HMC_ERR_M |
+              PFINT_OICR_PE_CRITERR_M);
 
        wr32(hw, PFINT_OICR_ENA, val);
 
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
        ice_ena_misc_vector(pf);
 
-       val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-             (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-             PFINT_OICR_CTL_CAUSE_ENA_M;
+       val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+              PFINT_OICR_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_OICR_CTL, val);
 
        /* This enables Admin queue Interrupt causes */
-       val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-             (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-             PFINT_FW_CTL_CAUSE_ENA_M;
+       val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+              PFINT_FW_CTL_CAUSE_ENA_M);
        wr32(hw, PFINT_FW_CTL, val);
 
        itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
        if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
                ice_dis_msix(pf);
 
-       devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-       pf->irq_tracker = NULL;
+       if (pf->irq_tracker) {
+               devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+               pf->irq_tracker = NULL;
+       }
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 
        err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
        if (err) {
-               dev_err(&pdev->dev, "I/O map error %d\n", err);
+               dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
                return err;
        }
 
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
        enum ice_status status;
 
        /* Here we are configuring the VSI to let the driver add VLAN tags by
-        * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-        * tag insertion happens in the Tx hot path, in ice_tx_map.
+        * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+        * insertion happens in the Tx hot path, in ice_tx_map.
         */
-       ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
                return -EIO;
        }
 
-       vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
        return 0;
 }
 
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
         */
        if (ena) {
                /* Strip VLAN tag from Rx packet and put it in the desc */
-               ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
        } else {
                /* Disable stripping. Leave tag in packet */
-               ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+               ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
        }
 
+       /* Allow all packets untagged/tagged */
+       ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
        ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        ctxt.vsi_num = vsi->vsi_num;
 
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
                return -EIO;
        }
 
-       vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+       vsi->info.vlan_flags = ctxt.info.vlan_flags;
        return 0;
 }
 
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
        /* clear the context structure first */
        memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-       rlan_ctx.base = ring->dma >> 7;
+       rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 
        rlan_ctx.qlen = ring->count;
 
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
        int err;
 
-       ice_set_rx_mode(vsi->netdev);
-
-       err = ice_restore_vlan(vsi);
-       if (err)
-               return err;
+       if (vsi->netdev) {
+               ice_set_rx_mode(vsi->netdev);
+               err = ice_restore_vlan(vsi);
+               if (err)
+                       return err;
+       }
 
        err = ice_vsi_cfg_txqs(vsi);
        if (!err)
@@ -4789,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
        stats->rx_length_errors = vsi_stats->rx_length_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
-       struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_vsi *vsi = np->vsi;
-       struct ice_pf *pf = vsi->back;
-       int i;
-
-       if (test_bit(__ICE_DOWN, vsi->state) ||
-           !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-               return;
-
-       for (i = 0; i < vsi->num_q_vectors; i++)
-               ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
  * @vsi: VSI having NAPI disabled
@@ -4868,7 +4861,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, err = 0;
 
        if (!vsi->num_txq) {
                dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4886,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-       int i, err;
+       int i, err = 0;
 
        if (!vsi->num_rxq) {
                dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5228,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
        u8 count = 0;
 
        if (new_mtu == netdev->mtu) {
-               netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+               netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
                return 0;
        }
 
@@ -5480,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
        .ndo_validate_addr = eth_validate_addr,
        .ndo_change_mtu = ice_change_mtu,
        .ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
        .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
        .ndo_set_features = ice_set_features,
index 92da0a6..295a8cd 100644 (file)
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
  *
  * This function will request NVM ownership.
  */
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
-                          enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
        if (hw->nvm.blank_nvm_mode)
                return 0;
index 2e6c1d9..eeae199 100644 (file)
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
                        return status;
        }
 
-       if (owner == ICE_SCHED_NODE_OWNER_LAN)
-               vsi->max_lanq[tc] = new_numqs;
+       vsi->max_lanq[tc] = new_numqs;
 
        return status;
 }
index 723d15f..6b7ec2a 100644 (file)
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
        act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
        lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 
-       act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+       act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+              ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 
        /* Third action Marker value */
        act |= ICE_LG_ACT_GENERIC;
        act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
                ICE_LG_ACT_GENERIC_VALUE_M;
 
-       act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
        lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
        /* call the fill switch rule to fill the lookup tx rx structure */
index 6f4a0d1..9b8ec12 100644 (file)
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
        u16 vsis_unallocated;
        u16 flags;
        struct ice_aqc_vsi_props info;
-       bool alloc_from_pool;
+       u8 alloc_from_pool;
 };
 
 enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
        u8 qgrp_size;
 
        /* Rule creations populate these indicators basing on the switch type */
-       bool lb_en;     /* Indicate if packet can be looped back */
-       bool lan_en;    /* Indicate if packet can be forwarded to the uplink */
+       u8 lb_en;       /* Indicate if packet can be looped back */
+       u8 lan_en;      /* Indicate if packet can be forwarded to the uplink */
 };
 
 /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
index 567067b..31bc998 100644 (file)
@@ -143,7 +143,7 @@ struct ice_ring {
        u16 next_to_use;
        u16 next_to_clean;
 
-       bool ring_active;               /* is ring online or not */
+       u8 ring_active;                 /* is ring online or not */
 
        /* stats structs */
        struct ice_q_stats      stats;
index 99c8a9a..97c366e 100644 (file)
@@ -83,7 +83,7 @@ struct ice_link_status {
        u64 phy_type_low;
        u16 max_frame_size;
        u16 link_speed;
-       bool lse_ena;   /* Link Status Event notification */
+       u8 lse_ena;     /* Link Status Event notification */
        u8 link_info;
        u8 an_info;
        u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
        struct ice_link_status link_info_old;
        u64 phy_type_low;
        enum ice_media_type media_type;
-       bool get_link_info;
+       u8 get_link_info;
 };
 
 /* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
        u32 oem_ver;              /* OEM version info */
        u16 sr_words;             /* Shadow RAM size in words */
        u16 ver;                  /* NVM package version */
-       bool blank_nvm_mode;      /* is NVM empty (no FW present) */
+       u8 blank_nvm_mode;        /* is NVM empty (no FW present) */
 };
 
 /* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
        struct ice_aqc_txsched_elem_data info;
        u32 agg_id;                     /* aggregator group id */
        u16 vsi_id;
-       bool in_use;                    /* suspended or in use */
+       u8 in_use;                      /* suspended or in use */
        u8 tx_sched_layer;              /* Logical Layer (1-9) */
        u8 num_children;
        u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
 struct ice_sched_tx_policy {
        u16 max_num_vsis;
        u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
-       bool rdma_ena;
+       u8 rdma_ena;
 };
 
 struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
        struct list_head agg_list;      /* lists all aggregator */
        u8 lport;
 #define ICE_LPORT_MASK         0xff
-       bool is_vf;
+       u8 is_vf;
 };
 
 struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
        u8 max_cgds;
        u8 sw_entry_point_layer;
 
-       bool evb_veb;           /* true for VEB, false for VEPA */
+       u8 evb_veb;             /* true for VEB, false for VEPA */
        struct ice_bus_info bus;
        struct ice_nvm_info nvm;
        struct ice_hw_dev_caps dev_caps;        /* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
        u8 itr_gran_100;
        u8 itr_gran_50;
        u8 itr_gran_25;
-       bool ucast_shared;      /* true if VSIs can share unicast addr */
+       u8 ucast_shared;        /* true if VSIs can share unicast addr */
 
 };
 
index f92f791..5acf3b7 100644 (file)
@@ -1649,7 +1649,7 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
        if (hw->phy.type == e1000_phy_m88)
                igb_phy_disable_receiver(adapter);
 
-       mdelay(500);
+       msleep(500);
        return 0;
 }
 
index d03c2f0..0796cef 100644 (file)
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
        .priority       = 0
 };
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
        .ndo_set_vf_spoofchk    = igb_ndo_set_vf_spoofchk,
        .ndo_set_vf_trust       = igb_ndo_set_vf_trust,
        .ndo_get_vf_config      = igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = igb_netpoll,
-#endif
        .ndo_fix_features       = igb_fix_features,
        .ndo_set_features       = igb_set_features,
        .ndo_fdb_add            = igb_ndo_fdb_add,
@@ -3873,7 +3866,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
        adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
                                     sizeof(struct igb_mac_addr),
-                                    GFP_ATOMIC);
+                                    GFP_KERNEL);
        if (!adapter->mac_table)
                return -ENOMEM;
 
@@ -3883,7 +3876,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
 
        /* Setup and initialize a copy of the hw vlan table array */
        adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
-                                      GFP_ATOMIC);
+                                      GFP_KERNEL);
        if (!adapter->shadow_vfta)
                return -ENOMEM;
 
@@ -5816,7 +5809,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 
        if (skb->ip_summed != CHECKSUM_PARTIAL) {
 csum_failed:
-               if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+               if (!(first->tx_flags & IGB_TX_FLAGS_VLAN) &&
+                   !tx_ring->launchtime_enable)
                        return;
                goto no_csum;
        }
@@ -9052,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
        return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
-       struct igb_adapter *adapter = netdev_priv(netdev);
-       struct e1000_hw *hw = &adapter->hw;
-       struct igb_q_vector *q_vector;
-       int i;
-
-       for (i = 0; i < adapter->num_q_vectors; i++) {
-               q_vector = adapter->q_vector[i];
-               if (adapter->flags & IGB_FLAG_HAS_MSIX)
-                       wr32(E1000_EIMC, q_vector->eims_value);
-               else
-                       igb_irq_disable(adapter);
-               napi_schedule(&q_vector->napi);
-       }
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  *  igb_io_error_detected - called when PCI error is detected
  *  @pdev: Pointer to PCI device
index 43664ad..7722153 100644 (file)
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
                                 __be16 proto, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
                              enum pci_channel_state state);
 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
        .ndo_tx_timeout         = ixgb_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgb_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgb_netpoll,
-#endif
        .ndo_fix_features       = ixgb_fix_features,
        .ndo_set_features       = ixgb_set_features,
 };
@@ -771,14 +763,13 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
        rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
        rxdr->size = ALIGN(rxdr->size, 4096);
 
-       rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
-                                       GFP_KERNEL);
+       rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+                                        GFP_KERNEL);
 
        if (!rxdr->desc) {
                vfree(rxdr->buffer_info);
                return -ENOMEM;
        }
-       memset(rxdr->desc, 0, rxdr->size);
 
        rxdr->next_to_clean = 0;
        rxdr->next_to_use = 0;
@@ -2196,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
                ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
-       struct ixgb_adapter *adapter = netdev_priv(dev);
-
-       disable_irq(adapter->pdev->irq);
-       ixgb_intr(adapter->pdev->irq, dev);
-       enable_irq(adapter->pdev->irq);
-}
-#endif
-
 /**
  * ixgb_io_error_detected - called when PCI error is detected
  * @pdev:    pointer to pci device with error
index 94b3165..ccd852a 100644 (file)
@@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
        }
 
        /* alloc the udl from per cpu ddp pool */
-       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
        if (!ddp->udl) {
                e_err(drv, "failed allocated ddp context\n");
                goto out_noddp_unmap;
@@ -760,7 +760,7 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
                return 0;
 
        /* Extra buffer to be shared by all DDPs for HW work around */
-       buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+       buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
index 4470980..6cdd58d 100644 (file)
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                return budget;
 
        /* all work done, exit the polling mode */
-       napi_complete_done(napi, work_done);
-       if (adapter->rx_itr_setting & 1)
-               ixgbe_set_itr(q_vector);
-       if (!test_bit(__IXGBE_DOWN, &adapter->state))
-               ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+       if (likely(napi_complete_done(napi, work_done))) {
+               if (adapter->rx_itr_setting & 1)
+                       ixgbe_set_itr(q_vector);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       ixgbe_irq_enable_queues(adapter,
+                                               BIT_ULL(q_vector->v_idx));
+       }
 
        return min(work_done, budget - 1);
 }
@@ -6201,7 +6203,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 
        adapter->mac_table = kcalloc(hw->mac.num_rar_entries,
                                     sizeof(struct ixgbe_mac_addr),
-                                    GFP_ATOMIC);
+                                    GFP_KERNEL);
        if (!adapter->mac_table)
                return -ENOMEM;
 
@@ -6620,8 +6622,18 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        if (adapter->xdp_prog) {
-               e_warn(probe, "MTU cannot be changed while XDP program is loaded\n");
-               return -EPERM;
+               int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
+                                    VLAN_HLEN;
+               int i;
+
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+                       struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+                       if (new_frame_size > ixgbe_rx_bufsz(ring)) {
+                               e_warn(probe, "Requested MTU size is not supported with XDP\n");
+                               return -EINVAL;
+                       }
+               }
        }
 
        /*
@@ -8758,28 +8770,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
        return err;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__IXGBE_DOWN, &adapter->state))
-               return;
-
-       /* loop through and schedule all active queues */
-       for (i = 0; i < adapter->num_q_vectors; i++)
-               ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
 static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
                                   struct ixgbe_ring *ring)
 {
@@ -8983,6 +8973,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
 
 #ifdef CONFIG_IXGBE_DCB
        if (tc) {
+               if (adapter->xdp_prog) {
+                       e_warn(probe, "DCB is not supported with XDP\n");
+
+                       ixgbe_init_interrupt_scheme(adapter);
+                       if (netif_running(dev))
+                               ixgbe_open(dev);
+                       return -EINVAL;
+               }
+
                netdev_set_num_tc(dev, tc);
                ixgbe_set_prio_tc_map(adapter);
 
@@ -9171,14 +9170,12 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
                            struct tcf_exts *exts, u64 *action, u8 *queue)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
+       int i;
 
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
-
+       tcf_exts_for_each_action(i, a, exts) {
                /* Drop action */
                if (is_tcf_gact_shot(a)) {
                        *action = IXGBE_FDIR_DROP_QUEUE;
@@ -9936,6 +9933,11 @@ static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
        int tcs = adapter->hw_tcs ? : 1;
        int pool, err;
 
+       if (adapter->xdp_prog) {
+               e_warn(probe, "L2FW offload is not supported with XDP\n");
+               return ERR_PTR(-EINVAL);
+       }
+
        /* The hardware supported by ixgbe only filters on the destination MAC
         * address. In order to avoid issues we only support offloading modes
         * where the hardware can actually provide the functionality.
@@ -10229,9 +10231,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_get_vf_config      = ixgbe_ndo_get_vf_config,
        .ndo_get_stats64        = ixgbe_get_stats64,
        .ndo_setup_tc           = __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgbe_netpoll,
-#endif
 #ifdef IXGBE_FCOE
        .ndo_select_queue       = ixgbe_select_queue,
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
index 6f59933..3c6f01c 100644 (file)
@@ -53,6 +53,11 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        int i;
 
+       if (adapter->xdp_prog) {
+               e_warn(probe, "SRIOV is not supported with XDP\n");
+               return -EINVAL;
+       }
+
        /* Enable VMDq flag so device will be set in VM mode */
        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED |
                          IXGBE_FLAG_VMDQ_ENABLED;
@@ -688,8 +693,13 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
 static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
        struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+       u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
        u8 num_tcs = adapter->hw_tcs;
+       u32 reg_val;
+       u32 queue;
+       u32 word;
 
        /* remove VLAN filters beloning to this VF */
        ixgbe_clear_vf_vlans(adapter, vf);
@@ -726,6 +736,27 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
 
        /* reset VF api back to unknown */
        adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
+
+       /* Restart each queue for given VF */
+       for (queue = 0; queue < q_per_pool; queue++) {
+               unsigned int reg_idx = (vf * q_per_pool) + queue;
+
+               reg_val = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(reg_idx));
+
+               /* Re-enabling only configured queues */
+               if (reg_val) {
+                       reg_val |= IXGBE_TXDCTL_ENABLE;
+                       IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+                       reg_val &= ~IXGBE_TXDCTL_ENABLE;
+                       IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(reg_idx), reg_val);
+               }
+       }
+
+       /* Clear VF's mailbox memory */
+       for (word = 0; word < IXGBE_VFMAILBOX_SIZE; word++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf), word, 0);
+
+       IXGBE_WRITE_FLUSH(hw);
 }
 
 static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
index 44cfb20..41bcbb3 100644 (file)
@@ -2518,6 +2518,7 @@ enum {
 /* Translated register #defines */
 #define IXGBE_PVFTDH(P)                (0x06010 + (0x40 * (P)))
 #define IXGBE_PVFTDT(P)                (0x06018 + (0x40 * (P)))
+#define IXGBE_PVFTXDCTL(P)     (0x06028 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAL(P)     (0x06038 + (0x40 * (P)))
 #define IXGBE_PVFTDWBAH(P)     (0x0603C + (0x40 * (P)))
 
index d86446d..5a22858 100644 (file)
@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
        return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbevf_netpoll(struct net_device *netdev)
-{
-       struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-       int i;
-
-       /* if interface is down do nothing */
-       if (test_bit(__IXGBEVF_DOWN, &adapter->state))
-               return;
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
        .ndo_tx_timeout         = ixgbevf_tx_timeout,
        .ndo_vlan_rx_add_vid    = ixgbevf_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ixgbevf_netpoll,
-#endif
        .ndo_features_check     = ixgbevf_features_check,
        .ndo_bpf                = ixgbevf_xdp,
 };
index 7a637b5..e08301d 100644 (file)
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
                struct ltq_etop_chan *ch = &priv->ch[i];
 
                ch->idx = ch->dma.nr = i;
+               ch->dma.dev = &priv->pdev->dev;
 
                if (IS_TX(i)) {
                        ltq_dma_alloc_tx(&ch->dma);
index bc80a67..b4ed7d3 100644 (file)
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
                if (!data || !(rx_desc->buf_phys_addr))
                        continue;
 
-               dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-                                MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+               dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+                              PAGE_SIZE, DMA_FROM_DEVICE);
                __free_page(data);
        }
 }
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                                skb_add_rx_frag(rxq->skb, frag_num, page,
                                                frag_offset, frag_size,
                                                PAGE_SIZE);
-                               dma_unmap_single(dev->dev.parent, phys_addr,
-                                                PAGE_SIZE, DMA_FROM_DEVICE);
+                               dma_unmap_page(dev->dev.parent, phys_addr,
+                                              PAGE_SIZE, DMA_FROM_DEVICE);
                                rxq->left_size -= frag_size;
                        }
                } else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                                                frag_offset, frag_size,
                                                PAGE_SIZE);
 
-                               dma_unmap_single(dev->dev.parent, phys_addr,
-                                                PAGE_SIZE,
-                                                DMA_FROM_DEVICE);
+                               dma_unmap_page(dev->dev.parent, phys_addr,
+                                              PAGE_SIZE, DMA_FROM_DEVICE);
 
                                rxq->left_size -= frag_size;
                        }
index 32d785b..a74002b 100644 (file)
@@ -58,6 +58,8 @@ static struct {
  */
 static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
                             const struct phylink_link_state *state);
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+                             phy_interface_t interface, struct phy_device *phy);
 
 /* Queue modes */
 #define MVPP2_QDIST_SINGLE_MODE        0
@@ -1723,7 +1725,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }
 
 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
                               int ip_hdr_len, int l4_proto)
 {
        u32 command;
@@ -2598,14 +2600,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                u8 l4_proto;
+               __be16 l3_proto = vlan_get_protocol(skb);
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);
 
                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
-               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
                        /* Read l4_protocol from one of IPv6 extra headers */
@@ -2617,7 +2620,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
                }
 
                return mvpp2_txq_desc_csum(skb_network_offset(skb),
-                               skb->protocol, ip_hdr_len, l4_proto);
+                                          l3_proto, ip_hdr_len, l4_proto);
        }
 
        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
@@ -3053,10 +3056,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
                                   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
        }
 
-       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
-       if (cause_tx) {
-               cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
-               mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+       if (port->has_tx_irqs) {
+               cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+               if (cause_tx) {
+                       cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+                       mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+               }
        }
 
        /* Process RX packets */
@@ -3142,6 +3147,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
                mvpp22_mode_reconfigure(port);
 
        if (port->phylink) {
+               netif_carrier_off(port->dev);
                phylink_start(port->phylink);
        } else {
                /* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3150,9 +3156,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
                 */
                struct phylink_link_state state = {
                        .interface = port->phy_interface,
-                       .link = 1,
                };
                mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
+               mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
+                                 NULL);
        }
 
        netif_tx_start_all_queues(port->dev);
@@ -4495,10 +4502,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
                return;
        }
 
-       netif_tx_stop_all_queues(port->dev);
-       if (!port->has_phy)
-               netif_carrier_off(port->dev);
-
        /* Make sure the port is disabled when reconfiguring the mode */
        mvpp2_port_disable(port);
 
@@ -4523,16 +4526,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
        if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
                mvpp2_port_loopback_set(port, state);
 
-       /* If the port already was up, make sure it's still in the same state */
-       if (state->link || !port->has_phy) {
-               mvpp2_port_enable(port);
-
-               mvpp2_egress_enable(port);
-               mvpp2_ingress_enable(port);
-               if (!port->has_phy)
-                       netif_carrier_on(dev);
-               netif_tx_wake_all_queues(dev);
-       }
+       mvpp2_port_enable(port);
 }
 
 static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
@@ -4803,6 +4797,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        dev->min_mtu = ETH_MIN_MTU;
        /* 9704 == 9728 - 20 and rounding to 8 */
        dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
+       dev->dev.of_node = port_node;
 
        /* Phylink isn't used w/ ACPI as of now */
        if (port_node) {
index 6785661..fe49384 100644 (file)
@@ -1286,20 +1286,6 @@ out:
        mutex_unlock(&mdev->state_lock);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       struct mlx4_en_cq *cq;
-       int i;
-
-       for (i = 0; i < priv->tx_ring_num[TX]; i++) {
-               cq = priv->tx_cq[TX][i];
-               napi_schedule(&cq->napi);
-       }
-}
-#endif
-
 static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
 {
        u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_tx_timeout         = mlx4_en_tx_timeout,
        .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mlx4_en_netpoll,
-#endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_set_vf_link_state  = mlx4_en_set_vf_link_state,
        .ndo_get_vf_stats       = mlx4_en_get_vf_stats,
        .ndo_get_vf_config      = mlx4_en_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mlx4_en_netpoll,
-#endif
        .ndo_set_features       = mlx4_en_set_features,
        .ndo_fix_features       = mlx4_en_fix_features,
        .ndo_setup_tc           = __mlx4_en_setup_tc,
index 1f3372c..2df92db 100644 (file)
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_eq *eq = &priv->eq_table.eq[vec];
 
-       if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+       if (!cpumask_available(eq->affinity_mask) ||
+           cpumask_empty(eq->affinity_mask))
                return;
 
        hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
index 3ce14d4..a53736c 100644 (file)
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
        u8 own;
 
        do {
-               own = ent->lay->status_own;
+               own = READ_ONCE(ent->lay->status_own);
                if (!(own & CMD_OWNER_HW)) {
                        ent->ret = 0;
                        return;
index b994b80..37ba7c7 100644 (file)
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
        delayed_event_start(priv);
 
        dev_ctx->context = intf->add(dev);
-       set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-       if (intf->attach)
-               set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
        if (dev_ctx->context) {
+               set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+               if (intf->attach)
+                       set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
        if (intf->attach) {
                if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
                        goto out;
-               intf->attach(dev, dev_ctx->context);
+               if (intf->attach(dev, dev_ctx->context))
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
        } else {
                if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
                        goto out;
                dev_ctx->context = intf->add(dev);
+               if (!dev_ctx->context)
+                       goto out;
+
                set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
        }
 
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
                }
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-       return (u16)((dev->pdev->bus->number << 8) |
+       return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+                    (dev->pdev->bus->number << 8) |
                     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-       u16 pci_id = mlx5_gen_pci_id(dev);
+       u32 pci_id = mlx5_gen_pci_id(dev);
        struct mlx5_core_dev *res = NULL;
        struct mlx5_core_dev *tmp_dev;
        struct mlx5_priv *priv;
index db2cfcd..0f189f8 100644 (file)
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
index bbf69e8..1431232 100644 (file)
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 
        DECLARE_HASHTABLE(mod_hdr_tbl, 8);
        DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+       struct notifier_block     netdevice_nb;
 };
 
 struct mlx5e_flow_table {
index eddd770..e88340e 100644 (file)
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
 
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
 {
-       u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
        struct net_device *netdev = priv->netdev;
+       u32 caps;
 
        if (!mlx5_accel_is_tls_device(priv->mdev))
                return;
 
+       caps = mlx5_accel_tls_device_caps(priv->mdev);
        if (caps & MLX5_ACCEL_TLS_TX) {
                netdev->features          |= NETIF_F_HW_TLS_TX;
                netdev->hw_features       |= NETIF_F_HW_TLS_TX;
index 75bb981..41cde92 100644 (file)
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
 {
        if (psrc_m) {
                MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
-               MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+               MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
        }
 
        if (pdst_m) {
index 5a7939e..f291d1b 100644 (file)
@@ -4315,23 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        }
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
- * reenabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
-       struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5e_channels *chs = &priv->channels;
-
-       int i;
-
-       for (i = 0; i < chs->num; i++)
-               napi_schedule(&chs->c[i]->napi);
-}
-#endif
-
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
        .ndo_start_xmit          = mlx5e_xmit,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
 #ifdef CONFIG_MLX5_EN_ARFS
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller     = mlx5e_netpoll,
-#endif
 #ifdef CONFIG_MLX5_ESWITCH
        /* SRIOV E-Switch NDOs */
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
index 9131a13..8579672 100644 (file)
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        *match_level = MLX5_MATCH_L2;
                }
+       } else {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1982,14 +1985,15 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                goto out_ok;
 
        modify_ip_header = false;
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
+               int k;
+
                if (!is_tcf_pedit(a))
                        continue;
 
                nkeys = tcf_pedit_nkeys(a);
-               for (i = 0; i < nkeys; i++) {
-                       htype = tcf_pedit_htype(a, i);
+               for (k = 0; k < nkeys; k++) {
+                       htype = tcf_pedit_htype(a, k);
                        if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
                            htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
                                modify_ip_header = true;
@@ -2053,15 +2057,14 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        const struct tc_action *a;
        LIST_HEAD(actions);
        u32 action = 0;
-       int err;
+       int err, i;
 
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
 
        attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_shot(a)) {
                        action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
@@ -2666,7 +2669,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        LIST_HEAD(actions);
        bool encap = false;
        u32 action = 0;
-       int err;
+       int err, i;
 
        if (!tcf_exts_has_actions(exts))
                return -EINVAL;
@@ -2674,8 +2677,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        attr->in_rep = rpriv->rep;
        attr->in_mdev = priv->mdev;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_shot(a)) {
                        action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                  MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -2947,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        return 0;
 }
 
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+                                             struct mlx5e_priv *peer_priv)
+{
+       struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+       struct mlx5e_hairpin_entry *hpe;
+       u16 peer_vhca_id;
+       int bkt;
+
+       if (!same_hw_devs(priv, peer_priv))
+               return;
+
+       peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+       hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+               if (hpe->peer_vhca_id == peer_vhca_id)
+                       hpe->hp->pair->peer_gone = true;
+       }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+                                unsigned long event, void *ptr)
+{
+       struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+       struct mlx5e_flow_steering *fs;
+       struct mlx5e_priv *peer_priv;
+       struct mlx5e_tc_table *tc;
+       struct mlx5e_priv *priv;
+
+       if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+           event != NETDEV_UNREGISTER ||
+           ndev->reg_state == NETREG_REGISTERED)
+               return NOTIFY_DONE;
+
+       tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+       fs = container_of(tc, struct mlx5e_flow_steering, tc);
+       priv = container_of(fs, struct mlx5e_priv, fs);
+       peer_priv = netdev_priv(ndev);
+       if (priv == peer_priv ||
+           !(priv->netdev->features & NETIF_F_HW_TC))
+               return NOTIFY_DONE;
+
+       mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+       return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
+       int err;
 
        hash_init(tc->mod_hdr_tbl);
        hash_init(tc->hairpin_tbl);
 
-       return rhashtable_init(&tc->ht, &tc_ht_params);
+       err = rhashtable_init(&tc->ht, &tc_ht_params);
+       if (err)
+               return err;
+
+       tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+       if (register_netdevice_notifier(&tc->netdevice_nb)) {
+               tc->netdevice_nb.notifier_call = NULL;
+               mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+       }
+
+       return err;
 }
 
 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2970,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
 
+       if (tc->netdevice_nb.notifier_call)
+               unregister_netdevice_notifier(&tc->netdevice_nb);
+
        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
 
        if (!IS_ERR_OR_NULL(tc->t)) {
index 2b252cd..ea7dedc 100644 (file)
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
        u32 max_guarantee = 0;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled || evport->info.min_rate < max_guarantee)
                        continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
        int err;
        int i;
 
-       for (i = 0; i <= esw->total_vports; i++) {
+       for (i = 0; i < esw->total_vports; i++) {
                evport = &esw->vports[i];
                if (!evport->enabled)
                        continue;
index f72b5c9..3028e8d 100644 (file)
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
        if (err)
                goto miss_rule_err;
 
+       kvfree(flow_group_in);
        return 0;
 
 miss_rule_err:
index f418541..37d114c 100644 (file)
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
        return version;
 }
 
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+                 u32 *match_value,
+                 bool take_write)
+{
+       struct fs_fte *fte_tmp;
+
+       if (take_write)
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+       else
+               nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+       fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+                                        rhash_fte);
+       if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+               fte_tmp = NULL;
+               goto out;
+       }
+
+       nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+       if (take_write)
+               up_write_ref_node(&g->node);
+       else
+               up_read_ref_node(&g->node);
+       return fte_tmp;
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
                       struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
        if (IS_ERR(fte))
                return  ERR_PTR(-ENOMEM);
 
-       list_for_each_entry(iter, match_head, list) {
-               nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
-       }
-
 search_again_locked:
        version = matched_fgs_get_version(match_head);
        /* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
                struct fs_fte *fte_tmp;
 
                g = iter->g;
-               fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
-                                                rhash_fte);
-               if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+               fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+               if (!fte_tmp)
                        continue;
-
-               nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-               if (!take_write) {
-                       list_for_each_entry(iter, match_head, list)
-                               up_read_ref_node(&iter->g->node);
-               } else {
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
-               }
-
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte_tmp);
                up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
                return rule;
        }
 
-       /* No group with matching fte found. Try to add a new fte to any
-        * matching fg.
-        */
-
-       if (!take_write) {
-               list_for_each_entry(iter, match_head, list)
-                       up_read_ref_node(&iter->g->node);
-               list_for_each_entry(iter, match_head, list)
-                       nested_down_write_ref_node(&iter->g->node,
-                                                  FS_LOCK_PARENT);
-               take_write = true;
-       }
-
        /* Check the ft version, for case that new flow group
         * was added while the fgs weren't locked
         */
@@ -1657,27 +1656,30 @@ search_again_locked:
        /* Check the fgs version, for case the new FTE with the
         * same values was added while the fgs weren't locked
         */
-       if (version != matched_fgs_get_version(match_head))
+       if (version != matched_fgs_get_version(match_head)) {
+               take_write = true;
                goto search_again_locked;
+       }
 
        list_for_each_entry(iter, match_head, list) {
                g = iter->g;
 
                if (!g->node.active)
                        continue;
+
+               nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
                err = insert_fte(g, fte);
                if (err) {
+                       up_write_ref_node(&g->node);
                        if (err == -ENOSPC)
                                continue;
-                       list_for_each_entry(iter, match_head, list)
-                               up_write_ref_node(&iter->g->node);
                        kmem_cache_free(steering->ftes_cache, fte);
                        return ERR_PTR(err);
                }
 
                nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-               list_for_each_entry(iter, match_head, list)
-                       up_write_ref_node(&iter->g->node);
+               up_write_ref_node(&g->node);
                rule = add_rule_fg(g, spec->match_value,
                                   flow_act, dest, dest_num, fte);
                up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
        }
        rule = ERR_PTR(-ENOENT);
 out:
-       list_for_each_entry(iter, match_head, list)
-               up_write_ref_node(&iter->g->node);
        kmem_cache_free(steering->ftes_cache, fte);
        return rule;
 }
@@ -1726,6 +1726,8 @@ search_again_locked:
        if (err) {
                if (take_write)
                        up_write_ref_node(&ft->node);
+               else
+                       up_read_ref_node(&ft->node);
                return ERR_PTR(err);
        }
 
index d39b0b7..9f39aec 100644 (file)
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
        add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
        struct mlx5_core_health *health = &dev->priv.health;
+       unsigned long flags;
+
+       if (disable_health) {
+               spin_lock_irqsave(&health->wq_lock, flags);
+               set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+               set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+               spin_unlock_irqrestore(&health->wq_lock, flags);
+       }
 
        del_timer_sync(&health->timer);
 }
index cf3e4a6..b5e9f66 100644 (file)
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        priv->numa_node = dev_to_node(&dev->pdev->dev);
 
        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-       if (!priv->dbg_root)
+       if (!priv->dbg_root) {
+               dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
                return -ENOMEM;
+       }
 
        err = mlx5_pci_enable_device(dev);
        if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        mlx5_pci_disable_device(dev);
-       debugfs_remove(priv->dbg_root);
+       debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
                mlx5_cleanup_once(dev);
 
 err_stop_poll:
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, boot);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        mlx5_free_irq_vectors(dev);
        if (cleanup)
                mlx5_cleanup_once(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, cleanup);
        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
         * with the HCA, so the health polll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
-       mlx5_stop_health_poll(dev);
+       mlx5_stop_health_poll(dev, false);
 
        ret = mlx5_cmd_force_teardown_hca(dev);
        if (ret) {
index dae1c5c..a1ee9a8 100644 (file)
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 
        for (i = 0; i < hp->num_channels; i++) {
                mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+               if (!hp->peer_gone)
+                       mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
        }
 }
 
@@ -509,7 +510,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
 
        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
 
-       if (next_state == MLX5_RQC_STATE_RDY) {
+       if (next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
                MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
        }
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
                                       MLX5_RQC_STATE_RST, 0, 0);
 
        /* unset peer SQs */
+       if (hp->peer_gone)
+               return;
        for (i = 0; i < hp->num_channels; i++)
                mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
                                       MLX5_SQC_STATE_RST, 0, 0);
index 86478a6..68e7f8d 100644 (file)
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
        return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
 {
-       return (u32)wq->fbc.frag_sz_m1 + 1;
+       return wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,15 +138,16 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
                      struct mlx5_wq_ctrl *wq_ctrl)
 {
-       u32 sq_strides_offset;
+       u16 sq_strides_offset;
+       u32 rq_pg_remainder;
        int err;
 
        mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
                      MLX5_GET(qpc, qpc, log_rq_size),
                      &wq->rq.fbc);
 
-       sq_strides_offset =
-               ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+       rq_pg_remainder   = mlx5_wq_cyc_get_byte_size(&wq->rq) % PAGE_SIZE;
+       sq_strides_offset = rq_pg_remainder / MLX5_SEND_WQE_BB;
 
        mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
                             MLX5_GET(qpc, qpc, log_sq_size),
index 2bd4c31..3a1a170 100644 (file)
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                       void *wqc, struct mlx5_wq_cyc *wq,
                       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                      void *qpc, struct mlx5_wq_qp *wq,
index 4d271fb..5890fdf 100644 (file)
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
        memset(&active_cqns, 0, sizeof(active_cqns));
 
        while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
-               u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
 
-               switch (event_type) {
-               case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+               /* Command interface completion events are always received on
+                * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+                * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+                */
+               switch (q->num) {
+               case MLXSW_PCI_EQ_ASYNC_NUM:
                        mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
                        q->u.eq.ev_cmd_count++;
                        break;
-               case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+               case MLXSW_PCI_EQ_COMP_NUM:
                        cqn = mlxsw_pci_eqe_cqn_get(eqe);
                        set_bit(cqn, active_cqns);
                        cq_handle = true;
index 6070d15..30bb2c5 100644 (file)
@@ -44,8 +44,8 @@
 #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
 
 #define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1702
-#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_MINOR 1703
+#define MLXSW_SP1_FWREV_SUBMINOR 4
 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
 
 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -1346,8 +1346,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
                return -ENOMEM;
        mall_tc_entry->cookie = f->cookie;
 
-       tcf_exts_to_list(f->exts, &actions);
-       a = list_first_entry(&actions, struct tc_action, list);
+       a = tcf_exts_first_action(f->exts);
 
        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
@@ -4846,6 +4845,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
                upper_dev = info->upper_dev;
                if (info->linking)
                        break;
+               if (is_vlan_dev(upper_dev))
+                       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
                if (netif_is_macvlan(upper_dev))
                        mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
                break;
index 3ae9301..3cdb7ac 100644 (file)
@@ -414,6 +414,8 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
 void
 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+                                struct net_device *dev);
 
 /* spectrum_kvdl.c */
 enum mlxsw_sp_kvdl_entry_type {
index 4327487..3589432 100644 (file)
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
        MLXSW_SP_SB_CM(1500, 9, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
-       MLXSW_SP_SB_CM(0, 0, 0),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
+       MLXSW_SP_SB_CM(0, 140000, 15),
        MLXSW_SP_SB_CM(1, 0xff, 0),
 };
 
index ebd1b24..8d21197 100644 (file)
@@ -21,8 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                         struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
-       LIST_HEAD(actions);
-       int err;
+       int err, i;
 
        if (!tcf_exts_has_actions(exts))
                return 0;
@@ -32,8 +31,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
        if (err)
                return err;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                if (is_tcf_gact_ok(a)) {
                        err = mlxsw_sp_acl_rulei_act_terminate(rulei);
                        if (err) {
index 3a96307..2ab9cf2 100644 (file)
@@ -6234,6 +6234,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
        mlxsw_sp_vr_put(mlxsw_sp, vr);
 }
 
+void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
+                                struct net_device *dev)
+{
+       struct mlxsw_sp_rif *rif;
+
+       rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+       if (!rif)
+               return;
+       mlxsw_sp_rif_destroy(rif);
+}
+
 static void
 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
                                 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
index 0d8444a..db715da 100644 (file)
@@ -127,6 +127,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
        return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 }
 
+static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
+                                                   void *data)
+{
+       struct mlxsw_sp *mlxsw_sp = data;
+
+       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+       return 0;
+}
+
+static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
+                                               struct net_device *dev)
+{
+       mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
+       netdev_walk_all_upper_dev_rcu(dev,
+                                     mlxsw_sp_bridge_device_upper_rif_destroy,
+                                     mlxsw_sp);
+}
+
 static struct mlxsw_sp_bridge_device *
 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
                              struct net_device *br_dev)
@@ -165,6 +183,8 @@ static void
 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
                               struct mlxsw_sp_bridge_device *bridge_device)
 {
+       mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
+                                           bridge_device->dev);
        list_del(&bridge_device->list);
        if (bridge_device->vlan_enabled)
                bridge->vlan_enabled_exists = false;
index e7dce79..001b5f7 100644 (file)
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
        lan743x_hardware_cleanup(adapter);
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
 {
        return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev)
 static const struct dev_pm_ops lan743x_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
 };
-#endif /*CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct pci_device_id lan743x_pcidev_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = {
        .id_table = lan743x_pcidev_tbl,
        .probe    = lan743x_pcidev_probe,
        .remove   = lan743x_pcidev_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
        .driver.pm = &lan743x_pm_ops,
 #endif
        .shutdown = lan743x_pcidev_shutdown,
index 26bb3b1..3cdf63e 100644 (file)
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
                struct sk_buff *skb;
                struct net_device *dev;
                u32 *buf;
-               int sz, len;
+               int sz, len, buf_len;
                u32 ifh[4];
                u32 val;
                struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
                        err = -ENOMEM;
                        break;
                }
-               buf = (u32 *)skb_put(skb, info.len);
+               buf_len = info.len - ETH_FCS_LEN;
+               buf = (u32 *)skb_put(skb, buf_len);
 
                len = 0;
                do {
                        sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
                        *buf++ = val;
                        len += sz;
-               } while ((sz == 4) && (len < info.len));
+               } while (len < buf_len);
+
+               /* Read the FCS and discard it */
+               sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+               /* Update the statistics if part of the FCS was read before */
+               len -= ETH_FCS_LEN - sz;
 
                if (sz < 0) {
                        err = sz;
index 0ba0356..46ba0cf 100644 (file)
@@ -52,6 +52,7 @@
 #define NFP_FL_TUNNEL_CSUM                     cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY                      cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT               cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS     IP_TUNNEL_INFO_TX
 #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS    (NFP_FL_TUNNEL_CSUM | \
                                                 NFP_FL_TUNNEL_KEY | \
                                                 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
                nfp_fl_push_vlan(psh_v, a);
                *a_len += sizeof(struct nfp_fl_push_vlan);
        } else if (is_tcf_tunnel_set(a)) {
+               struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
                struct nfp_repr *repr = netdev_priv(netdev);
+
                *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
                if (*tun_type == NFP_FL_TUNNEL_NONE)
                        return -EOPNOTSUPP;
 
+               if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+                       return -EOPNOTSUPP;
+
                /* Pre-tunnel action is required for tunnel encap.
                 * This checks for next hop entries on NFP.
                 * If none, the packet falls back before applying other actions.
@@ -796,11 +802,10 @@ int nfp_flower_compile_action(struct nfp_app *app,
                              struct net_device *netdev,
                              struct nfp_fl_payload *nfp_flow)
 {
-       int act_len, act_cnt, err, tun_out_cnt, out_cnt;
+       int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
        enum nfp_flower_tun_type tun_type;
        const struct tc_action *a;
        u32 csum_updated = 0;
-       LIST_HEAD(actions);
 
        memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
        nfp_flow->meta.act_len = 0;
@@ -810,8 +815,7 @@ int nfp_flower_compile_action(struct nfp_app *app,
        tun_out_cnt = 0;
        out_cnt = 0;
 
-       tcf_exts_to_list(flow->exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, flow->exts) {
                err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
                                             netdev, &tun_type, &tun_out_cnt,
                                             &out_cnt, &csum_updated);
index 85f8209..81d941a 100644 (file)
@@ -70,6 +70,7 @@ struct nfp_app;
 #define NFP_FL_FEATS_GENEVE            BIT(0)
 #define NFP_FL_NBI_MTU_SETTING         BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT                BIT(2)
+#define NFP_FL_FEATS_VLAN_PCP          BIT(3)
 #define NFP_FL_FEATS_LAG               BIT(31)
 
 struct nfp_fl_mask_id {
index a0c72f2..17acb8c 100644 (file)
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      target);
                /* Populate the tci field. */
-               if (flow_vlan->vlan_id) {
+               if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
                        tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
                                             flow_vlan->vlan_priority) |
                                  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
index 2edab01..bd19624 100644 (file)
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }
 
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+               struct flow_dissector_key_vlan *flow_vlan;
+
+               flow_vlan = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_VLAN,
+                                                     flow->mask);
+               if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
+                   flow_vlan->vlan_priority)
+                       return -EOPNOTSUPP;
+       }
+
        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
index a8b9fba..c6d29fd 100644 (file)
@@ -229,29 +229,16 @@ done:
        spin_unlock_bh(&nn->reconfig_lock);
 }
 
-/**
- * nfp_net_reconfig() - Reconfigure the firmware
- * @nn:      NFP Net device to reconfigure
- * @update:  The value for the update field in the BAR config
- *
- * Write the update word to the BAR and ping the reconfig queue.  The
- * poll until the firmware has acknowledged the update by zeroing the
- * update word.
- *
- * Return: Negative errno on error, 0 on success
- */
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
 {
        bool cancelled_timer = false;
        u32 pre_posted_requests;
-       int ret;
 
        spin_lock_bh(&nn->reconfig_lock);
 
        nn->reconfig_sync_present = true;
 
        if (nn->reconfig_timer_active) {
-               del_timer(&nn->reconfig_timer);
                nn->reconfig_timer_active = false;
                cancelled_timer = true;
        }
@@ -260,14 +247,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
 
        spin_unlock_bh(&nn->reconfig_lock);
 
-       if (cancelled_timer)
+       if (cancelled_timer) {
+               del_timer_sync(&nn->reconfig_timer);
                nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+       }
 
        /* Run the posted reconfigs which were issued before we started */
        if (pre_posted_requests) {
                nfp_net_reconfig_start(nn, pre_posted_requests);
                nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
        }
+}
+
+static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
+{
+       nfp_net_reconfig_sync_enter(nn);
+
+       spin_lock_bh(&nn->reconfig_lock);
+       nn->reconfig_sync_present = false;
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig() - Reconfigure the firmware
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Write the update word to the BAR and ping the reconfig queue.  The
+ * poll until the firmware has acknowledged the update by zeroing the
+ * update word.
+ *
+ * Return: Negative errno on error, 0 on success
+ */
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+       int ret;
+
+       nfp_net_reconfig_sync_enter(nn);
 
        nfp_net_reconfig_start(nn, update);
        ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
@@ -2061,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
        return true;
 }
 
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
 {
        struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
        struct nfp_net *nn = r_vec->nfp_net;
        struct nfp_net_dp *dp = &nn->dp;
+       unsigned int budget = 512;
 
-       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+       while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
                continue;
+
+       return budget;
 }
 
 static void nfp_ctrl_poll(unsigned long arg)
@@ -2080,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
        __nfp_ctrl_tx_queued(r_vec);
        spin_unlock_bh(&r_vec->lock);
 
-       nfp_ctrl_rx(r_vec);
-
-       nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       if (nfp_ctrl_rx(r_vec)) {
+               nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+       } else {
+               tasklet_schedule(&r_vec->tasklet);
+               nn_dp_warn(&r_vec->nfp_net->dp,
+                          "control message budget exceeded!\n");
+       }
 }
 
 /* Setup and Configuration
@@ -3130,21 +3153,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nfp_net_netpoll(struct net_device *netdev)
-{
-       struct nfp_net *nn = netdev_priv(netdev);
-       int i;
-
-       /* nfp_net's NAPIs are statically allocated so even if there is a race
-        * with reconfig path this will simply try to schedule some disabled
-        * NAPI instances.
-        */
-       for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
-               napi_schedule_irqoff(&nn->r_vecs[i].napi);
-}
-#endif
-
 static void nfp_net_stat64(struct net_device *netdev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3503,9 +3511,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_get_stats64        = nfp_net_stat64,
        .ndo_vlan_rx_add_vid    = nfp_net_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = nfp_net_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = nfp_net_netpoll,
-#endif
        .ndo_set_vf_mac         = nfp_app_set_vf_mac,
        .ndo_set_vf_vlan        = nfp_app_set_vf_vlan,
        .ndo_set_vf_spoofchk    = nfp_app_set_vf_spoofchk,
@@ -3633,6 +3638,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
  */
 void nfp_net_free(struct nfp_net *nn)
 {
+       WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
@@ -3920,4 +3926,5 @@ void nfp_net_clean(struct nfp_net *nn)
                return;
 
        unregister_netdev(nn->dp.netdev);
+       nfp_net_reconfig_wait_posted(nn);
 }
index 69aa7fc..59c70be 100644 (file)
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
                work_func_t func, int delay);
 static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
 static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
 
 static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
 static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
        .ndo_tx_timeout    = netxen_tx_timeout,
        .ndo_fix_features = netxen_fix_features,
        .ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
 };
 
 static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
-       int ring;
-       struct nx_host_sds_ring *sds_ring;
-       struct netxen_adapter *adapter = netdev_priv(netdev);
-       struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
-       disable_irq(adapter->irq);
-       for (ring = 0; ring < adapter->max_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               netxen_intr(adapter->irq, sds_ring);
-       }
-       enable_irq(adapter->irq);
-}
-#endif
-
 static int
 nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
 {
index 6bb76e6..f5459de 100644 (file)
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
 
 static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
-                   struct qed_hw_info *p_info,
-                   bool enable,
-                   u8 prio,
-                   u8 tc,
+                   struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                   bool enable, u8 prio, u8 tc,
                    enum dcbx_protocol_type type,
                    enum qed_pci_personality personality)
 {
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
        else
                p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
 
+       /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
+       if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
+            test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+               p_data->arr[type].dont_add_vlan0 = true;
+
        /* QM reconf data */
-       if (p_info->personality == personality)
-               qed_hw_info_set_offload_tc(p_info, tc);
+       if (p_hwfn->hw_info.personality == personality)
+               qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+       /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+       if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+           type == DCBX_PROTOCOL_ROCE) {
+               qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+               qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+       }
 }
 
 /* Update app protocol data and hw_info fields with the TLV info */
 static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
-                        struct qed_hwfn *p_hwfn,
-                        bool enable,
-                        u8 prio, u8 tc, enum dcbx_protocol_type type)
+                        struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                        bool enable, u8 prio, u8 tc,
+                        enum dcbx_protocol_type type)
 {
-       struct qed_hw_info *p_info = &p_hwfn->hw_info;
        enum qed_pci_personality personality;
        enum dcbx_protocol_type id;
        int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 
                personality = qed_dcbx_app_update[i].personality;
 
-               qed_dcbx_set_params(p_data, p_info, enable,
+               qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
                                    prio, tc, type, personality);
        }
 }
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
 static int
-qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                     struct qed_dcbx_results *p_data,
                     struct dcbx_app_priority_entry *p_tbl,
                     u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                                enable = true;
                        }
 
-                       qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+                       qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
                                                 priority, tc, type);
                }
        }
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
                        continue;
 
                enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
-               qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+               qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
                                         priority, tc, type);
        }
 
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
 /* Parse app TLV's to update TC information in hw_info structure for
  * reconfiguring QM. Get protocol specific data for PF update ramrod command.
  */
-static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct dcbx_app_priority_feature *p_app;
        struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
        p_info = &p_hwfn->hw_info;
        num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
 
-       rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+       rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
                                  num_entries, dcbx_version);
        if (rc)
                return rc;
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
                return rc;
 
        if (type == QED_DCBX_OPERATIONAL_MIB) {
-               rc = qed_dcbx_process_mib_info(p_hwfn);
+               rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
                if (!rc) {
                        /* reconfigure tcs of QM queues according
                         * to negotiation results
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
        p_data->dcb_enable_flag = p_src->arr[type].enable;
        p_data->dcb_priority = p_src->arr[type].priority;
        p_data->dcb_tc = p_src->arr[type].tc;
+       p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
 }
 
 /* Set pf update ramrod command params */
index a4d688c..01f253e 100644 (file)
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
        u8 update;              /* Update indication */
        u8 priority;            /* Priority */
        u8 tc;                  /* Traffic Class */
+       bool dont_add_vlan0;    /* Do not insert a vlan tag with id 0 */
 };
 
 #define QED_DCBX_VERSION_DISABLED       0
index 016ca8a..97f073f 100644 (file)
@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 {
        struct qed_load_req_params load_req_params;
-       u32 load_code, param, drv_mb_param;
+       u32 load_code, resp, param, drv_mb_param;
        bool b_default_mtu = true;
        struct qed_hwfn *p_hwfn;
        int rc = 0, mfw_rc, i;
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 
        if (IS_PF(cdev)) {
                p_hwfn = QED_LEADING_HWFN(cdev);
+
+               /* Get pre-negotiated values for stag, bandwidth etc. */
+               DP_VERBOSE(p_hwfn,
+                          QED_MSG_SPQ,
+                          "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+               drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+               rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+                                DRV_MSG_CODE_GET_OEM_UPDATES,
+                                drv_mb_param, &resp, &param);
+               if (rc)
+                       DP_NOTICE(p_hwfn,
+                                 "Failed to send GET_OEM_UPDATES attention request\n");
+
                drv_mb_param = STORM_FW_VERSION;
                rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                                 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
index 8faceb6..a713826 100644 (file)
@@ -11987,6 +11987,7 @@ struct public_global {
        u32 running_bundle_id;
        s32 external_temperature;
        u32 mdump_reason;
+       u64 reserved;
        u32 data_ptr;
        u32 data_size;
 };
@@ -12414,6 +12415,7 @@ struct public_drv_mb {
 #define DRV_MSG_SET_RESOURCE_VALUE_MSG         0x35000000
 #define DRV_MSG_CODE_OV_UPDATE_WOL              0x38000000
 #define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE     0x39000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES            0x41000000
 
 #define DRV_MSG_CODE_BW_UPDATE_ACK             0x32000000
 #define DRV_MSG_CODE_NIG_DRAIN                 0x30000000
@@ -12541,6 +12543,9 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_ESWITCH_MODE_VEB  0x1
 #define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
 
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK    0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET  0
+
 #define DRV_MB_PARAM_SET_LED_MODE_OPER         0x0
 #define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
index d9ab5ad..34193c2 100644 (file)
@@ -407,7 +407,7 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 
        if (i == QED_INIT_MAX_POLL_COUNT) {
                DP_ERR(p_hwfn,
-                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
+                      "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
                       addr, le32_to_cpu(cmd->expected_val),
                       val, le32_to_cpu(cmd->op_data));
        }
index 17f3dfa..e860bdf 100644 (file)
@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
 
                cm_info->local_ip[0] = ntohl(iph->daddr);
                cm_info->remote_ip[0] = ntohl(iph->saddr);
-               cm_info->ip_version = TCP_IPV4;
+               cm_info->ip_version = QED_TCP_IPV4;
 
                ip_hlen = (iph->ihl) * sizeof(u32);
                *payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
                        cm_info->remote_ip[i] =
                            ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
                }
-               cm_info->ip_version = TCP_IPV6;
+               cm_info->ip_version = QED_TCP_IPV6;
 
                ip_hlen = sizeof(*ip6h);
                *payload_len = ntohs(ip6h->payload_len);
index d89a0e2..58c7eb9 100644 (file)
@@ -48,7 +48,7 @@
 #include "qed_reg_addr.h"
 #include "qed_sriov.h"
 
-#define CHIP_MCP_RESP_ITER_US 10
+#define QED_MCP_RESP_ITER_US   10
 
 #define QED_DRV_MB_MAX_RETRIES (500 * 1000)    /* Account for 5 sec */
 #define QED_MCP_RESET_RETRIES  (50 * 1000)     /* Account for 500 msec */
@@ -183,18 +183,57 @@ int qed_mcp_free(struct qed_hwfn *p_hwfn)
        return 0;
 }
 
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES  20
+#define QED_MCP_SHMEM_RDY_ITER_MS      50
+
 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
        struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+       u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+       u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
 
        p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
-       if (!p_info->public_base)
-               return 0;
+       if (!p_info->public_base) {
+               DP_NOTICE(p_hwfn,
+                         "The address of the MCP scratch-pad is not configured\n");
+               return -EINVAL;
+       }
 
        p_info->public_base |= GRCBASE_MCP;
 
+       /* Get the MFW MB address and number of supported messages */
+       mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+                               SECTION_OFFSIZE_ADDR(p_info->public_base,
+                                                    PUBLIC_MFW_MB));
+       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+       p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+                                           p_info->mfw_mb_addr +
+                                           offsetof(struct public_mfw_mb,
+                                                    sup_msgs));
+
+       /* The driver can notify that there was an MCP reset, and might read the
+        * SHMEM values before the MFW has completed initializing them.
+        * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+        * data ready indication.
+        */
+       while (!p_info->mfw_mb_length && --cnt) {
+               msleep(msec);
+               p_info->mfw_mb_length =
+                       (u16)qed_rd(p_hwfn, p_ptt,
+                                   p_info->mfw_mb_addr +
+                                   offsetof(struct public_mfw_mb, sup_msgs));
+       }
+
+       if (!cnt) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to get the SHMEM ready notification after %d msec\n",
+                         QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+               return -EBUSY;
+       }
+
        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
                                SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -204,13 +243,6 @@ static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
 
-       /* Set the MFW MB address */
-       mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
-                               SECTION_OFFSIZE_ADDR(p_info->public_base,
-                                                    PUBLIC_MFW_MB));
-       p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
-       p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
        /* Get the current driver mailbox sequence before sending
         * the first command
         */
@@ -285,9 +317,15 @@ static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
+       u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
        int rc = 0;
 
+       if (p_hwfn->mcp_info->b_block_cmd) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+               return -EBUSY;
+       }
+
        /* Ensure that only a single thread is accessing the mailbox */
        spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
@@ -413,14 +451,41 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                   (p_mb_params->cmd | seq_num), p_mb_params->param);
 }
 
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+       p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+       DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+               block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+                                  struct qed_ptt *p_ptt)
+{
+       u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+       u32 delay = QED_MCP_RESP_ITER_US;
+
+       cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+       cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+       udelay(delay);
+       cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+       udelay(delay);
+       cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+       DP_NOTICE(p_hwfn,
+                 "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+                 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
 static int
 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       struct qed_mcp_mb_params *p_mb_params,
-                      u32 max_retries, u32 delay)
+                      u32 max_retries, u32 usecs)
 {
+       u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
        struct qed_mcp_cmd_elem *p_cmd_elem;
-       u32 cnt = 0;
        u16 seq_num;
        int rc = 0;
 
@@ -443,7 +508,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                        goto err;
 
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
-               udelay(delay);
+
+               if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+                       msleep(msecs);
+               else
+                       udelay(usecs);
        } while (++cnt < max_retries);
 
        if (cnt >= max_retries) {
@@ -472,7 +541,11 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                 * The spinlock stays locked until the list element is removed.
                 */
 
-               udelay(delay);
+               if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+                       msleep(msecs);
+               else
+                       udelay(usecs);
+
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
 
                if (p_cmd_elem->b_is_completed)
@@ -491,11 +564,15 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn,
                          "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
                          p_mb_params->cmd, p_mb_params->param);
+               qed_mcp_print_cpu_info(p_hwfn, p_ptt);
 
                spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
                qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
                spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
 
+               if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+                       qed_mcp_cmd_set_blocking(p_hwfn, true);
+
                return -EAGAIN;
        }
 
@@ -507,7 +584,7 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
                   p_mb_params->mcp_resp,
                   p_mb_params->mcp_param,
-                  (cnt * delay) / 1000, (cnt * delay) % 1000);
+                  (cnt * usecs) / 1000, (cnt * usecs) % 1000);
 
        /* Clear the sequence number from the MFW response */
        p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
@@ -525,7 +602,7 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
 {
        size_t union_data_size = sizeof(union drv_union_data);
        u32 max_retries = QED_DRV_MB_MAX_RETRIES;
-       u32 delay = CHIP_MCP_RESP_ITER_US;
+       u32 usecs = QED_MCP_RESP_ITER_US;
 
        /* MCP not initialized */
        if (!qed_mcp_is_init(p_hwfn)) {
@@ -533,6 +610,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EBUSY;
        }
 
+       if (p_hwfn->mcp_info->b_block_cmd) {
+               DP_NOTICE(p_hwfn,
+                         "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+                         p_mb_params->cmd, p_mb_params->param);
+               return -EBUSY;
+       }
+
        if (p_mb_params->data_src_size > union_data_size ||
            p_mb_params->data_dst_size > union_data_size) {
                DP_ERR(p_hwfn,
@@ -542,8 +626,13 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
                return -EINVAL;
        }
 
+       if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+               max_retries = DIV_ROUND_UP(max_retries, 1000);
+               usecs *= 1000;
+       }
+
        return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
-                                     delay);
+                                     usecs);
 }
 
 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
@@ -761,6 +850,7 @@ __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
        mb_params.data_src_size = sizeof(load_req);
        mb_params.p_data_dst = &load_rsp;
        mb_params.data_dst_size = sizeof(load_rsp);
+       mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
 
        DP_VERBOSE(p_hwfn, QED_MSG_SP,
                   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
@@ -982,7 +1072,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 wol_param, mcp_resp, mcp_param;
+       struct qed_mcp_mb_params mb_params;
+       u32 wol_param;
 
        switch (p_hwfn->cdev->wol_config) {
        case QED_OV_WOL_DISABLED:
@@ -1000,8 +1091,12 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
        }
 
-       return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
-                          &mcp_resp, &mcp_param);
+       memset(&mb_params, 0, sizeof(mb_params));
+       mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+       mb_params.param = wol_param;
+       mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+       return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
 }
 
 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
@@ -1486,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
                                                 FUNC_MF_CFG_OV_STAG_MASK;
        p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
-       if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
-           (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
-               qed_wr(p_hwfn, p_ptt,
-                      NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+       if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+               if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+                              p_hwfn->hw_info.ovlan);
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+                       /* Configure DB to add external vlan to EDPM packets */
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+                              p_hwfn->hw_info.ovlan);
+               } else {
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+                       qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+                       qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+               }
+
                qed_sp_pf_update_stag(p_hwfn);
        }
 
+       DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan  = %d hw_mode = 0x%x\n",
+                  p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+
        /* Acknowledge the MFW */
        qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
                    &resp, &param);
@@ -2077,31 +2188,65 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS          10
+#define QED_MCP_HALT_MAX_RETRIES       10
+
 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 resp = 0, param = 0;
+       u32 resp = 0, param = 0, cpu_state, cnt = 0;
        int rc;
 
        rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
                         &param);
-       if (rc)
+       if (rc) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+               return rc;
+       }
 
-       return rc;
+       do {
+               msleep(QED_MCP_HALT_SLEEP_MS);
+               cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+               if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+                       break;
+       } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+       if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+                         qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+               return -EBUSY;
+       }
+
+       qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+       return 0;
 }
 
+#define QED_MCP_RESUME_SLEEP_MS        10
+
 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 value, cpu_mode;
+       u32 cpu_mode, cpu_state;
 
        qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
 
-       value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
-       value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
-       qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
        cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+       cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+       qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+       msleep(QED_MCP_RESUME_SLEEP_MS);
+       cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+       if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+               DP_NOTICE(p_hwfn,
+                         "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+                         cpu_mode, cpu_state);
+               return -EBUSY;
+       }
 
-       return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+       qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+       return 0;
 }
 
 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
index 047976d..85e6b39 100644 (file)
@@ -635,11 +635,14 @@ struct qed_mcp_info {
         */
        spinlock_t                              cmd_lock;
 
+       /* Flag to indicate whether sending a MFW mailbox command is blocked */
+       bool                                    b_block_cmd;
+
        /* Spinlock used for syncing SW link-changes and link-changes
         * originating from attention context.
         */
        spinlock_t                              link_lock;
-       bool                                    block_mb_sending;
+
        u32                                     public_base;
        u32                                     drv_mb_addr;
        u32                                     mfw_mb_addr;
@@ -660,14 +663,20 @@ struct qed_mcp_info {
 };
 
 struct qed_mcp_mb_params {
-       u32                     cmd;
-       u32                     param;
-       void                    *p_data_src;
-       u8                      data_src_size;
-       void                    *p_data_dst;
-       u8                      data_dst_size;
-       u32                     mcp_resp;
-       u32                     mcp_param;
+       u32 cmd;
+       u32 param;
+       void *p_data_src;
+       void *p_data_dst;
+       u8 data_src_size;
+       u8 data_dst_size;
+       u32 mcp_resp;
+       u32 mcp_param;
+       u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP  (0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK        (0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+       ({ typeof(params) __params = (params); \
+          (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
 };
 
 struct qed_drv_tlv_hdr {
index be941cf..c71391b 100644 (file)
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
                                 num_cons, "Toggle");
        if (rc) {
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-                          "Failed to allocate toogle bits, rc = %d\n", rc);
+                          "Failed to allocate toggle bits, rc = %d\n", rc);
                goto free_cq_map;
        }
 
index d8ad2dc..2440970 100644 (file)
        0x00c000UL
 #define  DORQ_REG_IFEN \
        0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+       0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+       0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+       0x1008c8UL
 #define DORQ_REG_DB_DROP_REASON \
        0x100a2cUL
 #define DORQ_REG_DB_DROP_DETAILS \
        0
 #define MCP_REG_CPU_STATE \
        0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED  (0x1UL << 10)
 #define MCP_REG_CPU_EVENT_MASK \
        0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER    0xe0501cUL
 #define PGLUE_B_REG_PF_BAR0_SIZE \
        0x2aae60UL
 #define PGLUE_B_REG_PF_BAR1_SIZE \
index 7d7a64c..f9167d1 100644 (file)
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 
 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-       enum roce_flavor flavor;
-
        switch (roce_mode) {
        case ROCE_V1:
-               flavor = PLAIN_ROCE;
-               break;
+               return PLAIN_ROCE;
        case ROCE_V2_IPV4:
-               flavor = RROCE_IPV4;
-               break;
+               return RROCE_IPV4;
        case ROCE_V2_IPV6:
-               flavor = ROCE_V2_IPV6;
-               break;
+               return RROCE_IPV6;
        default:
-               flavor = MAX_ROCE_MODE;
-               break;
+               return MAX_ROCE_FLAVOR;
        }
-       return flavor;
 }
 
 static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
index 8de644b..77b6248 100644 (file)
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
 static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
                                  struct qed_tunnel_info *p_src)
 {
-       enum tunnel_clss type;
+       int type;
 
        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
index 3d42696..be118d0 100644 (file)
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
        }
 
        if (!p_iov->b_pre_fp_hsi &&
-           ETH_HSI_VER_MINOR &&
            (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
                DP_INFO(p_hwfn,
                        "PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
 static void
 __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                           struct qed_tunn_update_type *p_src,
-                          enum qed_tunn_clss mask, u8 *p_cls)
+                          enum qed_tunn_mode mask, u8 *p_cls)
 {
        if (p_src->b_update_mode) {
                p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
 static void
 qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
                         struct qed_tunn_update_type *p_src,
-                        enum qed_tunn_clss mask,
+                        enum qed_tunn_mode mask,
                         u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
                         u8 *p_update_port, u16 *p_udp_port)
 {
index 9673d19..b16ce7d 100644 (file)
@@ -2006,18 +2006,16 @@ unlock:
 static int qede_parse_actions(struct qede_dev *edev,
                              struct tcf_exts *exts)
 {
-       int rc = -EINVAL, num_act = 0;
+       int rc = -EINVAL, num_act = 0, i;
        const struct tc_action *a;
        bool is_drop = false;
-       LIST_HEAD(actions);
 
        if (!tcf_exts_has_actions(exts)) {
                DP_NOTICE(edev, "No tc actions received\n");
                return rc;
        }
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(a, &actions, list) {
+       tcf_exts_for_each_action(i, a, exts) {
                num_act++;
 
                if (is_tcf_gact_shot(a))
index 8131292..0c443ea 100644 (file)
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
        int (*config_loopback) (struct qlcnic_adapter *, u8);
        int (*clear_loopback) (struct qlcnic_adapter *, u8);
        int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
-       void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+       void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+                                u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
        int (*get_board_info) (struct qlcnic_adapter *);
        void (*set_mac_filter_count) (struct qlcnic_adapter *);
        void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
 }
 
 static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
-                                       u64 *addr, u16 id)
+                                       u64 *addr, u16 vlan,
+                                       struct qlcnic_host_tx_ring *tx_ring)
 {
-       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+       adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
 }
 
 static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
index 569d54e..a79d84f 100644 (file)
@@ -2135,7 +2135,8 @@ out:
 }
 
 void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
-                                 u16 vlan_id)
+                                 u16 vlan_id,
+                                 struct qlcnic_host_tx_ring *tx_ring)
 {
        u8 mac[ETH_ALEN];
        memcpy(&mac, addr, ETH_ALEN);
index b75a812..73fe2f6 100644 (file)
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
 int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
 int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
 int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+                                 u16 vlan, struct qlcnic_host_tx_ring *ring);
 int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
 int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
 void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
index 4bb33af..56a3bd9 100644 (file)
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
                         struct net_device *netdev);
 void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
-                              u64 *uaddr, u16 vlan_id);
+                              u64 *uaddr, u16 vlan_id,
+                              struct qlcnic_host_tx_ring *tx_ring);
 int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
                                     struct ethtool_coalesce *);
 int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
index 84dd830..9647578 100644 (file)
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
 }
 
 void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
-                              u16 vlan_id)
+                              u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
 {
        struct cmd_desc_type0 *hwdesc;
        struct qlcnic_nic_req *req;
        struct qlcnic_mac_req *mac_req;
        struct qlcnic_vlan_req *vlan_req;
-       struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
        u32 producer;
        u64 word;
 
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
 
 static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                               struct cmd_desc_type0 *first_desc,
-                              struct sk_buff *skb)
+                              struct sk_buff *skb,
+                              struct qlcnic_host_tx_ring *tx_ring)
 {
        struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
        struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
                    tmp_fil->vlan_id == vlan_id) {
                        if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
                                qlcnic_change_filter(adapter, &src_addr,
-                                                    vlan_id);
+                                                    vlan_id, tx_ring);
                        tmp_fil->ftime = jiffies;
                        return;
                }
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
        if (!fil)
                return;
 
-       qlcnic_change_filter(adapter, &src_addr, vlan_id);
+       qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
        fil->ftime = jiffies;
        fil->vlan_id = vlan_id;
        memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (adapter->drv_mac_learn)
-               qlcnic_send_filter(adapter, first_desc, skb);
+               qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
 
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
index 2d38d1a..dbd4801 100644 (file)
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
 static void qlcnic_tx_timeout(struct net_device *netdev);
 static void qlcnic_attach_work(struct work_struct *work);
 static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
 
 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_udp_tunnel_add     = qlcnic_add_vxlan_port,
        .ndo_udp_tunnel_del     = qlcnic_del_vxlan_port,
        .ndo_features_check     = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = qlcnic_poll_controller,
-#endif
 #ifdef CONFIG_QLCNIC_SRIOV
        .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
        .ndo_set_vf_rate        = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_host_sds_ring *sds_ring;
-       struct qlcnic_recv_context *recv_ctx;
-       struct qlcnic_host_tx_ring *tx_ring;
-       int ring;
-
-       if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
-               return;
-
-       recv_ctx = adapter->recv_ctx;
-
-       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
-               sds_ring = &recv_ctx->sds_rings[ring];
-               qlcnic_disable_sds_intr(adapter, sds_ring);
-               napi_schedule(&sds_ring->napi);
-       }
-
-       if (adapter->flags & QLCNIC_MSIX_ENABLED) {
-               /* Only Multi-Tx queue capable devices need to
-                * schedule NAPI for TX rings
-                */
-               if ((qlcnic_83xx_check(adapter) &&
-                    (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
-                   (qlcnic_82xx_check(adapter) &&
-                    !qlcnic_check_multi_tx(adapter)))
-                       return;
-
-               for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
-                       tx_ring = &adapter->tx_ring[ring];
-                       qlcnic_disable_tx_intr(adapter, tx_ring);
-                       napi_schedule(&tx_ring->napi);
-               }
-       }
-}
-#endif
-
 static void
 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
 {
index 353f1c1..059ba94 100644 (file)
@@ -2384,26 +2384,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
        return status;
 }
 
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
-       netdev_features_t features)
-{
-       int err;
-
-       /* Update the behavior of vlan accel in the adapter */
-       err = qlge_update_hw_vlan_features(ndev, features);
-       if (err)
-               return err;
-
-       return features;
-}
-
 static int qlge_set_features(struct net_device *ndev,
        netdev_features_t features)
 {
        netdev_features_t changed = ndev->features ^ features;
+       int err;
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+               /* Update the behavior of vlan accel in the adapter */
+               err = qlge_update_hw_vlan_features(ndev, features);
+               if (err)
+                       return err;
 
-       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                qlge_vlan_mode(ndev, features);
+       }
 
        return 0;
 }
@@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
        .ndo_set_mac_address    = qlge_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_tx_timeout         = qlge_tx_timeout,
-       .ndo_fix_features       = qlge_fix_features,
        .ndo_set_features       = qlge_set_features,
        .ndo_vlan_rx_add_vid    = qlge_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = qlge_vlan_rx_kill_vid,
index ffe7a16..6c8543f 100644 (file)
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
 {
        __be16 rx_data;
        __be16 tx_data;
-       struct spi_transfer *transfer;
-       struct spi_message *msg;
+       struct spi_transfer transfer[2];
+       struct spi_message msg;
        int ret;
 
+       memset(transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+       *result = 0;
+
+       transfer[0].tx_buf = &tx_data;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].rx_buf = &rx_data;
+       transfer[1].len = QCASPI_CMD_LEN;
+
+       spi_message_add_tail(&transfer[0], &msg);
 
        if (qca->legacy_mode) {
-               msg = &qca->spi_msg1;
-               transfer = &qca->spi_xfer1;
-               transfer->tx_buf = &tx_data;
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               spi_sync(qca->spi_dev, msg);
-       } else {
-               msg = &qca->spi_msg2;
-               transfer = &qca->spi_xfer2[0];
-               transfer->tx_buf = &tx_data;
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               transfer = &qca->spi_xfer2[1];
+               spi_sync(qca->spi_dev, &msg);
+               spi_message_init(&msg);
        }
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = &rx_data;
-       transfer->len = QCASPI_CMD_LEN;
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
 qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
 {
        __be16 tx_data[2];
-       struct spi_transfer *transfer;
-       struct spi_message *msg;
+       struct spi_transfer transfer[2];
+       struct spi_message msg;
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
        tx_data[1] = cpu_to_be16(value);
 
+       transfer[0].tx_buf = &tx_data[0];
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].tx_buf = &tx_data[1];
+       transfer[1].len = QCASPI_CMD_LEN;
+
+       spi_message_add_tail(&transfer[0], &msg);
        if (qca->legacy_mode) {
-               msg = &qca->spi_msg1;
-               transfer = &qca->spi_xfer1;
-               transfer->tx_buf = &tx_data[0];
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               spi_sync(qca->spi_dev, msg);
-       } else {
-               msg = &qca->spi_msg2;
-               transfer = &qca->spi_xfer2[0];
-               transfer->tx_buf = &tx_data[0];
-               transfer->rx_buf = NULL;
-               transfer->len = QCASPI_CMD_LEN;
-               transfer = &qca->spi_xfer2[1];
+               spi_sync(qca->spi_dev, &msg);
+               spi_message_init(&msg);
        }
-       transfer->tx_buf = &tx_data[1];
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
index 206f026..66b775d 100644 (file)
@@ -99,22 +99,24 @@ static u32
 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 {
        __be16 cmd;
-       struct spi_message *msg = &qca->spi_msg2;
-       struct spi_transfer *transfer = &qca->spi_xfer2[0];
+       struct spi_message msg;
+       struct spi_transfer transfer[2];
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
        cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
-       transfer->tx_buf = &cmd;
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       transfer = &qca->spi_xfer2[1];
-       transfer->tx_buf = src;
-       transfer->rx_buf = NULL;
-       transfer->len = len;
+       transfer[0].tx_buf = &cmd;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].tx_buf = src;
+       transfer[1].len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[0], &msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+       if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
-       transfer->tx_buf = src;
-       transfer->rx_buf = NULL;
-       transfer->len = len;
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
+       transfer.tx_buf = src;
+       transfer.len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer, &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != len)) {
+       if (ret || (msg.actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg2;
+       struct spi_message msg;
        __be16 cmd;
-       struct spi_transfer *transfer = &qca->spi_xfer2[0];
+       struct spi_transfer transfer[2];
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
+
        cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
-       transfer->tx_buf = &cmd;
-       transfer->rx_buf = NULL;
-       transfer->len = QCASPI_CMD_LEN;
-       transfer = &qca->spi_xfer2[1];
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = dst;
-       transfer->len = len;
+       transfer[0].tx_buf = &cmd;
+       transfer[0].len = QCASPI_CMD_LEN;
+       transfer[1].rx_buf = dst;
+       transfer[1].len = len;
 
-       ret = spi_sync(qca->spi_dev, msg);
+       spi_message_add_tail(&transfer[0], &msg);
+       spi_message_add_tail(&transfer[1], &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
-       if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+       if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 static u32
 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
 {
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
-       transfer->tx_buf = NULL;
-       transfer->rx_buf = dst;
-       transfer->len = len;
+       memset(&transfer, 0, sizeof(transfer));
+       spi_message_init(&msg);
 
-       ret = spi_sync(qca->spi_dev, msg);
+       transfer.rx_buf = dst;
+       transfer.len = len;
 
-       if (ret || (msg->actual_length != len)) {
+       spi_message_add_tail(&transfer, &msg);
+       ret = spi_sync(qca->spi_dev, &msg);
+
+       if (ret || (msg.actual_length != len)) {
                qcaspi_spi_error(qca);
                return 0;
        }
@@ -195,19 +205,23 @@ static int
 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
 {
        __be16 tx_data;
-       struct spi_message *msg = &qca->spi_msg1;
-       struct spi_transfer *transfer = &qca->spi_xfer1;
+       struct spi_message msg;
+       struct spi_transfer transfer;
        int ret;
 
+       memset(&transfer, 0, sizeof(transfer));
+
+       spi_message_init(&msg);
+
        tx_data = cpu_to_be16(cmd);
-       transfer->len = sizeof(tx_data);
-       transfer->tx_buf = &tx_data;
-       transfer->rx_buf = NULL;
+       transfer.len = sizeof(cmd);
+       transfer.tx_buf = &tx_data;
+       spi_message_add_tail(&transfer, &msg);
 
-       ret = spi_sync(qca->spi_dev, msg);
+       ret = spi_sync(qca->spi_dev, &msg);
 
        if (!ret)
-               ret = msg->status;
+               ret = msg.status;
 
        if (ret)
                qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
        qca = netdev_priv(dev);
        memset(qca, 0, sizeof(struct qcaspi));
 
-       memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
-       memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
-       spi_message_init(&qca->spi_msg1);
-       spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
-       spi_message_init(&qca->spi_msg2);
-       spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
-       spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
        memset(&qca->txr, 0, sizeof(qca->txr));
        qca->txr.count = TX_RING_MAX_LEN;
 }
index fc4beb1..fc0e987 100644 (file)
@@ -83,11 +83,6 @@ struct qcaspi {
        struct tx_ring txr;
        struct qcaspi_stats stats;
 
-       struct spi_message spi_msg1;
-       struct spi_message spi_msg2;
-       struct spi_transfer spi_xfer1;
-       struct spi_transfer spi_xfer2[2];
-
        u8 *rx_buffer;
        u32 buffer_size;
        u8 sync;
index 7fd86d4..11167ab 100644 (file)
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
        struct sk_buff *skbn;
 
        if (skb->dev->type == ARPHRD_ETHER) {
-               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+               if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
        }
 
        if (skb_headroom(skb) < required_headroom) {
-               if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+               if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }
 
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
        if (!skb)
                goto done;
 
+       if (skb->pkt_type == PACKET_LOOPBACK)
+               return RX_HANDLER_PASS;
+
        dev = skb->dev;
        port = rmnet_get_port(dev);
 
index 0efa977..9a5e296 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/ethtool.h>
 #include <linux/phy.h>
@@ -218,6 +219,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8161), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8167), 0, 0, RTL_CFG_0 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8168), 0, 0, RTL_CFG_1 },
+       { PCI_DEVICE(PCI_VENDOR_ID_NCUBE,       0x8168), 0, 0, RTL_CFG_1 },
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     0x8169), 0, 0, RTL_CFG_0 },
        { PCI_VENDOR_ID_DLINK,                  0x4300,
                PCI_VENDOR_ID_DLINK, 0x4b10,             0, 0, RTL_CFG_1 },
@@ -630,7 +632,7 @@ struct rtl8169_tc_offsets {
 };
 
 enum rtl_flag {
-       RTL_FLAG_TASK_ENABLED,
+       RTL_FLAG_TASK_ENABLED = 0,
        RTL_FLAG_TASK_SLOW_PENDING,
        RTL_FLAG_TASK_RESET_PENDING,
        RTL_FLAG_MAX
@@ -664,6 +666,7 @@ struct rtl8169_private {
 
        u16 event_slow;
        const struct rtl_coalesce_info *coalesce_info;
+       struct clk *clk;
 
        struct mdio_ops {
                void (*write)(struct rtl8169_private *, int, int);
@@ -4068,6 +4071,14 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
        phy_speed_up(dev->phydev);
 
        genphy_soft_reset(dev->phydev);
+
+       /* It was reported that several chips end up with 10MBit/Half on a
+        * 1GBit link after resuming from S3. For whatever reason the PHY on
+        * these chips doesn't properly start a renegotiation when soft-reset.
+        * Explicitly requesting a renegotiation fixes this.
+        */
+       if (dev->phydev->autoneg == AUTONEG_ENABLE)
+               phy_restart_aneg(dev->phydev);
 }
 
 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4522,11 +4533,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        rtl_hw_reset(tp);
 }
 
-static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
+static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
-       /* Set DMA burst size and Interframe Gap Time */
-       RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
-               (InterFrameGap << TxInterFrameGapShift));
+       u32 val = TX_DMA_BURST << TxDMAShift |
+                 InterFrameGap << TxInterFrameGapShift;
+
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+           tp->mac_version != RTL_GIGA_MAC_VER_39)
+               val |= TXCFG_AUTO_FIFO;
+
+       RTL_W32(tp, TxConfig, val);
 }
 
 static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -4633,12 +4649,14 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
 
        rtl_set_rx_max_size(tp);
        rtl_set_rx_tx_desc_registers(tp);
-       rtl_set_rx_tx_config_registers(tp);
        RTL_W8(tp, Cfg9346, Cfg9346_Lock);
 
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R8(tp, IntrMask);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
+       rtl_init_rxcfg(tp);
+       rtl_set_tx_config_registers(tp);
+
        rtl_set_rx_mode(tp->dev);
        /* no early-rx interrupts */
        RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
@@ -4772,12 +4790,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
 static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
 {
        if (enable) {
-               RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
                RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+               RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
        } else {
                RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
                RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
        }
+
+       udelay(10);
 }
 
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5017,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        /* Adjust EEE LED frequency */
@@ -5051,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
 
        rtl_disable_clock_request(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
        RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5096,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8168g(struct rtl8169_private *tp)
 {
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5195,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
        rtl_hw_aspm_clkreq_enable(tp, false);
        rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5279,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
 {
        rtl8168ep_stop_cmac(tp);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
        rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5602,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
-       RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
        RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
 
        rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5622,6 +5633,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
 {
+       rtl_hw_aspm_clkreq_enable(tp, false);
+
        /* Force LAN exit from ASPM if Rx/Tx are not idle */
        RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
 
@@ -5630,6 +5643,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
 
        rtl_pcie_state_l2l3_enable(tp, false);
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -6652,7 +6666,8 @@ static int rtl8169_close(struct net_device *dev)
        rtl8169_update_counters(tp);
 
        rtl_lock_work(tp);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
        rtl8169_down(dev);
        rtl_unlock_work(tp);
@@ -6835,7 +6850,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
 
        rtl_lock_work(tp);
        napi_disable(&tp->napi);
-       clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       /* Clear all task flags */
+       bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
        rtl_unlock_work(tp);
 
        rtl_pll_power_down(tp);
@@ -6847,8 +6864,10 @@ static int rtl8169_suspend(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
 
        rtl8169_net_suspend(dev);
+       clk_disable_unprepare(tp->clk);
 
        return 0;
 }
@@ -6876,6 +6895,9 @@ static int rtl8169_resume(struct device *device)
 {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
+
+       clk_prepare_enable(tp->clk);
 
        if (netif_running(dev))
                __rtl8169_resume(dev);
@@ -7251,6 +7273,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
        }
 }
 
+static void rtl_disable_clk(void *data)
+{
+       clk_disable_unprepare(data);
+}
+
 static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7271,6 +7298,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
        tp->supports_gmii = cfg->has_gmii;
 
+       /* Get the *optional* external "ether_clk" used on some boards */
+       tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
+       if (IS_ERR(tp->clk)) {
+               rc = PTR_ERR(tp->clk);
+               if (rc == -ENOENT) {
+                       /* clk-core allows NULL (for suspend / resume) */
+                       tp->clk = NULL;
+               } else if (rc == -EPROBE_DEFER) {
+                       return rc;
+               } else {
+                       dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
+                       return rc;
+               }
+       } else {
+               rc = clk_prepare_enable(tp->clk);
+               if (rc) {
+                       dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
+                       return rc;
+               }
+
+               rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
+                                             tp->clk);
+               if (rc)
+                       return rc;
+       }
+
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
        rc = pcim_enable_device(pdev);
        if (rc < 0) {
index f3f7477..bb0ebdf 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Renesas device configuration
 #
index a05102a..f21ab8c 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 #
 # Makefile for the Renesas device drivers.
 #
index b81f4fa..9b6bf55 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #ifndef __RAVB_H__
@@ -431,6 +428,7 @@ enum EIS_BIT {
        EIS_CULF1       = 0x00000080,
        EIS_TFFF        = 0x00000100,
        EIS_QFS         = 0x00010000,
+       EIS_RESERVED    = (GENMASK(31, 17) | GENMASK(15, 11)),
 };
 
 /* RIC0 */
@@ -475,6 +473,7 @@ enum RIS0_BIT {
        RIS0_FRF15      = 0x00008000,
        RIS0_FRF16      = 0x00010000,
        RIS0_FRF17      = 0x00020000,
+       RIS0_RESERVED   = GENMASK(31, 18),
 };
 
 /* RIC1 */
@@ -531,6 +530,7 @@ enum RIS2_BIT {
        RIS2_QFF16      = 0x00010000,
        RIS2_QFF17      = 0x00020000,
        RIS2_RFFF       = 0x80000000,
+       RIS2_RESERVED   = GENMASK(30, 18),
 };
 
 /* TIC */
@@ -547,6 +547,7 @@ enum TIS_BIT {
        TIS_FTF1        = 0x00000002,   /* Undocumented? */
        TIS_TFUF        = 0x00000100,
        TIS_TFWF        = 0x00000200,
+       TIS_RESERVED    = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
 };
 
 /* ISS */
@@ -620,6 +621,7 @@ enum GIC_BIT {
 enum GIS_BIT {
        GIS_PTCF        = 0x00000001,   /* Undocumented? */
        GIS_PTMF        = 0x00000004,
+       GIS_RESERVED    = GENMASK(15, 10),
 };
 
 /* GIE (R-Car Gen3 only) */
index c06f2df..d6f7539 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Renesas Ethernet AVB device driver
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
@@ -5,10 +6,6 @@
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License version 2,
- * as published by the Free Software Foundation.
  */
 
 #include <linux/cache.h>
@@ -742,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
        u32 eis, ris2;
 
        eis = ravb_read(ndev, EIS);
-       ravb_write(ndev, ~EIS_QFS, EIS);
+       ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
        if (eis & EIS_QFS) {
                ris2 = ravb_read(ndev, RIS2);
-               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+               ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+                          RIS2);
 
                /* Receive Descriptor Empty int */
                if (ris2 & RIS2_QFF0)
@@ -798,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
        u32 tis = ravb_read(ndev, TIS);
 
        if (tis & TIS_TFUF) {
-               ravb_write(ndev, ~TIS_TFUF, TIS);
+               ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
                ravb_get_tx_tstamp(ndev);
                return true;
        }
@@ -933,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
                /* Processing RX Descriptor Ring */
                if (ris0 & mask) {
                        /* Clear RX interrupt */
-                       ravb_write(ndev, ~mask, RIS0);
+                       ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
                        if (ravb_rx(ndev, &quota, q))
                                goto out;
                }
@@ -941,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
                if (tis & mask) {
                        spin_lock_irqsave(&priv->lock, flags);
                        /* Clear TX interrupt */
-                       ravb_write(ndev, ~mask, TIS);
+                       ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
                        ravb_tx_free(ndev, q, true);
                        netif_wake_subqueue(ndev, q);
                        mmiowb();
index eede70e..dce2a40 100644 (file)
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /* PTP 1588 clock using the Renesas Ethernet AVB
  *
  * Copyright (C) 2013-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
  * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
  */
 
 #include "ravb.h"
@@ -319,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
                }
        }
 
-       ravb_write(ndev, ~gis, GIS);
+       ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
 }
 
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
index 5573199..f27a0dc 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2014 Renesas Electronics Corporation
@@ -5,18 +6,6 @@
  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
  *  Copyright (C) 2013-2017 Cogent Embedded, Inc.
  *  Copyright (C) 2014 Codethink Limited
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #include <linux/module.h>
@@ -809,6 +798,41 @@ static struct sh_eth_cpu_data r8a77980_data = {
        .magic          = 1,
        .cexcr          = 1,
 };
+
+/* R7S9210 */
+static struct sh_eth_cpu_data r7s9210_data = {
+       .soft_reset     = sh_eth_soft_reset,
+
+       .set_duplex     = sh_eth_set_duplex,
+       .set_rate       = sh_eth_set_rate_rcar,
+
+       .register_type  = SH_ETH_REG_FAST_SH4,
+
+       .edtrr_trns     = EDTRR_TRNS_ETHER,
+       .ecsr_value     = ECSR_ICD,
+       .ecsipr_value   = ECSIPR_ICDIP,
+       .eesipr_value   = EESIPR_TWBIP | EESIPR_TABTIP | EESIPR_RABTIP |
+                         EESIPR_RFCOFIP | EESIPR_ECIIP | EESIPR_FTCIP |
+                         EESIPR_TDEIP | EESIPR_TFUFIP | EESIPR_FRIP |
+                         EESIPR_RDEIP | EESIPR_RFOFIP | EESIPR_CNDIP |
+                         EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+                         EESIPR_RMAFIP | EESIPR_RRFIP | EESIPR_RTLFIP |
+                         EESIPR_RTSFIP | EESIPR_PREIP | EESIPR_CERFIP,
+
+       .tx_check       = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_TRO,
+       .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+                         EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE,
+
+       .fdr_value      = 0x0000070f,
+
+       .apr            = 1,
+       .mpr            = 1,
+       .tpauser        = 1,
+       .hw_swap        = 1,
+       .rpadir         = 1,
+       .no_ade         = 1,
+       .xdfar_rw       = 1,
+};
 #endif /* CONFIG_OF */
 
 static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -3132,6 +3156,7 @@ static const struct of_device_id sh_eth_match_table[] = {
        { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data },
        { .compatible = "renesas,gether-r8a77980", .data = &r8a77980_data },
        { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+       { .compatible = "renesas,ether-r7s9210", .data = &r7s9210_data },
        { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data },
        { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data },
        { }
index f94be99..0c18650 100644 (file)
@@ -1,19 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /*  SuperH Ethernet device driver
  *
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2012 Renesas Solutions Corp.
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms and conditions of the GNU General Public License,
- *  version 2, as published by the Free Software Foundation.
- *
- *  This program is distributed in the hope it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- *  more details.
- *
- *  The full GNU General Public License is included in this distribution in
- *  the file called "COPYING".
  */
 
 #ifndef __SH_ETH_H__
index c5bc124..d1bb73b 100644 (file)
@@ -77,7 +77,8 @@ static void   ether3_setmulticastlist(struct net_device *dev);
 static int     ether3_rx(struct net_device *dev, unsigned int maxcnt);
 static void    ether3_tx(struct net_device *dev);
 static int     ether3_open (struct net_device *dev);
-static int     ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t     ether3_sendpacket(struct sk_buff *skb,
+                                         struct net_device *dev);
 static irqreturn_t ether3_interrupt (int irq, void *dev_id);
 static int     ether3_close (struct net_device *dev);
 static void    ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
 /*
  * Transmit a packet
  */
-static int
+static netdev_tx_t
 ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned long flags;
index 573691b..70cce63 100644 (file)
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
        return 0;
 }
 
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
index 3302332..3d0dd39 100644 (file)
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
 
 /**************************************************************************
  *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_channel *channel;
-
-       efx_for_each_channel(channel, efx)
-               efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
  * Kernel net device interface
  *
  *************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
 #endif
        .ndo_get_phys_port_id   = efx_get_phys_port_id,
        .ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = efx_netpoll,
-#endif
        .ndo_setup_tc           = efx_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
index dd5530a..03e2455 100644 (file)
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
 
 /**************************************************************************
  *
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
-       struct ef4_nic *efx = netdev_priv(net_dev);
-       struct ef4_channel *channel;
-
-       ef4_for_each_channel(channel, efx)
-               ef4_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
  * Kernel net device interface
  *
  *************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
        .ndo_set_mac_address    = ef4_set_mac_address,
        .ndo_set_rx_mode        = ef4_set_rx_mode,
        .ndo_set_features       = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = ef4_netpoll,
-#endif
        .ndo_setup_tc           = ef4_setup_tc,
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = ef4_filter_rfs,
index 18d533f..3140999 100644 (file)
@@ -99,7 +99,7 @@ struct ioc3_private {
 
 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void ioc3_timeout(struct net_device *dev);
 static inline unsigned int ioc3_hash(const unsigned char *addr);
 static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
        .remove         = ioc3_remove_one,
 };
 
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned long data;
        struct ioc3_private *ip = netdev_priv(dev);
index ea55abd..703fbbe 100644 (file)
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
 /*
  * Transmit a packet (called by the kernel)
  */
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
 {
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;
index edf2036..324049e 100644 (file)
@@ -33,7 +33,7 @@ config DWMAC_DWC_QOS_ETH
        select PHYLIB
        select CRC32
        select MII
-       depends on OF && COMMON_CLK && HAS_DMA
+       depends on OF && HAS_DMA
        help
          Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
 
@@ -57,7 +57,7 @@ config DWMAC_ANARION
 config DWMAC_IPQ806X
        tristate "QCA IPQ806x DWMAC support"
        default ARCH_QCOM
-       depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+       depends on OF && (ARCH_QCOM || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for QCA IPQ806X DWMAC Ethernet.
@@ -100,7 +100,7 @@ config DWMAC_OXNAS
 config DWMAC_ROCKCHIP
        tristate "Rockchip dwmac support"
        default ARCH_ROCKCHIP
-       depends on OF && COMMON_CLK && (ARCH_ROCKCHIP || COMPILE_TEST)
+       depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
        select MFD_SYSCON
        help
          Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -110,7 +110,7 @@ config DWMAC_ROCKCHIP
 
 config DWMAC_SOCFPGA
        tristate "SOCFPGA dwmac support"
-       default ARCH_SOCFPGA
+       default (ARCH_SOCFPGA || ARCH_STRATIX10)
        depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
        select MFD_SYSCON
        help
@@ -123,7 +123,7 @@ config DWMAC_SOCFPGA
 config DWMAC_STI
        tristate "STi GMAC support"
        default ARCH_STI
-       depends on OF && COMMON_CLK && (ARCH_STI || COMPILE_TEST)
+       depends on OF && (ARCH_STI || COMPILE_TEST)
        select MFD_SYSCON
        ---help---
          Support for ethernet controller on STi SOCs.
@@ -147,7 +147,7 @@ config DWMAC_STM32
 config DWMAC_SUNXI
        tristate "Allwinner GMAC support"
        default ARCH_SUNXI
-       depends on OF && COMMON_CLK && (ARCH_SUNXI || COMPILE_TEST)
+       depends on OF && (ARCH_SUNXI || COMPILE_TEST)
        ---help---
          Support for Allwinner A20/A31 GMAC ethernet controllers.
 
index 1854f27..b1b305f 100644 (file)
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
 #define MAX_DMA_RIWT           0xff
 #define MIN_DMA_RIWT           0x20
 /* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER   40000
+#define STMMAC_COAL_TX_TIMER   1000
 #define STMMAC_MAX_COAL_TX_TICK        100000
 #define STMMAC_TX_MAX_FRAMES   256
-#define STMMAC_TX_FRAMES       64
+#define STMMAC_TX_FRAMES       25
 
 /* Packets types */
 enum packets_types {
index 76649ad..63e1064 100644 (file)
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
 
 /* Frequently used values are kept adjacent for cache effect */
 struct stmmac_tx_queue {
+       u32 tx_count_frames;
+       struct timer_list txtimer;
        u32 queue_index;
        struct stmmac_priv *priv_data;
        struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
        u32 rx_zeroc_thresh;
        dma_addr_t dma_rx_phy;
        u32 rx_tail_addr;
+};
+
+struct stmmac_channel {
        struct napi_struct napi ____cacheline_aligned_in_smp;
+       struct stmmac_priv *priv_data;
+       u32 index;
+       int has_rx;
+       int has_tx;
 };
 
 struct stmmac_tc_entry {
@@ -109,15 +118,12 @@ struct stmmac_pps_cfg {
 
 struct stmmac_priv {
        /* Frequently used values are kept adjacent for cache effect */
-       u32 tx_count_frames;
        u32 tx_coal_frames;
        u32 tx_coal_timer;
-       bool tx_timer_armed;
 
        int tx_coalesce;
        int hwts_tx_en;
        bool tx_path_in_lpi_mode;
-       struct timer_list txtimer;
        bool tso;
 
        unsigned int dma_buf_sz;
@@ -138,6 +144,9 @@ struct stmmac_priv {
        /* TX Queue */
        struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
 
+       /* Generic channel for NAPI */
+       struct stmmac_channel channel[STMMAC_CH_MAX];
+
        bool oldlink;
        int speed;
        int oldduplex;
index ff1ffb4..75896d6 100644 (file)
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 {
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;
 
-       for (queue = 0; queue < rx_queues_cnt; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
 
-               napi_disable(&rx_q->napi);
+               napi_disable(&ch->napi);
        }
 }
 
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 {
        u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+       u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+       u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
        u32 queue;
 
-       for (queue = 0; queue < rx_queues_cnt; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
 
-               napi_enable(&rx_q->napi);
+               napi_enable(&ch->napi);
        }
 }
 
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
        struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
        unsigned int bytes_compl = 0, pkts_compl = 0;
-       unsigned int entry;
+       unsigned int entry, count = 0;
 
-       netif_tx_lock(priv->dev);
+       __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
 
        priv->xstats.tx_clean++;
 
        entry = tx_q->dirty_tx;
-       while (entry != tx_q->cur_tx) {
+       while ((entry != tx_q->cur_tx) && (count < budget)) {
                struct sk_buff *skb = tx_q->tx_skbuff[entry];
                struct dma_desc *p;
                int status;
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                if (unlikely(status & tx_dma_own))
                        break;
 
+               count++;
+
                /* Make sure descriptor fields are read after reading
                 * the own bit.
                 */
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
                stmmac_enable_eee_mode(priv);
                mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
        }
-       netif_tx_unlock(priv->dev);
+
+       __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+       return count;
 }
 
 /**
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
        return false;
 }
 
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+       int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+                                                &priv->xstats, chan);
+       struct stmmac_channel *ch = &priv->channel[chan];
+       bool needs_work = false;
+
+       if ((status & handle_rx) && ch->has_rx) {
+               needs_work = true;
+       } else {
+               status &= ~handle_rx;
+       }
+
+       if ((status & handle_tx) && ch->has_tx) {
+               needs_work = true;
+       } else {
+               status &= ~handle_tx;
+       }
+
+       if (needs_work && napi_schedule_prep(&ch->napi)) {
+               stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+               __napi_schedule(&ch->napi);
+       }
+
+       return status;
+}
+
 /**
  * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
        u32 channels_to_check = tx_channel_count > rx_channel_count ?
                                tx_channel_count : rx_channel_count;
        u32 chan;
-       bool poll_scheduled = false;
        int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
 
        /* Make sure we never check beyond our status buffer. */
        if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
                channels_to_check = ARRAY_SIZE(status);
 
-       /* Each DMA channel can be used for rx and tx simultaneously, yet
-        * napi_struct is embedded in struct stmmac_rx_queue rather than in a
-        * stmmac_channel struct.
-        * Because of this, stmmac_poll currently checks (and possibly wakes)
-        * all tx queues rather than just a single tx queue.
-        */
        for (chan = 0; chan < channels_to_check; chan++)
-               status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
-                               &priv->xstats, chan);
-
-       for (chan = 0; chan < rx_channel_count; chan++) {
-               if (likely(status[chan] & handle_rx)) {
-                       struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
-                       if (likely(napi_schedule_prep(&rx_q->napi))) {
-                               stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
-                               __napi_schedule(&rx_q->napi);
-                               poll_scheduled = true;
-                       }
-               }
-       }
-
-       /* If we scheduled poll, we already know that tx queues will be checked.
-        * If we didn't schedule poll, see if any DMA channel (used by tx) has a
-        * completed transmission, if so, call stmmac_poll (once).
-        */
-       if (!poll_scheduled) {
-               for (chan = 0; chan < tx_channel_count; chan++) {
-                       if (status[chan] & handle_tx) {
-                               /* It doesn't matter what rx queue we choose
-                                * here. We use 0 since it always exists.
-                                */
-                               struct stmmac_rx_queue *rx_q =
-                                       &priv->rx_queue[0];
-
-                               if (likely(napi_schedule_prep(&rx_q->napi))) {
-                                       stmmac_disable_dma_irq(priv,
-                                                       priv->ioaddr, chan);
-                                       __napi_schedule(&rx_q->napi);
-                               }
-                               break;
-                       }
-               }
-       }
+               status[chan] = stmmac_napi_check(priv, chan);
 
        for (chan = 0; chan < tx_channel_count; chan++) {
                if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
                stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
                                    tx_q->dma_tx_phy, chan);
 
-               tx_q->tx_tail_addr = tx_q->dma_tx_phy +
-                           (DMA_TX_SIZE * sizeof(struct dma_desc));
+               tx_q->tx_tail_addr = tx_q->dma_tx_phy;
                stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
                                       tx_q->tx_tail_addr, chan);
        }
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
        return ret;
 }
 
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+       struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+       mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
  * @data: data pointer
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  */
 static void stmmac_tx_timer(struct timer_list *t)
 {
-       struct stmmac_priv *priv = from_timer(priv, t, txtimer);
-       u32 tx_queues_count = priv->plat->tx_queues_to_use;
-       u32 queue;
+       struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+       struct stmmac_priv *priv = tx_q->priv_data;
+       struct stmmac_channel *ch;
+
+       ch = &priv->channel[tx_q->queue_index];
 
-       /* let's scan all the tx queues */
-       for (queue = 0; queue < tx_queues_count; queue++)
-               stmmac_tx_clean(priv, queue);
+       if (likely(napi_schedule_prep(&ch->napi)))
+               __napi_schedule(&ch->napi);
 }
 
 /**
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t)
  */
 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 {
+       u32 tx_channel_count = priv->plat->tx_queues_to_use;
+       u32 chan;
+
        priv->tx_coal_frames = STMMAC_TX_FRAMES;
        priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
-       timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
-       priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
-       add_timer(&priv->txtimer);
+
+       for (chan = 0; chan < tx_channel_count; chan++) {
+               struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+               timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+       }
 }
 
 static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
 static int stmmac_open(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 chan;
        int ret;
 
        stmmac_check_ether_addr(priv);
@@ -2688,7 +2695,9 @@ irq_error:
        if (dev->phydev)
                phy_stop(dev->phydev);
 
-       del_timer_sync(&priv->txtimer);
+       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+               del_timer_sync(&priv->tx_queue[chan].txtimer);
+
        stmmac_hw_teardown(dev);
 init_error:
        free_dma_desc_resources(priv);
@@ -2708,6 +2717,7 @@ dma_desc_error:
 static int stmmac_release(struct net_device *dev)
 {
        struct stmmac_priv *priv = netdev_priv(dev);
+       u32 chan;
 
        if (priv->eee_enabled)
                del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev)
 
        stmmac_disable_all_queues(priv);
 
-       del_timer_sync(&priv->txtimer);
+       for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+               del_timer_sync(&priv->tx_queue[chan].txtimer);
 
        /* Free the IRQ lines */
        free_irq(dev->irq, dev);
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        priv->xstats.tx_tso_nfrags += nfrags;
 
        /* Manage tx mitigation */
-       priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-               mod_timer(&priv->txtimer,
-                         STMMAC_COAL_TIMER(priv->tx_coal_timer));
-       } else {
-               priv->tx_count_frames = 0;
+       tx_q->tx_count_frames += nfrags + 1;
+       if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
+               tx_q->tx_count_frames = 0;
+       } else {
+               stmmac_tx_timer_arm(priv, queue);
        }
 
        skb_tx_timestamp(skb);
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
+       tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
        return NETDEV_TX_OK;
@@ -3146,17 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
         * This approach takes care about the fragments: desc is the first
         * element in case of no SG.
         */
-       priv->tx_count_frames += nfrags + 1;
-       if (likely(priv->tx_coal_frames > priv->tx_count_frames) &&
-           !priv->tx_timer_armed) {
-               mod_timer(&priv->txtimer,
-                         STMMAC_COAL_TIMER(priv->tx_coal_timer));
-               priv->tx_timer_armed = true;
-       } else {
-               priv->tx_count_frames = 0;
+       tx_q->tx_count_frames += nfrags + 1;
+       if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
                stmmac_set_tx_ic(priv, desc);
                priv->xstats.tx_set_ic_bit++;
-               priv->tx_timer_armed = false;
+               tx_q->tx_count_frames = 0;
+       } else {
+               stmmac_tx_timer_arm(priv, queue);
        }
 
        skb_tx_timestamp(skb);
@@ -3202,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+       tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
        stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
        return NETDEV_TX_OK;
@@ -3322,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
        struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       struct stmmac_channel *ch = &priv->channel[queue];
        unsigned int entry = rx_q->cur_rx;
        int coe = priv->hw->rx_csum;
        unsigned int next_entry;
@@ -3494,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
                        else
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-                       napi_gro_receive(&rx_q->napi, skb);
+                       napi_gro_receive(&ch->napi, skb);
 
                        priv->dev->stats.rx_packets++;
                        priv->dev->stats.rx_bytes += frame_len;
@@ -3517,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
  *  Description :
  *  To look at the incoming frames and clear the tx resources.
  */
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
 {
-       struct stmmac_rx_queue *rx_q =
-               container_of(napi, struct stmmac_rx_queue, napi);
-       struct stmmac_priv *priv = rx_q->priv_data;
-       u32 tx_count = priv->plat->tx_queues_to_use;
-       u32 chan = rx_q->queue_index;
-       int work_done = 0;
-       u32 queue;
+       struct stmmac_channel *ch =
+               container_of(napi, struct stmmac_channel, napi);
+       struct stmmac_priv *priv = ch->priv_data;
+       int work_done = 0, work_rem = budget;
+       u32 chan = ch->index;
 
        priv->xstats.napi_poll++;
 
-       /* check all the queues */
-       for (queue = 0; queue < tx_count; queue++)
-               stmmac_tx_clean(priv, queue);
+       if (ch->has_tx) {
+               int done = stmmac_tx_clean(priv, work_rem, chan);
 
-       work_done = stmmac_rx(priv, budget, rx_q->queue_index);
-       if (work_done < budget) {
-               napi_complete_done(napi, work_done);
-               stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+               work_done += done;
+               work_rem -= done;
+       }
+
+       if (ch->has_rx) {
+               int done = stmmac_rx(priv, work_rem, chan);
+
+               work_done += done;
+               work_rem -= done;
        }
+
+       if (work_done < budget && napi_complete_done(napi, work_done))
+               stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
        return work_done;
 }
 
@@ -4201,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device,
 {
        struct net_device *ndev = NULL;
        struct stmmac_priv *priv;
+       u32 queue, maxq;
        int ret = 0;
-       u32 queue;
 
        ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
                                  MTL_MAX_TX_QUEUES,
@@ -4325,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device,
                         "Enable RX Mitigation via HW Watchdog Timer\n");
        }
 
-       for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       /* Setup channels NAPI */
+       maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
 
-               netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
-                              (8 * priv->plat->rx_queues_to_use));
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
+
+               ch->priv_data = priv;
+               ch->index = queue;
+
+               if (queue < priv->plat->rx_queues_to_use)
+                       ch->has_rx = true;
+               if (queue < priv->plat->tx_queues_to_use)
+                       ch->has_tx = true;
+
+               netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+                              NAPI_POLL_WEIGHT);
        }
 
        mutex_init(&priv->lock);
@@ -4375,10 +4402,10 @@ error_netdev_register:
            priv->hw->pcs != STMMAC_PCS_RTBI)
                stmmac_mdio_unregister(ndev);
 error_mdio_register:
-       for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-               struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+       for (queue = 0; queue < maxq; queue++) {
+               struct stmmac_channel *ch = &priv->channel[queue];
 
-               netif_napi_del(&rx_q->napi);
+               netif_napi_del(&ch->napi);
        }
 error_hw_init:
        destroy_workqueue(priv->wq);
index 3609c7b..2b800ce 100644 (file)
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
  * Description:
  * This function validates the number of Unicast address entries supported
  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 132, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
  * logic. This function validates a valid, supported configuration is
  * selected, and defaults to 1 Unicast address if an unsupported
  * configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
        int x = ucast_entries;
 
        switch (x) {
-       case 1:
-       case 32:
+       case 1 ... 32:
        case 64:
        case 128:
                break;
index 1a96dd9..531294f 100644 (file)
@@ -61,7 +61,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
        struct stmmac_tc_entry *action_entry = entry;
        const struct tc_action *act;
        struct tcf_exts *exts;
-       LIST_HEAD(actions);
+       int i;
 
        exts = cls->knode.exts;
        if (!tcf_exts_has_actions(exts))
@@ -69,8 +69,7 @@ static int tc_fill_actions(struct stmmac_tc_entry *entry,
        if (frag)
                action_entry = frag;
 
-       tcf_exts_to_list(exts, &actions);
-       list_for_each_entry(act, &actions, list) {
+       tcf_exts_for_each_action(i, act, exts) {
                /* Accept */
                if (is_tcf_gact_ok(act)) {
                        action_entry->val.af = 1;
index 9263d63..f932923 100644 (file)
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
 config TI_DAVINCI_CPDMA
        tristate "TI DaVinci CPDMA Support"
        depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+       select GENERIC_ALLOCATOR
        ---help---
          This driver supports TI's DaVinci CPDMA dma engine.
 
index 0c1adad..396e1cd 100644 (file)
@@ -170,10 +170,13 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
        struct device_node *node;
        struct cpsw_phy_sel_priv *priv;
 
-       node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+       node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
        if (!node) {
-               dev_err(dev, "Phy mode driver DT not found\n");
-               return;
+               node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+               if (!node) {
+                       dev_err(dev, "Phy mode driver DT not found\n");
+                       return;
+               }
        }
 
        dev = bus_find_device(&platform_bus_type, NULL, node, match);
index 2bdfb39..d8ba512 100644 (file)
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
        w5100_tx_skb(priv->ndev, skb);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct w5100_priv *priv = netdev_priv(ndev);
 
index 56ae573..80fdbff 100644 (file)
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
        netif_wake_queue(ndev);
 }
 
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct w5300_priv *priv = netdev_priv(ndev);
 
index 16ec7af..ba9df43 100644 (file)
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                 sizeof(struct yamdrv_ioctl_mcs));
                if (IS_ERR(ym))
                        return PTR_ERR(ym);
+               if (ym->cmd != SIOCYAMSMCS)
+                       return -EINVAL;
                if (ym->bitrate > YAM_MAXBITRATE) {
                        kfree(ym);
                        return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
                         return -EFAULT;
 
+               if (yi.cmd != SIOCYAMSCFG)
+                       return -EINVAL;
                if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
                        return -EINVAL;         /* Cannot change this parameter when up */
                if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
index 31c3d77..fe01e14 100644 (file)
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev,
 
        net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
        net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+       netdev_info(ndev, "VF slot %u %s\n",
+                   net_device_ctx->vf_serial,
+                   net_device_ctx->vf_alloc ? "added" : "removed");
 }
 
 static  void netvsc_receive_inband(struct net_device *ndev,
index 507f681..3af6d8d 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/netdevice.h>
 #include <linux/inetdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/pci.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/in.h>
@@ -1893,20 +1894,6 @@ out_unlock:
        rtnl_unlock();
 }
 
-static struct net_device *get_netvsc_bymac(const u8 *mac)
-{
-       struct net_device_context *ndev_ctx;
-
-       list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
-               struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
-
-               if (ether_addr_equal(mac, dev->perm_addr))
-                       return dev;
-       }
-
-       return NULL;
-}
-
 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
 {
        struct net_device_context *net_device_ctx;
@@ -2035,22 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w)
        rtnl_unlock();
 }
 
+/* Find netvsc by VMBus serial number.
+ * The PCI hyperv controller records the serial number as the slot.
+ */
+static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+{
+       struct device *parent = vf_netdev->dev.parent;
+       struct net_device_context *ndev_ctx;
+       struct pci_dev *pdev;
+
+       if (!parent || !dev_is_pci(parent))
+               return NULL; /* not a PCI device */
+
+       pdev = to_pci_dev(parent);
+       if (!pdev->slot) {
+               netdev_notice(vf_netdev, "no PCI slot information\n");
+               return NULL;
+       }
+
+       list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+               if (!ndev_ctx->vf_alloc)
+                       continue;
+
+               if (ndev_ctx->vf_serial == pdev->slot->number)
+                       return hv_get_drvdata(ndev_ctx->device_ctx);
+       }
+
+       netdev_notice(vf_netdev,
+                     "no netdev found for slot %u\n", pdev->slot->number);
+       return NULL;
+}
+
 static int netvsc_register_vf(struct net_device *vf_netdev)
 {
-       struct net_device *ndev;
        struct net_device_context *net_device_ctx;
        struct netvsc_device *netvsc_dev;
+       struct net_device *ndev;
        int ret;
 
        if (vf_netdev->addr_len != ETH_ALEN)
                return NOTIFY_DONE;
 
-       /*
-        * We will use the MAC address to locate the synthetic interface to
-        * associate with the VF interface. If we don't find a matching
-        * synthetic interface, move on.
-        */
-       ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+       ndev = get_netvsc_byslot(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;
 
@@ -2201,6 +2214,16 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       /* We must get rtnl lock before scheduling nvdev->subchan_work,
+        * otherwise netvsc_subchan_work() can get rtnl lock first and wait
+        * all subchannels to show up, but that may not happen because
+        * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
+        * -> ... -> device_add() -> ... -> __device_attach() can't get
+        * the device lock, so all the subchannels can't be processed --
+        * finally netvsc_subchan_work() hangs for ever.
+        */
+       rtnl_lock();
+
        if (nvdev->num_chn > 1)
                schedule_work(&nvdev->subchan_work);
 
@@ -2219,7 +2242,6 @@ static int netvsc_probe(struct hv_device *dev,
        else
                net->max_mtu = ETH_DATA_LEN;
 
-       rtnl_lock();
        ret = register_netdevice(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
@@ -2258,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev)
 
        cancel_delayed_work_sync(&ndev_ctx->dwork);
 
-       rcu_read_lock();
-       nvdev = rcu_dereference(ndev_ctx->nvdev);
-
-       if  (nvdev)
+       rtnl_lock();
+       nvdev = rtnl_dereference(ndev_ctx->nvdev);
+       if (nvdev)
                cancel_work_sync(&nvdev->subchan_work);
 
        /*
         * Call to the vsc driver to let it know that the device is being
         * removed. Also blocks mtu and channel changes.
         */
-       rtnl_lock();
        vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
        if (vf_netdev)
                netvsc_unregister_vf(vf_netdev);
@@ -2280,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev)
        list_del(&ndev_ctx->list);
 
        rtnl_unlock();
-       rcu_read_unlock();
 
        hv_set_drvdata(dev, NULL);
 
index 23a52b9..cd1d8fa 100644 (file)
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
 {
        struct adf7242_local *lp = spi_get_drvdata(spi);
 
-       if (!IS_ERR_OR_NULL(lp->debugfs_root))
-               debugfs_remove_recursive(lp->debugfs_root);
+       debugfs_remove_recursive(lp->debugfs_root);
 
        cancel_delayed_work_sync(&lp->work);
        destroy_workqueue(lp->wqueue);
index 58299fb..0ff5a40 100644 (file)
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
        for (i = 0; i < len; i++)
                dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
 
-       fifo_buffer = kmalloc(len, GFP_KERNEL);
+       fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
        if (!fifo_buffer)
                return -ENOMEM;
-       memcpy(fifo_buffer, buf, len);
        kfifo_in(&test->up_fifo, &fifo_buffer, 4);
        wake_up_interruptible(&priv->test.readq);
 
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
 {
        struct ca8210_test *test = &priv->test;
 
-       if (!IS_ERR(test->ca8210_dfs_spi_int))
-               debugfs_remove(test->ca8210_dfs_spi_int);
+       debugfs_remove(test->ca8210_dfs_spi_int);
        kfifo_free(&test->up_fifo);
        dev_info(&priv->spi->dev, "Test interface removed\n");
 }
index e428277..0489142 100644 (file)
@@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context)
 
        switch (seq_state) {
        /* TX IRQ, RX IRQ and SEQ IRQ */
-       case (0x03):
+       case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        lp->is_tx = 0;
                        dev_dbg(printdev(lp), "TX is done. No ACK\n");
                        mcr20a_handle_tx_complete(lp);
                }
                break;
-       case (0x05):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
                        /* rx is starting */
                        dev_dbg(printdev(lp), "RX is starting\n");
                        mcr20a_handle_rx(lp);
                break;
-       case (0x07):
+       case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        /* tx is done */
                        lp->is_tx = 0;
@@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
                        mcr20a_handle_rx(lp);
                }
                break;
-       case (0x01):
+       case (DAR_IRQSTS1_SEQIRQ):
                if (lp->is_tx) {
                        dev_dbg(printdev(lp), "TX is starting\n");
                        mcr20a_handle_tx(lp);
index db1172d..19ab8a7 100644 (file)
@@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
        if (!netdev)
                return !phydev->suspended;
 
-       /* Don't suspend PHY if the attached netdev parent may wakeup.
+       if (netdev->wol_enabled)
+               return false;
+
+       /* As long as not all affected network drivers support the
+        * wol_enabled flag, let's check for hints that WoL is enabled.
+        * Don't suspend PHY if the attached netdev parent may wake up.
         * The parent may point to a PCI device, as in tg3 driver.
         */
        if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
                sysfs_remove_link(&dev->dev.kobj, "phydev");
                sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
        }
+       phy_suspend(phydev);
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
-       phy_suspend(phydev);
        phydev->phylink = NULL;
 
        phy_led_triggers_unregister(phydev);
@@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
 int phy_suspend(struct phy_device *phydev)
 {
        struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+       struct net_device *netdev = phydev->attached_dev;
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
        int ret = 0;
 
        /* If the device has WOL enabled, we cannot suspend the PHY */
        phy_ethtool_get_wol(phydev, &wol);
-       if (wol.wolopts)
+       if (wol.wolopts || (netdev && netdev->wol_enabled))
                return -EBUSY;
 
        if (phydev->drv && phydrv->suspend)
index 3ba5cf2..7abca86 100644 (file)
@@ -717,6 +717,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
        return 0;
 }
 
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+               phy_interface_t interface)
+{
+       int ret;
+
+       if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+                   (pl->link_an_mode == MLO_AN_INBAND &&
+                    phy_interface_mode_is_8023z(interface))))
+               return -EINVAL;
+
+       if (pl->phydev)
+               return -EBUSY;
+
+       ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+       if (ret)
+               return ret;
+
+       ret = phylink_bringup_phy(pl, phy);
+       if (ret)
+               phy_detach(phy);
+
+       return ret;
+}
+
 /**
  * phylink_connect_phy() - connect a PHY to the phylink instance
  * @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -734,31 +758,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
  */
 int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
 {
-       int ret;
-
-       if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
-                   (pl->link_an_mode == MLO_AN_INBAND &&
-                    phy_interface_mode_is_8023z(pl->link_interface))))
-               return -EINVAL;
-
-       if (pl->phydev)
-               return -EBUSY;
-
        /* Use PHY device/driver interface */
        if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
                pl->link_interface = phy->interface;
                pl->link_config.interface = pl->link_interface;
        }
 
-       ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
-       if (ret)
-               return ret;
-
-       ret = phylink_bringup_phy(pl, phy);
-       if (ret)
-               phy_detach(phy);
-
-       return ret;
+       return __phylink_connect_phy(pl, phy, pl->link_interface);
 }
 EXPORT_SYMBOL_GPL(phylink_connect_phy);
 
@@ -1672,7 +1678,9 @@ static void phylink_sfp_link_up(void *upstream)
 
 static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
 {
-       return phylink_connect_phy(upstream, phy);
+       struct phylink *pl = upstream;
+
+       return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
 }
 
 static void phylink_sfp_disconnect_phy(void *upstream)
index 7406552..83060fb 100644 (file)
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
        }
        if (bus->started)
                bus->socket_ops->start(bus->sfp);
+       bus->netdev->sfp_bus = bus;
        bus->registered = true;
        return 0;
 }
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 {
        const struct sfp_upstream_ops *ops = bus->upstream_ops;
 
+       bus->netdev->sfp_bus = NULL;
        if (bus->registered) {
                if (bus->started)
                        bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
 {
        bus->upstream_ops = NULL;
        bus->upstream = NULL;
-       bus->netdev->sfp_bus = NULL;
        bus->netdev = NULL;
 }
 
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
                bus->upstream_ops = ops;
                bus->upstream = upstream;
                bus->netdev = ndev;
-               ndev->sfp_bus = bus;
 
                if (bus->sfp) {
                        ret = sfp_register_bus(bus);
index 4637d98..6e13b88 100644 (file)
@@ -398,7 +398,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
        switch (type) {
        case hwmon_temp:
                switch (attr) {
-               case hwmon_temp_input:
                case hwmon_temp_min_alarm:
                case hwmon_temp_max_alarm:
                case hwmon_temp_lcrit_alarm:
@@ -407,13 +406,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_temp_max:
                case hwmon_temp_lcrit:
                case hwmon_temp_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_temp_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_in:
                switch (attr) {
-               case hwmon_in_input:
                case hwmon_in_min_alarm:
                case hwmon_in_max_alarm:
                case hwmon_in_lcrit_alarm:
@@ -422,13 +424,16 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_in_max:
                case hwmon_in_lcrit:
                case hwmon_in_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_in_input:
                        return 0444;
                default:
                        return 0;
                }
        case hwmon_curr:
                switch (attr) {
-               case hwmon_curr_input:
                case hwmon_curr_min_alarm:
                case hwmon_curr_max_alarm:
                case hwmon_curr_lcrit_alarm:
@@ -437,6 +442,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_curr_max:
                case hwmon_curr_lcrit:
                case hwmon_curr_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_curr_input:
                        return 0444;
                default:
                        return 0;
@@ -452,7 +461,6 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                    channel == 1)
                        return 0;
                switch (attr) {
-               case hwmon_power_input:
                case hwmon_power_min_alarm:
                case hwmon_power_max_alarm:
                case hwmon_power_lcrit_alarm:
@@ -461,6 +469,10 @@ static umode_t sfp_hwmon_is_visible(const void *data,
                case hwmon_power_max:
                case hwmon_power_lcrit:
                case hwmon_power_crit:
+                       if (!(sfp->id.ext.enhopts & SFP_ENHOPTS_ALARMWARN))
+                               return 0;
+                       /* fall through */
+               case hwmon_power_input:
                        return 0444;
                default:
                        return 0;
@@ -1086,8 +1098,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
 
 static void sfp_hwmon_remove(struct sfp *sfp)
 {
-       hwmon_device_unregister(sfp->hwmon_dev);
-       kfree(sfp->hwmon_name);
+       if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+               hwmon_device_unregister(sfp->hwmon_dev);
+               sfp->hwmon_dev = NULL;
+               kfree(sfp->hwmon_name);
+       }
 }
 #else
 static int sfp_hwmon_insert(struct sfp *sfp)
index ce61231..62dc564 100644 (file)
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
        if (!skb)
                goto out;
 
+       if (skb_mac_header_len(skb) < ETH_HLEN)
+               goto drop;
+
        if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
                goto drop;
 
index 6a047d3..d887016 100644 (file)
@@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
                return -EBUSY;
        }
 
+       if (dev == port_dev) {
+               NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+               netdev_err(dev, "Cannot enslave team device to itself\n");
+               return -EINVAL;
+       }
+
        if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
            vlan_uses_dev(dev)) {
                NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
index ebd07ad..50e9cc1 100644 (file)
@@ -181,6 +181,7 @@ struct tun_file {
        };
        struct napi_struct napi;
        bool napi_enabled;
+       bool napi_frags_enabled;
        struct mutex napi_mutex;        /* Protects access to the above napi */
        struct list_head next;
        struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
 }
 
 static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
-                         bool napi_en)
+                         bool napi_en, bool napi_frags)
 {
        tfile->napi_enabled = napi_en;
+       tfile->napi_frags_enabled = napi_en && napi_frags;
        if (napi_en) {
                netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
                               NAPI_POLL_WEIGHT);
                napi_enable(&tfile->napi);
-               mutex_init(&tfile->napi_mutex);
        }
 }
 
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                napi_disable(&tfile->napi);
 }
 
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
 {
        if (tfile->napi_enabled)
                netif_napi_del(&tfile->napi);
 }
 
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 {
-       return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+       return tfile->napi_frags_enabled;
 }
 
 #ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
        tun = rtnl_dereference(tfile->tun);
 
        if (tun && clean) {
-               tun_napi_disable(tun, tfile);
-               tun_napi_del(tun, tfile);
+               tun_napi_disable(tfile);
+               tun_napi_del(tfile);
        }
 
        if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
                BUG_ON(!tfile);
-               tun_napi_disable(tun, tfile);
+               tun_napi_disable(tfile);
                tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
                tfile->socket.sk->sk_data_ready(tfile->socket.sk);
                RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
        synchronize_net();
        for (i = 0; i < n; i++) {
                tfile = rtnl_dereference(tun->tfiles[i]);
-               tun_napi_del(tun, tfile);
+               tun_napi_del(tfile);
                /* Drop read queue */
                tun_queue_purge(tfile);
                xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
 }
 
 static int tun_attach(struct tun_struct *tun, struct file *file,
-                     bool skip_filter, bool napi)
+                     bool skip_filter, bool napi, bool napi_frags)
 {
        struct tun_file *tfile = file->private_data;
        struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
                tun_enable_queue(tfile);
        } else {
                sock_hold(&tfile->sk);
-               tun_napi_init(tun, tfile, napi);
+               tun_napi_init(tun, tfile, napi, napi_frags);
        }
 
        tun_set_real_num_queues(tun);
@@ -1153,43 +1154,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
 
        return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
-       /*
-        * Tun only receives frames when:
-        * 1) the char device endpoint gets data from user space
-        * 2) the tun socket gets a sendmsg call from user space
-        * If NAPI is not enabled, since both of those are synchronous
-        * operations, we are guaranteed never to have pending data when we poll
-        * for it so there is nothing to do here but return.
-        * We need this though so netpoll recognizes us as an interface that
-        * supports polling, which enables bridge devices in virt setups to
-        * still use netconsole
-        * If NAPI is enabled, however, we need to schedule polling for all
-        * queues unless we are using napi_gro_frags(), which we call in
-        * process context and not in NAPI context.
-        */
-       struct tun_struct *tun = netdev_priv(dev);
-
-       if (tun->flags & IFF_NAPI) {
-               struct tun_file *tfile;
-               int i;
-
-               if (tun_napi_frags_enabled(tun))
-                       return;
-
-               rcu_read_lock();
-               for (i = 0; i < tun->numqueues; i++) {
-                       tfile = rcu_dereference(tun->tfiles[i]);
-                       if (tfile->napi_enabled)
-                               napi_schedule(&tfile->napi);
-               }
-               rcu_read_unlock();
-       }
-       return;
-}
-#endif
 
 static void tun_set_headroom(struct net_device *dev, int new_hr)
 {
@@ -1283,9 +1247,6 @@ static const struct net_device_ops tun_netdev_ops = {
        .ndo_start_xmit         = tun_net_xmit,
        .ndo_fix_features       = tun_net_fix_features,
        .ndo_select_queue       = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = tun_poll_controller,
-#endif
        .ndo_set_rx_headroom    = tun_set_headroom,
        .ndo_get_stats64        = tun_net_get_stats64,
 };
@@ -1365,9 +1326,6 @@ static const struct net_device_ops tap_netdev_ops = {
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_select_queue       = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = tun_poll_controller,
-#endif
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = tun_set_headroom,
        .ndo_get_stats64        = tun_net_get_stats64,
@@ -1752,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int err;
        u32 rxhash = 0;
        int skb_xdp = 1;
-       bool frags = tun_napi_frags_enabled(tun);
+       bool frags = tun_napi_frags_enabled(tfile);
 
        if (!(tun->dev->flags & IFF_UP))
                return -EIO;
@@ -2577,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        return err;
 
                err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
-                                ifr->ifr_flags & IFF_NAPI);
+                                ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        return err;
 
@@ -2675,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                              (ifr->ifr_flags & TUN_FEATURES);
 
                INIT_LIST_HEAD(&tun->disabled);
-               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+               err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+                                ifr->ifr_flags & IFF_NAPI_FRAGS);
                if (err < 0)
                        goto err_free_flow;
 
@@ -2824,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
                ret = security_tun_dev_attach_queue(tun->security);
                if (ret < 0)
                        goto unlock;
-               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+               ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+                                tun->flags & IFF_NAPI_FRAGS);
        } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
                tun = rtnl_dereference(tfile->tun);
                if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3242,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
                return -ENOMEM;
        }
 
+       mutex_init(&tfile->napi_mutex);
        RCU_INIT_POINTER(tfile->tun, NULL);
        tfile->flags = 0;
        tfile->ifindex = 0;
index e95dd12..023b8d0 100644 (file)
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 9e8ad37..2207f7a 100644 (file)
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= AX_MONITOR_MODE_RWLC;
        if (wolinfo->wolopts & WAKE_MAGIC)
index a9991c5..c3c9ba4 100644 (file)
@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
        if (ret < 0)
                return ret;
 
-       pdata->wol = 0;
-       if (wol->wolopts & WAKE_UCAST)
-               pdata->wol |= WAKE_UCAST;
-       if (wol->wolopts & WAKE_MCAST)
-               pdata->wol |= WAKE_MCAST;
-       if (wol->wolopts & WAKE_BCAST)
-               pdata->wol |= WAKE_BCAST;
-       if (wol->wolopts & WAKE_MAGIC)
-               pdata->wol |= WAKE_MAGIC;
-       if (wol->wolopts & WAKE_PHY)
-               pdata->wol |= WAKE_PHY;
-       if (wol->wolopts & WAKE_ARP)
-               pdata->wol |= WAKE_ARP;
+       if (wol->wolopts & ~WAKE_ALL)
+               return -EINVAL;
+
+       pdata->wol = wol->wolopts;
 
        device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
 
index cb0cc30..533b6fb 100644 (file)
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
                USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
                .driver_info = (unsigned long)&qmi_wwan_info,
        },
+       {       /* Quectel EP06/EG06/EM06 */
+               USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
+                                             USB_CLASS_VENDOR_SPEC,
+                                             USB_SUBCLASS_VENDOR_SPEC,
+                                             0xff),
+               .driver_info        = (unsigned long)&qmi_wwan_info_quirk_dtr,
+       },
 
        /* 3. Combined interface devices matching on interface number */
        {QMI_FIXED_INTF(0x0408, 0xea42, 4)},    /* Yota / Megafon M100-1 */
@@ -1206,13 +1213,13 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9063, 8)},    /* Sierra Wireless EM7305 */
        {QMI_FIXED_INTF(0x1199, 0x9063, 10)},   /* Sierra Wireless EM7305 */
-       {QMI_FIXED_INTF(0x1199, 0x9071, 8)},    /* Sierra Wireless MC74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9071, 10)},   /* Sierra Wireless MC74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9079, 8)},    /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9079, 10)},   /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x907b, 8)},    /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x907b, 10)},   /* Sierra Wireless EM74xx */
-       {QMI_FIXED_INTF(0x1199, 0x9091, 8)},    /* Sierra Wireless EM7565 */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
+       {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
@@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
-       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
 
        /* 4. Gobi 1000 devices */
        {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */
@@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
        return false;
 }
 
+static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+{
+       struct usb_device *dev = interface_to_usbdev(intf);
+       struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+
+       if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+           le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+           intf_desc.bNumEndpoints == 2)
+               return true;
+
+       return false;
+}
+
 static int qmi_wwan_probe(struct usb_interface *intf,
                          const struct usb_device_id *prod)
 {
@@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
                return -ENODEV;
        }
 
+       /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+        * we need to match on class/subclass/protocol. These values are
+        * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+        * different. Ignore the current interface if the number of endpoints
+        * matches the number for the diag interface (two).
+        */
+       if (quectel_ep06_diag_detected(intf))
+               return -ENODEV;
+
        return usbnet_probe(intf, id);
 }
 
index 9774270..f1b5201 100644 (file)
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
        if (!rtl_can_wakeup(tp))
                return -EOPNOTSUPP;
 
+       if (wol->wolopts & ~WAKE_ANY)
+               return -EINVAL;
+
        ret = usb_autopm_get_interface(tp->intf);
        if (ret < 0)
                goto out_set_wol;
@@ -5217,8 +5220,8 @@ static int rtl8152_probe(struct usb_interface *intf,
                netdev->hw_features &= ~NETIF_F_RXCSUM;
        }
 
-       if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 &&
-           udev->serial && !strcmp(udev->serial, "000001000000")) {
+       if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x3011 && udev->serial &&
+           (!strcmp(udev->serial, "000001000000") || !strcmp(udev->serial, "000002000000"))) {
                dev_info(&udev->dev, "Dell TB16 Dock, disable RX aggregation");
                set_bit(DELL_TB_RX_AGG_BUG, &tp->flags);
        }
index 05553d2..ec287c9 100644 (file)
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
 {
        struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
        if (pdata) {
+               cancel_work_sync(&pdata->set_multicast);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
index 06b4d29..262e7a3 100644 (file)
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
        int ret;
 
+       if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+               return -EINVAL;
+
        pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
 
        ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
index 9277a0f..35f39f2 100644 (file)
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
        struct usbnet *dev = netdev_priv(net);
        u8 opt = 0;
 
+       if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+               return -EINVAL;
+
        if (wolinfo->wolopts & WAKE_PHY)
                opt |= SR_MONITOR_LINK;
        if (wolinfo->wolopts & WAKE_MAGIC)
index 8d679c8..41a00cd 100644 (file)
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
        int mac_len, delta, off;
        struct xdp_buff xdp;
 
+       skb_orphan(skb);
+
        rcu_read_lock();
        xdp_prog = rcu_dereference(rq->xdp_prog);
        if (unlikely(!xdp_prog)) {
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
                skb_copy_header(nskb, skb);
                head_off = skb_headroom(nskb) - skb_headroom(skb);
                skb_headers_offset_update(nskb, head_off);
-               if (skb->sk)
-                       skb_set_owner_w(nskb, skb->sk);
                consume_skb(skb);
                skb = nskb;
        }
index 7659209..dab504e 100644 (file)
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
        tot->rx_frame_errors = dev->stats.rx_frame_errors;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       int i;
-
-       for (i = 0; i < vi->curr_queue_pairs; i++)
-               napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
 static void virtnet_ack_link_announce(struct virtnet_info *vi)
 {
        rtnl_lock();
@@ -2447,9 +2436,6 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller = virtnet_netpoll,
-#endif
        .ndo_bpf                = virtnet_xdp,
        .ndo_xdp_xmit           = virtnet_xdp_xmit,
        .ndo_features_check     = passthru_features_check,
index ababba3..2b8da2b 100644 (file)
@@ -3539,6 +3539,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
                nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
                nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL */
+               nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TTL_INHERIT */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_TOS */
                nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
                nla_total_size(sizeof(__u8)) +  /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3604,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
        }
 
        if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+           nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+                      !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
            nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
            nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
            nla_put_u8(skb, IFLA_VXLAN_LEARNING,
index 094cea7..ef298d8 100644 (file)
@@ -257,7 +257,7 @@ static const struct
        [I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
        [I2400M_MS_BUSY] = { "busy", -EBUSY },
        [I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
-       [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+       [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
        [I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
        [I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
        [I2400M_MS_NO_RF] = { "no RF", -EIO },
index 6b0e1ec..d46d57b 100644 (file)
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
                        }
                } else {
                        /* More than a single header/data pair were missed.
-                        * Report this error, and reset the controller to
+                        * Report this error. If running with open-source
+                        * firmware, then reset the controller to
                         * revive operation.
                         */
                        b43dbg(dev->wl,
                               "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
                               ring->index, firstused, slot);
-                       b43_controller_restart(dev, "Out of order TX");
+                       if (dev->fw.opensource)
+                               b43_controller_restart(dev, "Out of order TX");
                        return;
                }
        }
index 5916879..497fd76 100644 (file)
@@ -51,6 +51,7 @@
 
 static const struct iwl_base_params iwl1000_base_params = {
        .num_of_queues = IWLAGN_NUM_QUEUES,
+       .max_tfd_queue_size = 256,
        .eeprom_size = OTP_LOW_IMAGE_SIZE,
        .pll_cfg = true,
        .max_ll_items = OTP_MAX_LL_ITEMS_1000,
index b4c3a95..73969db 100644 (file)
@@ -985,15 +985,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
                             iwl_ext_nvm_channels : iwl_nvm_channels;
        struct ieee80211_regdomain *regd, *copy_rd;
-       int size_of_regd, regd_to_copy, wmms_to_copy;
-       int size_of_wmms = 0;
+       int size_of_regd, regd_to_copy;
        struct ieee80211_reg_rule *rule;
-       struct ieee80211_wmm_rule *wmm_rule, *d_wmm, *s_wmm;
        struct regdb_ptrs *regdb_ptrs;
        enum nl80211_band band;
        int center_freq, prev_center_freq = 0;
-       int valid_rules = 0, n_wmms = 0;
-       int i;
+       int valid_rules = 0;
        bool new_rule;
        int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
                         IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
@@ -1012,11 +1009,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                sizeof(struct ieee80211_regdomain) +
                num_of_ch * sizeof(struct ieee80211_reg_rule);
 
-       if (geo_info & GEO_WMM_ETSI_5GHZ_INFO)
-               size_of_wmms =
-                       num_of_ch * sizeof(struct ieee80211_wmm_rule);
-
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
@@ -1030,8 +1023,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd->alpha2[0] = fw_mcc >> 8;
        regd->alpha2[1] = fw_mcc & 0xff;
 
-       wmm_rule = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
        for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
                ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
                band = (ch_idx < NUM_2GHZ_CHANNELS) ?
@@ -1085,26 +1076,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
                    band == NL80211_BAND_2GHZ)
                        continue;
 
-               if (!reg_query_regdb_wmm(regd->alpha2, center_freq,
-                                        &regdb_ptrs[n_wmms].token, wmm_rule)) {
-                       /* Add only new rules */
-                       for (i = 0; i < n_wmms; i++) {
-                               if (regdb_ptrs[i].token ==
-                                   regdb_ptrs[n_wmms].token) {
-                                       rule->wmm_rule = regdb_ptrs[i].rule;
-                                       break;
-                               }
-                       }
-                       if (i == n_wmms) {
-                               rule->wmm_rule = wmm_rule;
-                               regdb_ptrs[n_wmms++].rule = wmm_rule;
-                               wmm_rule++;
-                       }
-               }
+               reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
        }
 
        regd->n_reg_rules = valid_rules;
-       regd->n_wmm_rules = n_wmms;
 
        /*
         * Narrow down regdom for unused regulatory rules to prevent hole
@@ -1113,28 +1088,13 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
        regd_to_copy = sizeof(struct ieee80211_regdomain) +
                valid_rules * sizeof(struct ieee80211_reg_rule);
 
-       wmms_to_copy = sizeof(struct ieee80211_wmm_rule) * n_wmms;
-
-       copy_rd = kzalloc(regd_to_copy + wmms_to_copy, GFP_KERNEL);
+       copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
        if (!copy_rd) {
                copy_rd = ERR_PTR(-ENOMEM);
                goto out;
        }
 
        memcpy(copy_rd, regd, regd_to_copy);
-       memcpy((u8 *)copy_rd + regd_to_copy, (u8 *)regd + size_of_regd,
-              wmms_to_copy);
-
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)copy_rd + regd_to_copy);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-
-       for (i = 0; i < regd->n_reg_rules; i++) {
-               if (!regd->reg_rules[i].wmm_rule)
-                       continue;
-
-               copy_rd->reg_rules[i].wmm_rule = d_wmm +
-                       (regd->reg_rules[i].wmm_rule - s_wmm);
-       }
 
 out:
        kfree(regdb_ptrs);
index 998dfac..07442ad 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <linux/rhashtable.h>
+#include <linux/nospec.h>
 #include "mac80211_hwsim.h"
 
 #define WARN_QUEUE 100
@@ -519,7 +520,6 @@ struct mac80211_hwsim_data {
        int channels, idx;
        bool use_chanctx;
        bool destroy_on_close;
-       struct work_struct destroy_work;
        u32 portid;
        char alpha2[2];
        const struct ieee80211_regdomain *regd;
@@ -2820,9 +2820,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                IEEE80211_VHT_CAP_SHORT_GI_80 |
                                IEEE80211_VHT_CAP_SHORT_GI_160 |
                                IEEE80211_VHT_CAP_TXSTBC |
-                               IEEE80211_VHT_CAP_RXSTBC_1 |
-                               IEEE80211_VHT_CAP_RXSTBC_2 |
-                               IEEE80211_VHT_CAP_RXSTBC_3 |
                                IEEE80211_VHT_CAP_RXSTBC_4 |
                                IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
                        sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -2937,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
        hwsim_radios_generation++;
        spin_unlock_bh(&hwsim_radio_lock);
 
-       if (idx > 0)
-               hwsim_mcast_new_radio(idx, info, param);
+       hwsim_mcast_new_radio(idx, info, param);
 
        return idx;
 
@@ -3317,6 +3313,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
        if (info->attrs[HWSIM_ATTR_CHANNELS])
                param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
 
+       if (param.channels < 1) {
+               GENL_SET_ERR_MSG(info, "must have at least one channel");
+               return -EINVAL;
+       }
+
        if (param.channels > CFG80211_MAX_NUM_DIFFERENT_CHANNELS) {
                GENL_SET_ERR_MSG(info, "too many channels specified");
                return -EINVAL;
@@ -3350,6 +3351,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        kfree(hwname);
                        return -EINVAL;
                }
+
+               idx = array_index_nospec(idx,
+                                        ARRAY_SIZE(hwsim_world_regdom_custom));
                param.regd = hwsim_world_regdom_custom[idx];
        }
 
@@ -3559,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
        .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
 };
 
-static void destroy_radio(struct work_struct *work)
-{
-       struct mac80211_hwsim_data *data =
-               container_of(work, struct mac80211_hwsim_data, destroy_work);
-
-       hwsim_radios_generation++;
-       mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
 static void remove_user_radios(u32 portid)
 {
        struct mac80211_hwsim_data *entry, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
                if (entry->destroy_on_close && entry->portid == portid) {
-                       list_del(&entry->list);
+                       list_move(&entry->list, &list);
                        rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
                                               hwsim_rht_params);
-                       INIT_WORK(&entry->destroy_work, destroy_radio);
-                       queue_work(hwsim_wq, &entry->destroy_work);
+                       hwsim_radios_generation++;
                }
        }
        spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(entry, tmp, &list, list) {
+               list_del(&entry->list);
+               mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+                                        NULL);
+       }
 }
 
 static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3640,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
 static void __net_exit hwsim_exit_net(struct net *net)
 {
        struct mac80211_hwsim_data *data, *tmp;
+       LIST_HEAD(list);
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3650,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
                if (data->netgroup == hwsim_net_get_netgroup(&init_net))
                        continue;
 
-               list_del(&data->list);
+               list_move(&data->list, &list);
                rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
                                       hwsim_rht_params);
                hwsim_radios_generation++;
-               spin_unlock_bh(&hwsim_radio_lock);
+       }
+       spin_unlock_bh(&hwsim_radio_lock);
+
+       list_for_each_entry_safe(data, tmp, &list, list) {
+               list_del(&data->list);
                mac80211_hwsim_del_radio(data,
                                         wiphy_name(data->hw->wiphy),
                                         NULL);
-               spin_lock_bh(&hwsim_radio_lock);
        }
-       spin_unlock_bh(&hwsim_radio_lock);
 
        ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
 }
index cf6ffb1..22bc9d3 100644 (file)
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
 {
        struct mt76x0_dev *dev = hw->priv;
        struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
-       unsigned int wcid = mvif->group_wcid.idx;
 
-       dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+       dev->vif_mask &= ~BIT(mvif->idx);
 }
 
 static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
index a46a1e9..936c0b3 100644 (file)
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
 struct xenvif_hash {
        unsigned int alg;
        u32 flags;
+       bool mapping_sel;
        u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
-       u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+       u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
        unsigned int size;
        struct xenvif_hash_cache cache;
 };
index 3c4c58b..0ccb021 100644 (file)
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
        vif->hash.size = size;
-       memset(vif->hash.mapping, 0, sizeof(u32) * size);
+       memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+              sizeof(u32) * size);
 
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
 u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
                            u32 off)
 {
-       u32 *mapping = &vif->hash.mapping[off];
-       struct gnttab_copy copy_op = {
+       u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+       unsigned int nr = 1;
+       struct gnttab_copy copy_op[2] = {{
                .source.u.ref = gref,
                .source.domid = vif->domid,
-               .dest.u.gmfn = virt_to_gfn(mapping),
                .dest.domid = DOMID_SELF,
-               .dest.offset = xen_offset_in_page(mapping),
-               .len = len * sizeof(u32),
+               .len = len * sizeof(*mapping),
                .flags = GNTCOPY_source_gref
-       };
+       }};
 
-       if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+       if ((off + len < off) || (off + len > vif->hash.size) ||
+           len > XEN_PAGE_SIZE / sizeof(*mapping))
                return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
 
-       while (len-- != 0)
-               if (mapping[off++] >= vif->num_queues)
-                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+       copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+       copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+       if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+               copy_op[1] = copy_op[0];
+               copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+               copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+               copy_op[1].dest.offset = 0;
+               copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+               copy_op[0].len = copy_op[1].source.offset;
+               nr = 2;
+       }
 
-       if (copy_op.len != 0) {
-               gnttab_batch_copy(&copy_op, 1);
+       memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+              vif->hash.size * sizeof(*mapping));
 
-               if (copy_op.status != GNTST_okay)
+       if (copy_op[0].len != 0) {
+               gnttab_batch_copy(copy_op, nr);
+
+               if (copy_op[0].status != GNTST_okay ||
+                   copy_op[nr - 1].status != GNTST_okay)
                        return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
        }
 
+       while (len-- != 0)
+               if (mapping[off++] >= vif->num_queues)
+                       return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+       vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
        return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
        }
 
        if (vif->hash.size != 0) {
+               const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
                seq_puts(m, "\nHash Mapping:\n");
 
                for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
                        seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
 
                        for (j = 0; j < n; j++, i++)
-                               seq_printf(m, "%4u ", vif->hash.mapping[i]);
+                               seq_printf(m, "%4u ", mapping[i]);
 
                        seq_puts(m, "\n");
                }
index 92274c2..f6ae23f 100644 (file)
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
 
-       return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+       return vif->hash.mapping[vif->hash.mapping_sel]
+                               [skb_get_hash_raw(skb) % size];
 }
 
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
index 73f596a..f17f602 100644 (file)
@@ -87,8 +87,7 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
-static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 
 struct netfront_stats {
        u64                     packets;
@@ -909,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                        BUG_ON(pull_to <= skb_headlen(skb));
                        __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
                }
-               BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+               if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+                       queue->rx.rsp_cons = ++cons;
+                       kfree_skb(nskb);
+                       return ~0U;
+               }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                skb_frag_page(nfrag),
@@ -1046,6 +1049,8 @@ err:
                skb->len += rx->status;
 
                i = xennet_fill_frags(queue, skb, &tmpq);
+               if (unlikely(i == ~0U))
+                       goto err;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
                        skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1332,11 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        netif_carrier_off(netdev);
 
        xenbus_switch_state(dev, XenbusStateInitialising);
-       wait_event(module_load_q,
-                          xenbus_read_driver_state(dev->otherend) !=
-                          XenbusStateClosed &&
-                          xenbus_read_driver_state(dev->otherend) !=
-                          XenbusStateUnknown);
+       wait_event(module_wq,
+                  xenbus_read_driver_state(dev->otherend) !=
+                  XenbusStateClosed &&
+                  xenbus_read_driver_state(dev->otherend) !=
+                  XenbusStateUnknown);
        return netdev;
 
  exit:
@@ -2010,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev,
 
        dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
 
+       wake_up_all(&module_wq);
+
        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitialised:
        case XenbusStateReconfiguring:
        case XenbusStateReconfigured:
-               break;
-
        case XenbusStateUnknown:
-               wake_up_all(&module_unload_q);
                break;
 
        case XenbusStateInitWait:
@@ -2034,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
                break;
 
        case XenbusStateClosed:
-               wake_up_all(&module_unload_q);
                if (dev->state == XenbusStateClosed)
                        break;
                /* Missed the backend's CLOSING state -- fallthrough */
        case XenbusStateClosing:
-               wake_up_all(&module_unload_q);
                xenbus_frontend_closed(dev);
                break;
        }
@@ -2147,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev)
 
        if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
                xenbus_switch_state(dev, XenbusStateClosing);
-               wait_event(module_unload_q,
+               wait_event(module_wq,
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateClosing ||
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateUnknown);
 
                xenbus_switch_state(dev, XenbusStateClosed);
-               wait_event(module_unload_q,
+               wait_event(module_wq,
                           xenbus_read_driver_state(dev->otherend) ==
                           XenbusStateClosed ||
                           xenbus_read_driver_state(dev->otherend) ==
index 5a95628..9fe3fff 100644 (file)
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
        INIT_WORK(&ctrl->ana_work, nvme_ana_work);
        ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-       if (!ctrl->ana_log_buf)
+       if (!ctrl->ana_log_buf) {
+               error = -ENOMEM;
                goto out;
+       }
 
        error = nvme_read_ana_log(ctrl, true);
        if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
        kfree(ctrl->ana_log_buf);
 out:
-       return -ENOMEM;
+       return error;
 }
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
index 1b9951d..d668682 100644 (file)
@@ -316,6 +316,14 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
                old_value = *dbbuf_db;
                *dbbuf_db = value;
 
+               /*
+                * Ensure that the doorbell is updated before reading the event
+                * index from memory.  The controller needs to provide similar
+                * ordering to ensure the event index is updated before reading
+                * the doorbell.
+                */
+               mb();
+
                if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
                        return false;
        }
index a21caea..2008fa6 100644 (file)
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
                offset += len;
                ngrps++;
        }
+       for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+               if (nvmet_ana_group_enabled[grpid])
+                       ngrps++;
+       }
 
        hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
        hdr.ngrps = cpu_to_le16(ngrps);
index ebf3e7a..b5ec96a 100644 (file)
@@ -1210,7 +1210,7 @@ static int __init nvmet_init(void)
 
        error = nvmet_init_discovery();
        if (error)
-               goto out;
+               goto out_free_work_queue;
 
        error = nvmet_init_configfs();
        if (error)
@@ -1219,6 +1219,8 @@ static int __init nvmet_init(void)
 
 out_exit_discovery:
        nvmet_exit_discovery();
+out_free_work_queue:
+       destroy_workqueue(buffered_io_wq);
 out:
        return error;
 }
index 34712de..5251689 100644 (file)
@@ -311,7 +311,7 @@ fcloop_tgt_lsrqst_done_work(struct work_struct *work)
        struct fcloop_tport *tport = tls_req->tport;
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;
 
-       if (tport->remoteport)
+       if (!tport || tport->remoteport)
                lsreq->done(lsreq, tls_req->status);
 }
 
@@ -329,6 +329,7 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
 
        if (!rport->targetport) {
                tls_req->status = -ECONNREFUSED;
+               tls_req->tport = NULL;
                schedule_work(&tls_req->work);
                return ret;
        }
index 3533e91..bfc4da6 100644 (file)
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
 
        struct nvmet_req        req;
 
+       bool                    allocated;
        u8                      n_rdma;
        u32                     flags;
        u32                     invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
        unsigned long flags;
 
        spin_lock_irqsave(&queue->rsps_lock, flags);
-       rsp = list_first_entry(&queue->free_rsps,
+       rsp = list_first_entry_or_null(&queue->free_rsps,
                                struct nvmet_rdma_rsp, free_list);
-       list_del(&rsp->free_list);
+       if (likely(rsp))
+               list_del(&rsp->free_list);
        spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+       if (unlikely(!rsp)) {
+               rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+               if (unlikely(!rsp))
+                       return NULL;
+               rsp->allocated = true;
+       }
+
        return rsp;
 }
 
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
        unsigned long flags;
 
+       if (rsp->allocated) {
+               kfree(rsp);
+               return;
+       }
+
        spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
        list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
        spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
        cmd->queue = queue;
        rsp = nvmet_rdma_get_rsp(queue);
+       if (unlikely(!rsp)) {
+               /*
+                * we get here only under memory pressure,
+                * silently drop and have the host retry
+                * as we can't even fail it.
+                */
+               nvmet_rdma_post_recv(queue->dev, cmd);
+               return;
+       }
        rsp->queue = queue;
        rsp->cmd = cmd;
        rsp->flags = 0;
index 466e3c8..74eaedd 100644 (file)
@@ -54,6 +54,28 @@ DEFINE_MUTEX(of_mutex);
  */
 DEFINE_RAW_SPINLOCK(devtree_lock);
 
+bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+       const char *node_name;
+       size_t len;
+
+       if (!np)
+               return false;
+
+       node_name = kbasename(np->full_name);
+       len = strchrnul(node_name, '@') - node_name;
+
+       return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
+}
+
+bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+       if (!np)
+               return false;
+
+       return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
+}
+
 int of_n_addr_cells(struct device_node *np)
 {
        u32 cells;
@@ -118,6 +140,9 @@ void of_populate_phandle_cache(void)
                if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
                        phandles++;
 
+       if (!phandles)
+               goto out;
+
        cache_entries = roundup_pow_of_two(phandles);
        phandle_cache_mask = cache_entries - 1;
 
@@ -720,6 +745,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
 EXPORT_SYMBOL(of_get_next_available_child);
 
 /**
+ * of_get_compatible_child - Find compatible child node
+ * @parent:    parent node
+ * @compatible:        compatible string
+ *
+ * Lookup child node whose compatible property contains the given compatible
+ * string.
+ *
+ * Returns a node pointer with refcount incremented, use of_node_put() on it
+ * when done; or NULL if not found.
+ */
+struct device_node *of_get_compatible_child(const struct device_node *parent,
+                               const char *compatible)
+{
+       struct device_node *child;
+
+       for_each_child_of_node(parent, child) {
+               if (of_device_is_compatible(child, compatible))
+                       break;
+       }
+
+       return child;
+}
+EXPORT_SYMBOL(of_get_compatible_child);
+
+/**
  *     of_get_child_by_name - Find the child node by name for a given parent
  *     @node:  parent node
  *     @name:  child name to look for.
index 7ba90c2..6c59673 100644 (file)
@@ -241,6 +241,10 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
        if (!dev)
                goto err_clear_flag;
 
+       /* AMBA devices only support a single DMA mask */
+       dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+       dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
+
        /* setup generic device info */
        dev->dev.of_node = of_node_get(node);
        dev->dev.fwnode = &node->fwnode;
index 722537e..41b4971 100644 (file)
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
        struct of_phandle_args args;
        int i, rc;
 
+       if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+               return;
+
        np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
        if (!np) {
                pr_err("missing testcase data\n");
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
        pdev = of_find_device_by_node(np);
        unittest(pdev, "device 1 creation failed\n");
 
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+       if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq == -EPROBE_DEFER,
+                        "device deferred probe failed - %d\n", irq);
 
-       /* Test that a parsing failure does not return -EPROBE_DEFER */
-       np = of_find_node_by_path("/testcase-data/testcase-device2");
-       pdev = of_find_device_by_node(np);
-       unittest(pdev, "device 2 creation failed\n");
-       irq = platform_get_irq(pdev, 0);
-       unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+               /* Test that a parsing failure does not return -EPROBE_DEFER */
+               np = of_find_node_by_path("/testcase-data/testcase-device2");
+               pdev = of_find_device_by_node(np);
+               unittest(pdev, "device 2 creation failed\n");
+               irq = platform_get_irq(pdev, 0);
+               unittest(irq < 0 && irq != -EPROBE_DEFER,
+                        "device parsing error failed - %d\n", irq);
+       }
 
        np = of_find_node_by_path("/testcase-data/platform-tests");
        unittest(np, "No testcase data in device tree\n");
index 778c4f7..2153956 100644 (file)
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
                if (val & PCIE_ATU_ENABLE)
                        return;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
                if (val & PCIE_ATU_ENABLE)
                        return 0;
 
-               usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+               mdelay(LINK_WAIT_IATU);
        }
        dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
index 96126fd..9f1a5e3 100644 (file)
@@ -26,8 +26,7 @@
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES     5
-#define LINK_WAIT_IATU_MIN             9000
-#define LINK_WAIT_IATU_MAX             10000
+#define LINK_WAIT_IATU                 9
 
 /* Synopsys-specific PCIe configuration registers */
 #define PCIE_PORT_LINK_CONTROL         0x710
index c00f82c..9ba4d12 100644 (file)
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
 
 #define STATUS_REVISION_MISMATCH 0xC0000059
 
+/* space for 32bit serial number as string */
+#define SLOT_NAME_SIZE 11
+
 /*
  * Message Types
  */
@@ -494,6 +497,7 @@ struct hv_pci_dev {
        struct list_head list_entry;
        refcount_t refs;
        enum hv_pcichild_state state;
+       struct pci_slot *pci_slot;
        struct pci_function_description desc;
        bool reported_missing;
        struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
        spin_unlock_irqrestore(&hbus->device_list_lock, flags);
 }
 
+/*
+ * Assign entries in sysfs pci slot directory.
+ *
+ * Note that this function does not need to lock the children list
+ * because it is called from pci_devices_present_work which
+ * is serialized with hv_eject_device_work because they are on the
+ * same ordered workqueue. Therefore hbus->children list will not change
+ * even when pci_create_slot sleeps.
+ */
+static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
+{
+       struct hv_pci_dev *hpdev;
+       char name[SLOT_NAME_SIZE];
+       int slot_nr;
+
+       list_for_each_entry(hpdev, &hbus->children, list_entry) {
+               if (hpdev->pci_slot)
+                       continue;
+
+               slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
+               snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
+               hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+                                         name, NULL);
+               if (IS_ERR(hpdev->pci_slot)) {
+                       pr_warn("pci_create slot %s failed\n", name);
+                       hpdev->pci_slot = NULL;
+               }
+       }
+}
+
 /**
  * create_root_hv_pci_bus() - Expose a new root PCI bus
  * @hbus:      Root PCI bus, as understood by this driver
@@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
        pci_lock_rescan_remove();
        pci_scan_child_bus(hbus->pci_bus);
        pci_bus_assign_resources(hbus->pci_bus);
+       hv_pci_assign_slots(hbus);
        pci_bus_add_devices(hbus->pci_bus);
        pci_unlock_rescan_remove();
        hbus->state = hv_pcibus_installed;
@@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work)
                 */
                pci_lock_rescan_remove();
                pci_scan_child_bus(hbus->pci_bus);
+               hv_pci_assign_slots(hbus);
                pci_unlock_rescan_remove();
                break;
 
@@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work)
        list_del(&hpdev->list_entry);
        spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
 
+       if (hpdev->pci_slot)
+               pci_destroy_slot(hpdev->pci_slot);
+
        memset(&ctxt, 0, sizeof(ctxt));
        ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
        ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
index 50eb072..a41d79b 100644 (file)
@@ -1145,7 +1145,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
 {
        struct device *dev = &pcie->pdev->dev;
        struct device_node *np = dev->of_node;
-       unsigned int i;
        int ret;
 
        INIT_LIST_HEAD(&pcie->resources);
@@ -1179,13 +1178,58 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
                                         resource_size(&pcie->io) - 1);
                pcie->realio.name = "PCI I/O";
 
+               pci_add_resource(&pcie->resources, &pcie->realio);
+       }
+
+       return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+       struct mvebu_pcie *pcie;
+       struct pci_bus *bus, *child;
+       int ret;
+
+       ret = pci_scan_root_bus_bridge(bridge);
+       if (ret < 0) {
+               dev_err(bridge->dev.parent, "Scanning root bridge failed");
+               return ret;
+       }
+
+       pcie = pci_host_bridge_priv(bridge);
+       if (resource_size(&pcie->io) != 0) {
+               unsigned int i;
+
                for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
                        pci_ioremap_io(i, pcie->io.start + i);
+       }
 
-               pci_add_resource(&pcie->resources, &pcie->realio);
+       bus = bridge->bus;
+
+       /*
+        * We insert PCI resources into the iomem_resource and
+        * ioport_resource trees in either pci_bus_claim_resources()
+        * or pci_bus_assign_resources().
+        */
+       if (pci_has_flag(PCI_PROBE_ONLY)) {
+               pci_bus_claim_resources(bus);
+       } else {
+               pci_bus_size_bridges(bus);
+               pci_bus_assign_resources(bus);
+
+               list_for_each_entry(child, &bus->children, node)
+                       pcie_bus_configure_settings(child);
        }
 
-       return devm_request_pci_bus_resources(dev, &pcie->resources);
+       pci_bus_add_devices(bus);
+       return 0;
 }
 
 static int mvebu_pcie_probe(struct platform_device *pdev)
@@ -1268,7 +1312,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
        bridge->align_resource = mvebu_pcie_align_resource;
        bridge->msi = pcie->msi;
 
-       return pci_host_probe(bridge);
+       return mvebu_pci_host_probe(bridge);
 }
 
 static const struct of_device_id mvebu_pcie_of_match_table[] = {
index ef0b1b6..12afa7f 100644 (file)
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
 /**
  * enable_slot - enable, configure a slot
  * @slot: slot to be enabled
+ * @bridge: true if enable is for the whole bridge (not a single slot)
  *
  * This function should be called per *physical slot*,
  * not per each slot object in ACPI namespace.
  */
-static void enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot, bool bridge)
 {
        struct pci_dev *dev;
        struct pci_bus *bus = slot->bus;
        struct acpiphp_func *func;
 
-       if (bus->self && hotplug_is_native(bus->self)) {
+       if (bridge && bus->self && hotplug_is_native(bus->self)) {
                /*
                 * If native hotplug is used, it will take care of hotplug
                 * slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
                                        trim_stale_devices(dev);
 
                        /* configure all functions */
-                       enable_slot(slot);
+                       enable_slot(slot, true);
                } else {
                        disable_slot(slot);
                }
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
                if (bridge)
                        acpiphp_check_bridge(bridge);
                else if (!(slot->flags & SLOT_IS_GOING_AWAY))
-                       enable_slot(slot);
+                       enable_slot(slot, false);
 
                break;
 
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
-               enable_slot(slot);
+               enable_slot(slot, false);
 
        pci_unlock_rescan_remove();
        return 0;
index 7136e34..a938abd 100644 (file)
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
        u16 slot_status;
        int retval;
 
-       /* Clear sticky power-fault bit from previous power failures */
+       /* Clear power-fault bit from previous power failures */
        pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
        if (slot_status & PCI_EXP_SLTSTA_PFD)
                pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
                pciehp_handle_button_press(slot);
        }
 
+       /* Check Power Fault Detected */
+       if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+               ctrl->power_fault_detected = 1;
+               ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
+               pciehp_set_attention_status(slot, 1);
+               pciehp_green_led_off(slot);
+       }
+
        /*
         * Disable requests have higher priority than Presence Detect Changed
         * or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
                pciehp_handle_presence_or_link_change(slot, events);
        up_read(&ctrl->reset_lock);
 
-       /* Check Power Fault Detected */
-       if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
-               ctrl->power_fault_detected = 1;
-               ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
-               pciehp_set_attention_status(slot, 1);
-               pciehp_green_led_off(slot);
-       }
-
        pci_config_pm_runtime_put(pdev);
        wake_up(&ctrl->requester);
        return IRQ_HANDLED;
index 29ff961..51b6c81 100644 (file)
@@ -1289,12 +1289,12 @@ int pci_save_state(struct pci_dev *dev)
 EXPORT_SYMBOL(pci_save_state);
 
 static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
-                                    u32 saved_val, int retry)
+                                    u32 saved_val, int retry, bool force)
 {
        u32 val;
 
        pci_read_config_dword(pdev, offset, &val);
-       if (val == saved_val)
+       if (!force && val == saved_val)
                return;
 
        for (;;) {
@@ -1313,25 +1313,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 }
 
 static void pci_restore_config_space_range(struct pci_dev *pdev,
-                                          int start, int end, int retry)
+                                          int start, int end, int retry,
+                                          bool force)
 {
        int index;
 
        for (index = end; index >= start; index--)
                pci_restore_config_dword(pdev, 4 * index,
                                         pdev->saved_config_space[index],
-                                        retry);
+                                        retry, force);
 }
 
 static void pci_restore_config_space(struct pci_dev *pdev)
 {
        if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
-               pci_restore_config_space_range(pdev, 10, 15, 0);
+               pci_restore_config_space_range(pdev, 10, 15, 0, false);
                /* Restore BARs before the command register. */
-               pci_restore_config_space_range(pdev, 4, 9, 10);
-               pci_restore_config_space_range(pdev, 0, 3, 0);
+               pci_restore_config_space_range(pdev, 4, 9, 10, false);
+               pci_restore_config_space_range(pdev, 0, 3, 0, false);
+       } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+               pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+               /*
+                * Force rewriting of prefetch registers to avoid S3 resume
+                * issues on Intel PCI bridges that occur when these
+                * registers are not explicitly written.
+                */
+               pci_restore_config_space_range(pdev, 9, 11, 0, true);
+               pci_restore_config_space_range(pdev, 0, 8, 0, false);
        } else {
-               pci_restore_config_space_range(pdev, 0, 15, 0);
+               pci_restore_config_space_range(pdev, 0, 15, 0, false);
        }
 }
 
@@ -4547,6 +4558,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
 
        return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
 }
+EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
 
 static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
 {
@@ -5200,7 +5212,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
  */
 int pci_reset_bus(struct pci_dev *pdev)
 {
-       return pci_probe_reset_slot(pdev->slot) ?
+       return (!pci_probe_reset_slot(pdev->slot)) ?
            __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
 }
 EXPORT_SYMBOL_GPL(pci_reset_bus);
index ec78400..201f9e5 100644 (file)
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
 {
 #ifdef CONFIG_PCI_PASID
        struct pci_dev *bridge;
+       int pcie_type;
        u32 cap;
 
        if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
        if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
                return;
 
-       if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+       pcie_type = pci_pcie_type(dev);
+       if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+           pcie_type == PCI_EXP_TYPE_RC_END)
                dev->eetlp_prefix_path = 1;
        else {
                bridge = pci_upstream_bridge(dev);
index ef7143a..6bc27b7 100644 (file)
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
  *
  * 0x9d10-0x9d1b PCI Express Root port #{1-12}
  *
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
  * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
  * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
  * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
        case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
        case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
        case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
-       case 0xa32c ... 0xa343:                         /* 300 series */
                return true;
        }
 
index 9940cc7..54a8b30 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/poll.h>
 #include <linux/wait.h>
 
+#include <linux/nospec.h>
+
 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
 MODULE_VERSION("0.1");
 MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
        default:
                if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
                        return -EINVAL;
+               p.port = array_index_nospec(p.port,
+                                       ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
                p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
                break;
        }
index ece41fb..c4f4d90 100644 (file)
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
        }
 
        /* if the configuration is provided through pdata, apply it */
-       if (pdata) {
+       if (pdata && pdata->gpio_configs) {
                ret = pinctrl_register_mappings(pdata->gpio_configs,
                                                pdata->n_gpio_configs);
                if (ret) {
index fb1afe5..e7f45d9 100644 (file)
 
 #include "pinctrl-intel.h"
 
-#define CNL_PAD_OWN    0x020
-#define CNL_PADCFGLOCK 0x080
-#define CNL_HOSTSW_OWN 0x0b0
-#define CNL_GPI_IE     0x120
+#define CNL_PAD_OWN            0x020
+#define CNL_PADCFGLOCK         0x080
+#define CNL_LP_HOSTSW_OWN      0x0b0
+#define CNL_H_HOSTSW_OWN       0x0c0
+#define CNL_GPI_IE             0x120
 
 #define CNL_GPP(r, s, e, g)                            \
        {                                               \
 
 #define CNL_NO_GPIO    -1
 
-#define CNL_COMMUNITY(b, s, e, g)                      \
+#define CNL_COMMUNITY(b, s, e, o, g)                   \
        {                                               \
                .barno = (b),                           \
                .padown_offset = CNL_PAD_OWN,           \
                .padcfglock_offset = CNL_PADCFGLOCK,    \
-               .hostown_offset = CNL_HOSTSW_OWN,       \
+               .hostown_offset = (o),                  \
                .ie_offset = CNL_GPI_IE,                \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
                .ngpps = ARRAY_SIZE(g),                 \
        }
 
+#define CNLLP_COMMUNITY(b, s, e, g)                    \
+       CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
+
+#define CNLH_COMMUNITY(b, s, e, g)                     \
+       CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
+
 /* Cannon Lake-H */
 static const struct pinctrl_pin_desc cnlh_pins[] = {
        /* GPP_A */
@@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = {
 static const struct intel_padgroup cnlh_community3_gpps[] = {
        CNL_GPP(0, 155, 178, 192),              /* GPP_K */
        CNL_GPP(1, 179, 202, 224),              /* GPP_H */
-       CNL_GPP(2, 203, 215, 258),              /* GPP_E */
+       CNL_GPP(2, 203, 215, 256),              /* GPP_E */
        CNL_GPP(3, 216, 239, 288),              /* GPP_F */
        CNL_GPP(4, 240, 248, CNL_NO_GPIO),      /* SPI */
 };
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
 };
 
 static const struct intel_community cnlh_communities[] = {
-       CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
-       CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
-       CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
-       CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+       CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+       CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+       CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+       CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
 };
 
 static const struct intel_community cnllp_communities[] = {
-       CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
-       CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
-       CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+       CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+       CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+       CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
 };
 
 static const struct intel_pinctrl_soc_data cnllp_soc_data = {
index 62b009b..1ea3438 100644 (file)
@@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
        .owner = THIS_MODULE,
 };
 
+/**
+ * intel_gpio_to_pin() - Translate from GPIO offset to pin number
+ * @pctrl: Pinctrl structure
+ * @offset: GPIO offset from gpiolib
+ * @commmunity: Community is filled here if not %NULL
+ * @padgrp: Pad group is filled here if not %NULL
+ *
+ * When coming through gpiolib irqchip, the GPIO offset is not
+ * automatically translated to pinctrl pin number. This function can be
+ * used to find out the corresponding pinctrl pin.
+ */
+static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
+                            const struct intel_community **community,
+                            const struct intel_padgroup **padgrp)
+{
+       int i;
+
+       for (i = 0; i < pctrl->ncommunities; i++) {
+               const struct intel_community *comm = &pctrl->communities[i];
+               int j;
+
+               for (j = 0; j < comm->ngpps; j++) {
+                       const struct intel_padgroup *pgrp = &comm->gpps[j];
+
+                       if (pgrp->gpio_base < 0)
+                               continue;
+
+                       if (offset >= pgrp->gpio_base &&
+                           offset < pgrp->gpio_base + pgrp->size) {
+                               int pin;
+
+                               pin = pgrp->base + offset - pgrp->gpio_base;
+                               if (community)
+                                       *community = comm;
+                               if (padgrp)
+                                       *padgrp = pgrp;
+
+                               return pin;
+                       }
+               }
+       }
+
+       return -EINVAL;
+}
+
 static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
        void __iomem *reg;
        u32 padcfg0;
+       int pin;
 
-       reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+       pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+       if (pin < 0)
+               return -EINVAL;
+
+       reg = intel_get_padcfg(pctrl, pin, PADCFG0);
        if (!reg)
                return -EINVAL;
 
@@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        unsigned long flags;
        void __iomem *reg;
        u32 padcfg0;
+       int pin;
 
-       reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+       pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+       if (pin < 0)
+               return;
+
+       reg = intel_get_padcfg(pctrl, pin, PADCFG0);
        if (!reg)
                return;
 
@@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
        struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
        void __iomem *reg;
        u32 padcfg0;
+       int pin;
 
-       reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+       pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+       if (pin < 0)
+               return -EINVAL;
+
+       reg = intel_get_padcfg(pctrl, pin, PADCFG0);
        if (!reg)
                return -EINVAL;
 
@@ -827,81 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
        .set_config = gpiochip_generic_config,
 };
 
-/**
- * intel_gpio_to_pin() - Translate from GPIO offset to pin number
- * @pctrl: Pinctrl structure
- * @offset: GPIO offset from gpiolib
- * @commmunity: Community is filled here if not %NULL
- * @padgrp: Pad group is filled here if not %NULL
- *
- * When coming through gpiolib irqchip, the GPIO offset is not
- * automatically translated to pinctrl pin number. This function can be
- * used to find out the corresponding pinctrl pin.
- */
-static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
-                            const struct intel_community **community,
-                            const struct intel_padgroup **padgrp)
-{
-       int i;
-
-       for (i = 0; i < pctrl->ncommunities; i++) {
-               const struct intel_community *comm = &pctrl->communities[i];
-               int j;
-
-               for (j = 0; j < comm->ngpps; j++) {
-                       const struct intel_padgroup *pgrp = &comm->gpps[j];
-
-                       if (pgrp->gpio_base < 0)
-                               continue;
-
-                       if (offset >= pgrp->gpio_base &&
-                           offset < pgrp->gpio_base + pgrp->size) {
-                               int pin;
-
-                               pin = pgrp->base + offset - pgrp->gpio_base;
-                               if (community)
-                                       *community = comm;
-                               if (padgrp)
-                                       *padgrp = pgrp;
-
-                               return pin;
-                       }
-               }
-       }
-
-       return -EINVAL;
-}
-
-static int intel_gpio_irq_reqres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-       int ret;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0) {
-               ret = gpiochip_lock_as_irq(gc, pin);
-               if (ret) {
-                       dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
-                               pin);
-                       return ret;
-               }
-       }
-       return 0;
-}
-
-static void intel_gpio_irq_relres(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
-       int pin;
-
-       pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
-       if (pin >= 0)
-               gpiochip_unlock_as_irq(gc, pin);
-}
-
 static void intel_gpio_irq_ack(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1117,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
 
 static struct irq_chip intel_gpio_irqchip = {
        .name = "intel-gpio",
-       .irq_request_resources = intel_gpio_irq_reqres,
-       .irq_release_resources = intel_gpio_irq_relres,
        .irq_enable = intel_gpio_irq_enable,
        .irq_ack = intel_gpio_irq_ack,
        .irq_mask = intel_gpio_irq_mask,
index 41ccc75..1425c28 100644 (file)
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
        unsigned long flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
-       u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
 
        raw_spin_lock_irqsave(&gpio_dev->lock, flags);
        pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
        pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
        pin_reg |= BIT(INTERRUPT_MASK_OFF);
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
-       /*
-        * When debounce logic is enabled it takes ~900 us before interrupts
-        * can be enabled.  During this "debounce warm up" period the
-        * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
-        * reads back as 1, signaling that interrupts are now enabled.
-        */
-       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
-               continue;
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 }
 
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
 static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
        int ret = 0;
-       u32 pin_reg;
+       u32 pin_reg, pin_reg_irq_en, mask;
        unsigned long flags, irq_flags;
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
 
        pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+       /*
+        * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+        * debounce registers of any GPIO will block wake/interrupt status
+        * generation for *all* GPIOs for a length of time that depends on
+        * WAKE_INT_MASTER_REG.MaskStsLength[11:0].  During this period the
+        * INTERRUPT_ENABLE bit will read as 0.
+        *
+        * We temporarily enable irq for the GPIO whose configuration is
+        * changing, and then wait for it to read back as 1 to know when
+        * debounce has settled and then disable the irq again.
+        * We do this polling with the spinlock held to ensure other GPIO
+        * access routines do not read an incorrect value for the irq enable
+        * bit of other GPIOs.  We keep the GPIO masked while polling to avoid
+        * spurious irqs, and disable the irq again after polling.
+        */
+       mask = BIT(INTERRUPT_ENABLE_OFF);
+       pin_reg_irq_en = pin_reg;
+       pin_reg_irq_en |= mask;
+       pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+       writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+       while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+               continue;
        writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
        raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
 
index 6a1b605..628817c 100644 (file)
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
 
                err = pinctrl_generic_add_group(jzpc->pctl, group->name,
                                group->pins, group->num_pins, group->data);
-               if (err) {
+               if (err < 0) {
                        dev_err(dev, "Failed to register group %s\n",
                                        group->name);
                        return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
                err = pinmux_generic_add_function(jzpc->pctl, func->name,
                                func->group_names, func->num_group_names,
                                func->data);
-               if (err) {
+               if (err < 0) {
                        dev_err(dev, "Failed to register function %s\n",
                                        func->name);
                        return err;
index 2155a30..5d72ffa 100644 (file)
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = readl(pctrl->regs + g->intr_cfg_reg);
+       /*
+        * There are two bits that control interrupt forwarding to the CPU. The
+        * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
+        * latched into the interrupt status register when the hardware detects
+        * an irq that it's configured for (either edge for edge type or level
+        * for level type irq). The 'non-raw' status enable bit causes the
+        * hardware to assert the summary interrupt to the CPU if the latched
+        * status bit is set. There's a bug though, the edge detection logic
+        * seems to have a problem where toggling the RAW_STATUS_EN bit may
+        * cause the status bit to latch spuriously when there isn't any edge
+        * so we can't touch that bit for edge type irqs and we have to keep
+        * the bit set anyway so that edges are latched while the line is masked.
+        *
+        * To make matters more complicated, leaving the RAW_STATUS_EN bit
+        * enabled all the time causes level interrupts to re-latch into the
+        * status register because the level is still present on the line after
+        * we ack it. We clear the raw status enable bit during mask here and
+        * set the bit on unmask so the interrupt can't latch into the hardware
+        * while it's masked.
+        */
+       if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
+               val &= ~BIT(g->intr_raw_status_bit);
+
        val &= ~BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
 
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = readl(pctrl->regs + g->intr_cfg_reg);
+       val |= BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_enable_bit);
        writel(val, pctrl->regs + g->intr_cfg_reg);
 
index 398393a..b6fd483 100644 (file)
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
        ret = cros_ec_cmd_xfer(ec_dev, msg);
        if (ret > 0) {
                ec_dev->event_size = ret - 1;
-               memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+               memcpy(&ec_dev->event_data, msg->data, ret);
        }
 
        return ret;
index d975462..f10af5c 100644 (file)
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
                if (obj && obj->type == ACPI_TYPE_INTEGER)
                        *out_data = (u32) obj->integer.value;
        }
+       kfree(output.pointer);
        return status;
 
 }
index 88afe56..cf2229e 100644 (file)
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
        dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
                priv->buf->std.output[0], priv->buf->std.output[1],
                priv->buf->std.output[2], priv->buf->std.output[3]);
+       kfree(output.pointer);
 
        return 0;
 }
index 0f8ac8d..a1bd8aa 100644 (file)
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
                        BD71837_REG_REGLOCK);
        }
 
+       /*
+        * There is a HW quirk in BD71837. The shutdown sequence timings for
+        * bucks/LDOs which are controlled via register interface are changed.
+        * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the
+        * beginning of shut-down sequence. As bucks 6 and 7 are parent
+        * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage
+        * monitoring to erroneously detect under voltage and force PMIC to
+        * emergency state instead of poweroff. In order to avoid this we
+        * disable voltage monitoring for LDO5 and LDO6
+        */
+       err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
+                                BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
+       if (err) {
+               dev_err(&pmic->pdev->dev,
+                       "Failed to disable voltage monitoring\n");
+               goto err;
+       }
+
        for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
 
                struct regulator_desc *desc;
index bb1324f..9577d89 100644 (file)
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
        if (!rstate->changeable)
                return -EPERM;
 
-       rstate->enabled = en;
+       rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
 
        return 0;
 }
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
            !rdev->desc->fixed_uV)
                rdev->is_switch = true;
 
+       dev_set_drvdata(&rdev->dev, rdev);
        ret = device_register(&rdev->dev);
        if (ret != 0) {
                put_device(&rdev->dev);
                goto unset_supplies;
        }
 
-       dev_set_drvdata(&rdev->dev, rdev);
        rdev_init_debugfs(rdev);
 
        /* try to resolve regulators supply since a new one was registered */
index 638f17d..210fc20 100644 (file)
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
                else if (of_property_read_bool(suspend_np,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
-               else
-                       suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
 
                if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
                                          &pval))
index eceba38..2f61f55 100644 (file)
@@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
  * Output one or more lines of text on the SCLP console (VT220 and /
  * or line-mode).
  */
-void __sclp_early_printk(const char *str, unsigned int len)
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
 {
        int have_linemode, have_vt220;
 
-       if (sclp_init_state != sclp_init_state_uninitialized)
+       if (!force && sclp_init_state != sclp_init_state_uninitialized)
                return;
        if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
                return;
@@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len)
 
 void sclp_early_printk(const char *str)
 {
-       __sclp_early_printk(str, strlen(str));
+       __sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+       __sclp_early_printk(str, strlen(str), 1);
 }
index dbe7c7a..fd77e46 100644 (file)
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
 
        for (i = 0; i < pat->pat_nr; i++, pa++)
                for (j = 0; j < pa->pa_nr; j++)
-                       if (pa->pa_iova_pfn[i] == iova_pfn)
+                       if (pa->pa_iova_pfn[j] == iova_pfn)
                                return true;
 
        return false;
index 770fa9c..f47d16b 100644 (file)
@@ -22,6 +22,7 @@
 #include "vfio_ccw_private.h"
 
 struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
 
 /*
  * Helpers
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
                cp_update_scsw(&private->cp, &irb->scsw);
                cp_free(&private->cp);
        }
-       memcpy(private->io_region.irb_area, irb, sizeof(*irb));
+       memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
        private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
        if (!private)
                return -ENOMEM;
+
+       private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+                                              GFP_KERNEL | GFP_DMA);
+       if (!private->io_region) {
+               kfree(private);
+               return -ENOMEM;
+       }
+
        private->sch = sch;
        dev_set_drvdata(&sch->dev, private);
 
@@ -139,6 +148,7 @@ out_disable:
        cio_disable_subchannel(sch);
 out_free:
        dev_set_drvdata(&sch->dev, NULL);
+       kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private);
        return ret;
 }
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
 
        dev_set_drvdata(&sch->dev, NULL);
 
+       kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private);
 
        return 0;
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
        if (!vfio_ccw_work_q)
                return -ENOMEM;
 
+       vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+                                       sizeof(struct ccw_io_region), 0,
+                                       SLAB_ACCOUNT, 0,
+                                       sizeof(struct ccw_io_region), NULL);
+       if (!vfio_ccw_io_region) {
+               destroy_workqueue(vfio_ccw_work_q);
+               return -ENOMEM;
+       }
+
        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
+               kmem_cache_destroy(vfio_ccw_io_region);
                destroy_workqueue(vfio_ccw_work_q);
        }
 
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
 {
        css_driver_unregister(&vfio_ccw_sch_driver);
        isc_unregister(VFIO_CCW_ISC);
+       kmem_cache_destroy(vfio_ccw_io_region);
        destroy_workqueue(vfio_ccw_work_q);
 }
 module_init(vfio_ccw_sch_init);
index 797a827..f94aa01 100644 (file)
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
                         enum vfio_ccw_event event)
 {
        pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
-       private->io_region.ret_code = -EIO;
+       private->io_region->ret_code = -EIO;
 }
 
 static void fsm_io_busy(struct vfio_ccw_private *private,
                        enum vfio_ccw_event event)
 {
-       private->io_region.ret_code = -EBUSY;
+       private->io_region->ret_code = -EBUSY;
 }
 
 static void fsm_disabled_irq(struct vfio_ccw_private *private,
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
 {
        union orb *orb;
        union scsw *scsw = &private->scsw;
-       struct ccw_io_region *io_region = &private->io_region;
+       struct ccw_io_region *io_region = private->io_region;
        struct mdev_device *mdev = private->mdev;
        char *errstr = "request";
 
index 41eeb57..f673e10 100644 (file)
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
                return -EINVAL;
 
        private = dev_get_drvdata(mdev_parent_dev(mdev));
-       region = &private->io_region;
+       region = private->io_region;
        if (copy_to_user(buf, (void *)region + *ppos, count))
                return -EFAULT;
 
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
        if (private->state != VFIO_CCW_STATE_IDLE)
                return -EACCES;
 
-       region = &private->io_region;
+       region = private->io_region;
        if (copy_from_user((void *)region + *ppos, buf, count))
                return -EFAULT;
 
index 78a66d9..078e46f 100644 (file)
@@ -41,7 +41,7 @@ struct vfio_ccw_private {
        atomic_t                avail;
        struct mdev_device      *mdev;
        struct notifier_block   nb;
-       struct ccw_io_region    io_region;
+       struct ccw_io_region    *io_region;
 
        struct channel_program  cp;
        struct irb              irb;
index ec891bc..f039266 100644 (file)
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
        if (bits & 0x07)
                return -EINVAL;
 
-       memset(bitmap, 0, bits / 8);
-
        if (str[0] == '0' && str[1] == 'x')
                str++;
        if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
 }
 
 /*
- * str2clrsetmasks() - parse bitmask argument and set the clear and
- * the set bitmap mask. A concatenation (done with ',') of these terms
- * is recognized:
+ * modify_bitmap() - parse bitmask argument and modify an existing
+ * bit mask accordingly. A concatenation (done with ',') of these
+ * terms is recognized:
  *   +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
  * <bitnr> may be any valid number (hex, decimal or octal) in the range
  * 0...bits-1; the leading + or - is required. Here are some examples:
  *   +0-15,+32,-128,-0xFF
  *   -0-255,+1-16,+0x128
  *   +1,+2,+3,+4,-5,-7-10
- * Returns a clear and a set bitmask. Every positive value in the string
- * results in a bit set in the set mask and every negative value in the
- * string results in a bit SET in the clear mask. As a bit may be touched
- * more than once, the last 'operation' wins: +0-255,-128 = all but bit
- * 128 set in the set mask, only bit 128 set in the clear mask.
+ * Returns the new bitmap after all changes have been applied. Every
+ * positive value in the string will set a bit and every negative value
+ * in the string will clear a bit. As a bit may be touched more than once,
+ * the last 'operation' wins:
+ * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
+ * cleared again. All other bits are unmodified.
  */
-static int str2clrsetmasks(const char *str,
-                          unsigned long *clrmap,
-                          unsigned long *setmap,
-                          int bits)
+static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
 {
        int a, i, z;
        char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
        if (bits & 0x07)
                return -EINVAL;
 
-       memset(clrmap, 0, bits / 8);
-       memset(setmap, 0, bits / 8);
-
        while (*str) {
                sign = *str++;
                if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
                        str = np;
                }
                for (i = a; i <= z; i++)
-                       if (sign == '+') {
-                               set_bit_inv(i, setmap);
-                               clear_bit_inv(i, clrmap);
-                       } else {
-                               clear_bit_inv(i, setmap);
-                               set_bit_inv(i, clrmap);
-                       }
+                       if (sign == '+')
+                               set_bit_inv(i, bitmap);
+                       else
+                               clear_bit_inv(i, bitmap);
                while (*str == ',' || *str == '\n')
                        str++;
        }
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
                            unsigned long *bitmap, int bits,
                            struct mutex *lock)
 {
-       int i;
+       unsigned long *newmap, size;
+       int rc;
 
        /* bits needs to be a multiple of 8 */
        if (bits & 0x07)
                return -EINVAL;
 
+       size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
+       newmap = kmalloc(size, GFP_KERNEL);
+       if (!newmap)
+               return -ENOMEM;
+       if (mutex_lock_interruptible(lock)) {
+               kfree(newmap);
+               return -ERESTARTSYS;
+       }
+
        if (*str == '+' || *str == '-') {
-               DECLARE_BITMAP(clrm, bits);
-               DECLARE_BITMAP(setm, bits);
-
-               i = str2clrsetmasks(str, clrm, setm, bits);
-               if (i)
-                       return i;
-               if (mutex_lock_interruptible(lock))
-                       return -ERESTARTSYS;
-               for (i = 0; i < bits; i++) {
-                       if (test_bit_inv(i, clrm))
-                               clear_bit_inv(i, bitmap);
-                       if (test_bit_inv(i, setm))
-                               set_bit_inv(i, bitmap);
-               }
+               memcpy(newmap, bitmap, size);
+               rc = modify_bitmap(str, newmap, bits);
        } else {
-               DECLARE_BITMAP(setm, bits);
-
-               i = hex2bitmap(str, setm, bits);
-               if (i)
-                       return i;
-               if (mutex_lock_interruptible(lock))
-                       return -ERESTARTSYS;
-               for (i = 0; i < bits; i++)
-                       if (test_bit_inv(i, setm))
-                               set_bit_inv(i, bitmap);
-                       else
-                               clear_bit_inv(i, bitmap);
+               memset(newmap, 0, size);
+               rc = hex2bitmap(str, newmap, bits);
        }
+       if (rc == 0)
+               memcpy(bitmap, newmap, size);
        mutex_unlock(lock);
-
-       return 0;
+       kfree(newmap);
+       return rc;
 }
 
 /*
index 49f64eb..ffce6f3 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 
 #include <net/iucv/af_iucv.h>
 #include <net/dsfield.h>
@@ -609,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
                struct qeth_card *card)
 {
-       char *ipa_name;
+       const char *ipa_name;
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
        if (rc)
@@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 
        priv.buffer_len = oat_data.buffer_len;
        priv.response_len = 0;
-       priv.buffer =  kzalloc(oat_data.buffer_len, GFP_KERNEL);
+       priv.buffer = vzalloc(oat_data.buffer_len);
        if (!priv.buffer) {
                rc = -ENOMEM;
                goto out;
@@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
                        rc = -EFAULT;
 
 out_free:
-       kfree(priv.buffer);
+       vfree(priv.buffer);
 out:
        return rc;
 }
@@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
                dev->hw_features |= NETIF_F_SG;
                dev->vlan_features |= NETIF_F_SG;
+               if (IS_IQD(card))
+                       dev->features |= NETIF_F_SG;
        }
 
        return dev;
@@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
        qeth_update_from_chp_desc(card);
 
        card->dev = qeth_alloc_netdev(card);
-       if (!card->dev)
+       if (!card->dev) {
+               rc = -ENOMEM;
                goto err_card;
+       }
 
        qeth_determine_capabilities(card);
        enforced_disc = qeth_enforce_discipline(card);
index 5bcb8da..e891c0b 100644 (file)
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
 
 struct ipa_rc_msg {
        enum qeth_ipa_return_codes rc;
-       char *msg;
+       const char *msg;
 };
 
-static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
        {IPA_RC_SUCCESS,                "success"},
        {IPA_RC_NOTSUPP,                "Command not supported"},
        {IPA_RC_IP_TABLE_FULL,          "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
 
 
 
-char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
 {
-       int x = 0;
-       qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
-                       sizeof(struct ipa_rc_msg) - 1].rc = rc;
-       while (qeth_ipa_rc_msg[x].rc != rc)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+               if (qeth_ipa_rc_msg[x].rc == rc)
+                       return qeth_ipa_rc_msg[x].msg;
        return qeth_ipa_rc_msg[x].msg;
 }
 
 
 struct ipa_cmd_names {
        enum qeth_ipa_cmds cmd;
-       char *name;
+       const char *name;
 };
 
-static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_STARTLAN,      "startlan"},
        {IPA_CMD_STOPLAN,       "stoplan"},
        {IPA_CMD_SETVMAC,       "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
        {IPA_CMD_UNKNOWN,       "unknown"},
 };
 
-char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
 {
-       int x = 0;
-       qeth_ipa_cmd_names[
-               sizeof(qeth_ipa_cmd_names) /
-                       sizeof(struct ipa_cmd_names)-1].cmd = cmd;
-       while (qeth_ipa_cmd_names[x].cmd != cmd)
-               x++;
+       int x;
+
+       for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+               if (qeth_ipa_cmd_names[x].cmd == cmd)
+                       return qeth_ipa_cmd_names[x].name;
        return qeth_ipa_cmd_names[x].name;
 }
index aa8b919..aa5de1f 100644 (file)
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
        QETH_IPA_ARP_RC_Q_NO_DATA    = 0x0008,
 };
 
-extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
 
 #define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
                               sizeof(struct qeth_ipacmd_setassparms_hdr))
index 710fa74..b5e3853 100644 (file)
@@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index 7175086..ada258c 100644 (file)
@@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
                default:
                        dev_kfree_skb_any(skb);
                        QETH_CARD_TEXT(card, 3, "inbunkno");
-                       QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+                       QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
                        continue;
                }
                work_done++;
index 7b31f19..050879a 100644 (file)
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
 
 static int __init openprom_init(void)
 {
-       struct device_node *dp;
        int err;
 
        err = misc_register(&openprom_dev);
        if (err)
                return err;
 
-       dp = of_find_node_by_path("/");
-       dp = dp->child;
-       while (dp) {
-               if (!strcmp(dp->name, "options"))
-                       break;
-               dp = dp->sibling;
-       }
-       options_node = dp;
-
+       options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
        if (!options_node) {
                misc_deregister(&openprom_dev);
                return -EIO;
index 524f9ea..6516bc3 100644 (file)
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
 alloc_error:
        kfree(ctx->ccb_buf);
 done:
-       if (ctx != NULL)
-               kfree(ctx);
+       kfree(ctx);
        return -ENOMEM;
 }
 
index 8fc851a..7c09700 100644 (file)
@@ -52,12 +52,12 @@ config SCSI_MQ_DEFAULT
        default y
        depends on SCSI
        ---help---
-         This option enables the new blk-mq based I/O path for SCSI
-         devices by default.  With the option the scsi_mod.use_blk_mq
-         module/boot option defaults to Y, without it to N, but it can
-         still be overridden either way.
+         This option enables the blk-mq based I/O path for SCSI devices by
+         default.  With this option the scsi_mod.use_blk_mq module/boot
+         option defaults to Y, without it to N, but it can still be
+         overridden either way.
 
-         If unsure say N.
+         If unsure say Y.
 
 config SCSI_PROC_FS
        bool "legacy /proc/scsi/ support"
index 29bf1e6..39eb415 100644 (file)
@@ -1346,7 +1346,7 @@ struct fib {
 struct aac_hba_map_info {
        __le32  rmw_nexus;              /* nexus for native HBA devices */
        u8              devtype;        /* device type */
-       u8              reset_state;    /* 0 - no reset, 1..x - */
+       s8              reset_state;    /* 0 - no reset, 1..x - */
                                        /* after xth TM LUN reset */
        u16             qd_limit;
        u32             scan_counter;
index 23d07e9..e519238 100644 (file)
@@ -1602,6 +1602,46 @@ fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
 }
 
 /**
+ *     fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ *     @caps32: a 32-bit Port Capabilities value
+ *
+ *     Returns the equivalent 16-bit Port Capabilities value.  Note that
+ *     not all 32-bit Port Capabilities can be represented in the 16-bit
+ *     Port Capabilities and some fields/values may not make it.
+ */
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+       fw_port_cap16_t caps16 = 0;
+
+       #define CAP32_TO_CAP16(__cap) \
+               do { \
+                       if (caps32 & FW_PORT_CAP32_##__cap) \
+                               caps16 |= FW_PORT_CAP_##__cap; \
+               } while (0)
+
+       CAP32_TO_CAP16(SPEED_100M);
+       CAP32_TO_CAP16(SPEED_1G);
+       CAP32_TO_CAP16(SPEED_10G);
+       CAP32_TO_CAP16(SPEED_25G);
+       CAP32_TO_CAP16(SPEED_40G);
+       CAP32_TO_CAP16(SPEED_100G);
+       CAP32_TO_CAP16(FC_RX);
+       CAP32_TO_CAP16(FC_TX);
+       CAP32_TO_CAP16(802_3_PAUSE);
+       CAP32_TO_CAP16(802_3_ASM_DIR);
+       CAP32_TO_CAP16(ANEG);
+       CAP32_TO_CAP16(FORCE_PAUSE);
+       CAP32_TO_CAP16(MDIAUTO);
+       CAP32_TO_CAP16(MDISTRAIGHT);
+       CAP32_TO_CAP16(FEC_RS);
+       CAP32_TO_CAP16(FEC_BASER_RS);
+
+       #undef CAP32_TO_CAP16
+
+       return caps16;
+}
+
+/**
  *      lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
  *      @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
  *
@@ -1759,7 +1799,7 @@ csio_enable_ports(struct csio_hw *hw)
                        val = 1;
 
                        csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO,
-                                      hw->pfn, 0, 1, &param, &val, false,
+                                      hw->pfn, 0, 1, &param, &val, true,
                                       NULL);
 
                        if (csio_mb_issue(hw, mbp)) {
@@ -1769,16 +1809,9 @@ csio_enable_ports(struct csio_hw *hw)
                                return -EINVAL;
                        }
 
-                       csio_mb_process_read_params_rsp(hw, mbp, &retval, 1,
-                                                       &val);
-                       if (retval != FW_SUCCESS) {
-                               csio_err(hw, "FW_PARAMS_CMD(r) port:%d failed: 0x%x\n",
-                                        portid, retval);
-                               mempool_free(mbp, hw->mb_mempool);
-                               return -EINVAL;
-                       }
-
-                       fw_caps = val;
+                       csio_mb_process_read_params_rsp(hw, mbp, &retval,
+                                                       0, NULL);
+                       fw_caps = retval ? FW_CAPS16 : FW_CAPS32;
                }
 
                /* Read PORT information */
@@ -2364,8 +2397,8 @@ bye:
 }
 
 /*
- * Returns -EINVAL if attempts to flash the firmware failed
- * else returns 0,
+ * Returns -EINVAL if attempts to flash the firmware failed,
+ * -ENOMEM if memory allocation failed else returns 0,
  * if flashing was not attempted because the card had the
  * latest firmware ECANCELED is returned
  */
@@ -2393,6 +2426,13 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                return -EINVAL;
        }
 
+       /* allocate memory to read the header of the firmware on the
+        * card
+        */
+       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
+       if (!card_fw)
+               return -ENOMEM;
+
        if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
                fw_bin_file = FW_FNAME_T5;
        else
@@ -2406,11 +2446,6 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
                fw_size = fw->size;
        }
 
-       /* allocate memory to read the header of the firmware on the
-        * card
-        */
-       card_fw = kmalloc(sizeof(*card_fw), GFP_KERNEL);
-
        /* upgrade FW logic */
        ret = csio_hw_prep_fw(hw, fw_info, fw_data, fw_size, card_fw,
                         hw->fw_state, reset);
index 9e73ef7..e351af6 100644 (file)
@@ -639,6 +639,7 @@ int csio_handle_intr_status(struct csio_hw *, unsigned int,
 
 fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps);
 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32);
 fw_port_cap32_t lstatus_to_fwcap(u32 lstatus);
 
 int csio_hw_start(struct csio_hw *);
index c026417..6f13673 100644 (file)
@@ -368,7 +368,7 @@ csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
                        FW_CMD_LEN16_V(sizeof(*cmdp) / 16));
 
        if (fw_caps == FW_CAPS16)
-               cmdp->u.l1cfg.rcap = cpu_to_be32(fc);
+               cmdp->u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(fc));
        else
                cmdp->u.l1cfg32.rcap32 = cpu_to_be32(fc);
 }
@@ -395,8 +395,8 @@ csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
                        *pcaps = fwcaps16_to_caps32(ntohs(rsp->u.info.pcap));
                        *acaps = fwcaps16_to_caps32(ntohs(rsp->u.info.acap));
                } else {
-                       *pcaps = ntohs(rsp->u.info32.pcaps32);
-                       *acaps = ntohs(rsp->u.info32.acaps32);
+                       *pcaps = be32_to_cpu(rsp->u.info32.pcaps32);
+                       *acaps = be32_to_cpu(rsp->u.info32.acaps32);
                }
        }
 }
index f02dcc8..ea4b0bb 100644 (file)
@@ -563,35 +563,13 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
 }
 EXPORT_SYMBOL(scsi_host_get);
 
-struct scsi_host_mq_in_flight {
-       int cnt;
-};
-
-static void scsi_host_check_in_flight(struct request *rq, void *data,
-               bool reserved)
-{
-       struct scsi_host_mq_in_flight *in_flight = data;
-
-       if (blk_mq_request_started(rq))
-               in_flight->cnt++;
-}
-
 /**
  * scsi_host_busy - Return the host busy counter
  * @shost:     Pointer to Scsi_Host to inc.
  **/
 int scsi_host_busy(struct Scsi_Host *shost)
 {
-       struct scsi_host_mq_in_flight in_flight = {
-               .cnt = 0,
-       };
-
-       if (!shost->use_blk_mq)
-               return atomic_read(&shost->host_busy);
-
-       blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
-                       &in_flight);
-       return in_flight.cnt;
+       return atomic_read(&shost->host_busy);
 }
 EXPORT_SYMBOL(scsi_host_busy);
 
index 58bb70b..c120929 100644 (file)
@@ -976,7 +976,7 @@ static struct scsi_host_template hpsa_driver_template = {
 #endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
-       .max_sectors = 1024,
+       .max_sectors = 2048,
        .no_write_same = 1,
 };
 
index fac3773..f42a619 100644 (file)
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
                vscsi->dds.window[LOCAL].liobn,
                vscsi->dds.window[REMOTE].liobn);
 
-       strcpy(vscsi->eye, "VSCSI ");
-       strncat(vscsi->eye, vdev->name, MAX_EYE);
+       snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
 
        vscsi->dds.unit_id = vdev->unit_address;
-       strncpy(vscsi->dds.partition_name, partition_name,
+       strscpy(vscsi->dds.partition_name, partition_name,
                sizeof(vscsi->dds.partition_name));
        vscsi->dds.partition_num = partition_number;
 
index f2ec80b..271990b 100644 (file)
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
        LEAVE;
 }
 
+static void ipr_add_remove_thread(struct work_struct *work)
+{
+       unsigned long lock_flags;
+       struct ipr_resource_entry *res;
+       struct scsi_device *sdev;
+       struct ipr_ioa_cfg *ioa_cfg =
+               container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
+       u8 bus, target, lun;
+       int did_work;
+
+       ENTER;
+       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+restart:
+       do {
+               did_work = 0;
+               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       return;
+               }
+
+               list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+                       if (res->del_from_ml && res->sdev) {
+                               did_work = 1;
+                               sdev = res->sdev;
+                               if (!scsi_device_get(sdev)) {
+                                       if (!res->add_to_ml)
+                                               list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+                                       else
+                                               res->del_from_ml = 0;
+                                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                                       scsi_remove_device(sdev);
+                                       scsi_device_put(sdev);
+                                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                               }
+                               break;
+                       }
+               }
+       } while (did_work);
+
+       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+               if (res->add_to_ml) {
+                       bus = res->bus;
+                       target = res->target;
+                       lun = res->lun;
+                       res->add_to_ml = 0;
+                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                       scsi_add_device(ioa_cfg->host, bus, target, lun);
+                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+                       goto restart;
+               }
+       }
+
+       ioa_cfg->scan_done = 1;
+       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+       kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
+       LEAVE;
+}
+
 /**
  * ipr_worker_thread - Worker thread
  * @work:              ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
 static void ipr_worker_thread(struct work_struct *work)
 {
        unsigned long lock_flags;
-       struct ipr_resource_entry *res;
-       struct scsi_device *sdev;
        struct ipr_dump *dump;
        struct ipr_ioa_cfg *ioa_cfg =
                container_of(work, struct ipr_ioa_cfg, work_q);
-       u8 bus, target, lun;
-       int did_work;
 
        ENTER;
        spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
                return;
        }
 
-restart:
-       do {
-               did_work = 0;
-               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
-                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                       return;
-               }
+       schedule_work(&ioa_cfg->scsi_add_work_q);
 
-               list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-                       if (res->del_from_ml && res->sdev) {
-                               did_work = 1;
-                               sdev = res->sdev;
-                               if (!scsi_device_get(sdev)) {
-                                       if (!res->add_to_ml)
-                                               list_move_tail(&res->queue, &ioa_cfg->free_res_q);
-                                       else
-                                               res->del_from_ml = 0;
-                                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                                       scsi_remove_device(sdev);
-                                       scsi_device_put(sdev);
-                                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-                               }
-                               break;
-                       }
-               }
-       } while (did_work);
-
-       list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-               if (res->add_to_ml) {
-                       bus = res->bus;
-                       target = res->target;
-                       lun = res->lun;
-                       res->add_to_ml = 0;
-                       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-                       scsi_add_device(ioa_cfg->host, bus, target, lun);
-                       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-                       goto restart;
-               }
-       }
-
-       ioa_cfg->scan_done = 1;
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-       kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
        LEAVE;
 }
 
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        INIT_LIST_HEAD(&ioa_cfg->free_res_q);
        INIT_LIST_HEAD(&ioa_cfg->used_res_q);
        INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+       INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
        init_waitqueue_head(&ioa_cfg->reset_wait_q);
        init_waitqueue_head(&ioa_cfg->msi_wait_q);
        init_waitqueue_head(&ioa_cfg->eeh_wait_q);
index 68afbbd..f6baa23 100644 (file)
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
        u8 saved_mode_page_len;
 
        struct work_struct work_q;
+       struct work_struct scsi_add_work_q;
        struct workqueue_struct *reset_work_q;
 
        wait_queue_head_t reset_wait_q;
index e0d0da5..43732e8 100644 (file)
@@ -672,7 +672,7 @@ struct lpfc_hba {
 #define LS_NPIV_FAB_SUPPORTED 0x2      /* Fabric supports NPIV */
 #define LS_IGNORE_ERATT       0x4      /* intr handler should ignore ERATT */
 #define LS_MDS_LINK_DOWN      0x8      /* MDS Diagnostics Link Down */
-#define LS_MDS_LOOPBACK      0x16      /* MDS Diagnostics Link Up (Loopback) */
+#define LS_MDS_LOOPBACK      0x10      /* MDS Diagnostics Link Up (Loopback) */
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
index 5a25553..1a6ed9b 100644 (file)
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
                goto buffer_done;
 
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+               nrport = NULL;
+               spin_lock(&vport->phba->hbalock);
                rport = lpfc_ndlp_get_nrport(ndlp);
-               if (!rport)
-                       continue;
-
-               /* local short-hand pointer. */
-               nrport = rport->remoteport;
+               if (rport)
+                       nrport = rport->remoteport;
+               spin_unlock(&vport->phba->hbalock);
                if (!nrport)
                        continue;
 
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
        struct lpfc_nodelist  *ndlp;
 #if (IS_ENABLED(CONFIG_NVME_FC))
        struct lpfc_nvme_rport *rport;
+       struct nvme_fc_remote_port *remoteport = NULL;
 #endif
 
        shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
                if (ndlp->rport)
                        ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
 #if (IS_ENABLED(CONFIG_NVME_FC))
+               spin_lock(&vport->phba->hbalock);
                rport = lpfc_ndlp_get_nrport(ndlp);
                if (rport)
+                       remoteport = rport->remoteport;
+               spin_unlock(&vport->phba->hbalock);
+               if (remoteport)
                        nvme_fc_set_remoteport_devloss(rport->remoteport,
                                                       vport->cfg_devloss_tmo);
 #endif
@@ -5122,16 +5127,16 @@ LPFC_ATTR_R(enable_SmartSAN, 0, 0, 1, "Enable SmartSAN functionality");
 
 /*
 # lpfc_fdmi_on: Controls FDMI support.
-#       0       No FDMI support (default)
-#       1       Traditional FDMI support
+#       0       No FDMI support
+#       1       Traditional FDMI support (default)
 # Traditional FDMI support means the driver will assume FDMI-2 support;
 # however, if that fails, it will fallback to FDMI-1.
 # If lpfc_enable_SmartSAN is set to 1, the driver ignores lpfc_fdmi_on.
 # If lpfc_enable_SmartSAN is set 0, the driver uses the current value of
 # lpfc_fdmi_on.
-# Value range [0,1]. Default value is 0.
+# Value range [0,1]. Default value is 1.
 */
-LPFC_ATTR_R(fdmi_on, 0, 0, 1, "Enable FDMI support");
+LPFC_ATTR_R(fdmi_on, 1, 0, 1, "Enable FDMI support");
 
 /*
 # Specifies the maximum number of ELS cmds we can have outstanding (for
index 9df0c05..aec5b10 100644 (file)
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        unsigned char *statep;
        struct nvme_fc_local_port *localport;
        struct lpfc_nvmet_tgtport *tgtp;
-       struct nvme_fc_remote_port *nrport;
+       struct nvme_fc_remote_port *nrport = NULL;
        struct lpfc_nvme_rport *rport;
 
        cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
        len += snprintf(buf + len, size - len, "\tRport List:\n");
        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                /* local short-hand pointer. */
+               spin_lock(&phba->hbalock);
                rport = lpfc_ndlp_get_nrport(ndlp);
-               if (!rport)
-                       continue;
-
-               nrport = rport->remoteport;
+               if (rport)
+                       nrport = rport->remoteport;
+               spin_unlock(&phba->hbalock);
                if (!nrport)
                        continue;
 
index 028462e..918ae18 100644 (file)
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
 
+       spin_lock_irq(&vport->phba->hbalock);
        oldrport = lpfc_ndlp_get_nrport(ndlp);
+       spin_unlock_irq(&vport->phba->hbalock);
        if (!oldrport)
                lpfc_nlp_get(ndlp);
 
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        struct nvme_fc_local_port *localport;
        struct lpfc_nvme_lport *lport;
        struct lpfc_nvme_rport *rport;
-       struct nvme_fc_remote_port *remoteport;
+       struct nvme_fc_remote_port *remoteport = NULL;
 
        localport = vport->localport;
 
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        if (!lport)
                goto input_err;
 
+       spin_lock_irq(&vport->phba->hbalock);
        rport = lpfc_ndlp_get_nrport(ndlp);
-       if (!rport)
+       if (rport)
+               remoteport = rport->remoteport;
+       spin_unlock_irq(&vport->phba->hbalock);
+       if (!remoteport)
                goto input_err;
 
-       remoteport = rport->remoteport;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6033 Unreg nvme remoteport %p, portname x%llx, "
                         "port_id x%06x, portstate x%x port type x%x\n",
index fc3babc..a6f96b3 100644 (file)
@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
        QEDI_NVM_TGT_SEC,
 };
 
+struct qedi_nvm_iscsi_image {
+       struct nvm_iscsi_cfg iscsi_cfg;
+       u32 crc;
+};
+
 struct qedi_uio_ctrl {
        /* meta data */
        u32 uio_hsi_version;
@@ -294,7 +299,7 @@ struct qedi_ctx {
        void *bdq_pbl_list;
        dma_addr_t bdq_pbl_list_dma;
        u8 bdq_pbl_list_num_entries;
-       struct nvm_iscsi_cfg *iscsi_cfg;
+       struct qedi_nvm_iscsi_image *iscsi_image;
        dma_addr_t nvm_buf_dma;
        void __iomem *bdq_primary_prod;
        void __iomem *bdq_secondary_prod;
index aa96bcc..e5bd035 100644 (file)
@@ -1346,23 +1346,26 @@ exit_setup_int:
 
 static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       if (qedi->iscsi_cfg)
+       if (qedi->iscsi_image)
                dma_free_coherent(&qedi->pdev->dev,
-                                 sizeof(struct nvm_iscsi_cfg),
-                                 qedi->iscsi_cfg, qedi->nvm_buf_dma);
+                                 sizeof(struct qedi_nvm_iscsi_image),
+                                 qedi->iscsi_image, qedi->nvm_buf_dma);
 }
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-       qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
-                                            sizeof(struct nvm_iscsi_cfg),
-                                            &qedi->nvm_buf_dma, GFP_KERNEL);
-       if (!qedi->iscsi_cfg) {
+       struct qedi_nvm_iscsi_image nvm_image;
+
+       qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
+                                               sizeof(nvm_image),
+                                               &qedi->nvm_buf_dma,
+                                               GFP_KERNEL);
+       if (!qedi->iscsi_image) {
                QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
                return -ENOMEM;
        }
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
-                 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+                 "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
                  qedi->nvm_buf_dma);
 
        return 0;
@@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
        struct nvm_iscsi_block *block;
 
        pf = qedi->dev_info.common.abs_pf_id;
-       block = &qedi->iscsi_cfg->block[0];
+       block = &qedi->iscsi_image->iscsi_cfg.block[0];
        for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
                flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
                        NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
@@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
        int ret = 1;
-       u16 len;
-
-       len = sizeof(struct nvm_iscsi_cfg);
+       struct qedi_nvm_iscsi_image nvm_image;
 
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "Get NVM iSCSI CFG image\n");
        ret = qedi_ops->common->nvm_get_image(qedi->cdev,
                                              QED_NVM_IMAGE_ISCSI_CFG,
-                                             (char *)qedi->iscsi_cfg, len);
+                                             (char *)qedi->iscsi_image,
+                                             sizeof(nvm_image));
        if (ret)
                QEDI_ERR(&qedi->dbg_ctx,
                         "Could not get NVM image. ret = %d\n", ret);
@@ -2470,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
                /* start qedi context */
                spin_lock_init(&qedi->hba_lock);
                spin_lock_init(&qedi->task_idx_lock);
+               mutex_init(&qedi->stats_lock);
        }
        qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
        qedi_ops->ll2->start(qedi->cdev, &params);
index fecf96f..199d3ba 100644 (file)
@@ -374,8 +374,8 @@ struct atio_from_isp {
 static inline int fcpcmd_is_corrupted(struct atio *atio)
 {
        if (atio->entry_type == ATIO_TYPE7 &&
-           (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
-           FCP_CMD_LENGTH_MIN))
+           ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+            FCP_CMD_LENGTH_MIN))
                return 1;
        else
                return 0;
index 0adfb3b..eb97d2d 100644 (file)
@@ -345,8 +345,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost)
        unsigned long flags;
 
        rcu_read_lock();
-       if (!shost->use_blk_mq)
-               atomic_dec(&shost->host_busy);
+       atomic_dec(&shost->host_busy);
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
@@ -445,12 +444,7 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget)
 
 static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
 {
-       /*
-        * blk-mq can handle host queue busy efficiently via host-wide driver
-        * tag allocation
-        */
-
-       if (!shost->use_blk_mq && shost->can_queue > 0 &&
+       if (shost->can_queue > 0 &&
            atomic_read(&shost->host_busy) >= shost->can_queue)
                return true;
        if (atomic_read(&shost->host_blocked) > 0)
@@ -1606,10 +1600,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
        if (scsi_host_in_recovery(shost))
                return 0;
 
-       if (!shost->use_blk_mq)
-               busy = atomic_inc_return(&shost->host_busy) - 1;
-       else
-               busy = 0;
+       busy = atomic_inc_return(&shost->host_busy) - 1;
        if (atomic_read(&shost->host_blocked) > 0) {
                if (busy)
                        goto starved;
@@ -1625,7 +1616,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
                                     "unblocking host at zero depth\n"));
        }
 
-       if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue)
+       if (shost->can_queue > 0 && busy >= shost->can_queue)
                goto starved;
        if (shost->host_self_blocked)
                goto starved;
@@ -1711,9 +1702,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
         * with the locks as normal issue path does.
         */
        atomic_inc(&sdev->device_busy);
-
-       if (!shost->use_blk_mq)
-               atomic_inc(&shost->host_busy);
+       atomic_inc(&shost->host_busy);
        if (starget->can_queue > 0)
                atomic_inc(&starget->target_busy);
 
index b79b366..4a57ffe 100644 (file)
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
        case REQ_OP_ZONE_RESET:
                return sd_zbc_setup_reset_cmnd(cmd);
        default:
-               BUG();
+               WARN_ON_ONCE(1);
+               return BLKPREP_KILL;
        }
 }
 
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
        if (rot == 1) {
                blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
                blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+       } else {
+               blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+               blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
        if (sdkp->device->type == TYPE_ZBC) {
index 9d5d2ca..c55f38e 100644 (file)
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
+
+       /*
+        * Do not use blk-mq at this time because blk-mq does not support
+        * runtime pm.
+        */
+       host->use_blk_mq = false;
+
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
index ecb2274..8cc0151 100644 (file)
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
 {
        unsigned long addr;
 
+       if (!p)
+               return -ENODEV;
+
        addr = gen_pool_alloc(p, cnt);
        if (!addr)
                return -ENOMEM;
index c646d87..681f7d4 100644 (file)
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
 {
        u32 shift;
 
-       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+       shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
        shift -= tdm_num * 2;
 
        return shift;
index 4b5e250..e5c7e1e 100644 (file)
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
        struct sdw_master_runtime *m_rt = stream->m_rt;
        struct sdw_slave_runtime *s_rt, *_s_rt;
 
-       list_for_each_entry_safe(s_rt, _s_rt,
-                       &m_rt->slave_rt_list, m_rt_node)
-               sdw_stream_remove_slave(s_rt->slave, stream);
+       list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
+               sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
+               sdw_release_slave_stream(s_rt->slave, stream);
+       }
 
        list_del(&m_rt->bus_node);
 }
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
                                "Master runtime config failed for stream:%s",
                                stream->name);
                ret = -ENOMEM;
-               goto error;
+               goto unlock;
        }
 
        ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
        if (ret)
                goto stream_error;
 
-       stream->state = SDW_STREAM_CONFIGURED;
+       goto unlock;
 
 stream_error:
        sdw_release_master_stream(stream);
-error:
+unlock:
        mutex_unlock(&bus->bus_lock);
        return ret;
 }
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
  * @stream: SoundWire stream
  * @port_config: Port configuration for audio stream
  * @num_ports: Number of ports
+ *
+ * It is expected that Slave is added before adding Master
+ * to the Stream.
+ *
  */
 int sdw_stream_add_slave(struct sdw_slave *slave,
                struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
        if (ret)
                goto stream_error;
 
+       /*
+        * Change stream state to CONFIGURED on first Slave add.
+        * Bus is not aware of number of Slave(s) in a stream at this
+        * point so cannot depend on all Slave(s) to be added in order to
+        * change stream state to CONFIGURED.
+        */
        stream->state = SDW_STREAM_CONFIGURED;
        goto error;
 
index 7cb3ab0..3082e72 100644 (file)
 
 #define DRIVER_NAME "fsl-dspi"
 
+#ifdef CONFIG_M5441x
+#define DSPI_FIFO_SIZE                 16
+#else
 #define DSPI_FIFO_SIZE                 4
+#endif
 #define DSPI_DMA_BUFSIZE               (DSPI_FIFO_SIZE * 1024)
 
 #define SPI_MCR                0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
 static void dspi_eoq_write(struct fsl_dspi *dspi)
 {
        int fifo_size = DSPI_FIFO_SIZE;
+       u16 xfer_cmd = dspi->tx_cmd;
 
        /* Fill TX FIFO with as many transfers as possible */
        while (dspi->len && fifo_size--) {
+               dspi->tx_cmd = xfer_cmd;
                /* Request EOQF for last transfer in FIFO */
                if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
                        dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
index 0626e6e..421bfc7 100644 (file)
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
                *mflags |= SPI_MASTER_NO_RX;
 
        spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
-       if (IS_ERR(spi_gpio->mosi))
-               return PTR_ERR(spi_gpio->mosi);
+       if (IS_ERR(spi_gpio->sck))
+               return PTR_ERR(spi_gpio->sck);
 
        for (i = 0; i < num_chipselects; i++) {
                spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
index 95dc4d7..b37de1d 100644 (file)
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 
        ret = wait_event_interruptible_timeout(rspi->wait,
                                               rspi->dma_callbacked, HZ);
-       if (ret > 0 && rspi->dma_callbacked)
+       if (ret > 0 && rspi->dma_callbacked) {
                ret = 0;
-       else if (!ret) {
-               dev_err(&rspi->master->dev, "DMA timeout\n");
-               ret = -ETIMEDOUT;
+       } else {
+               if (!ret) {
+                       dev_err(&rspi->master->dev, "DMA timeout\n");
+                       ret = -ETIMEDOUT;
+               }
                if (tx)
                        dmaengine_terminate_all(rspi->master->dma_tx);
                if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
 
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+       return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS     &rspi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver rspi_driver = {
        .probe =        rspi_probe,
        .remove =       rspi_remove,
        .id_table =     spi_driver_ids,
        .driver         = {
                .name = "renesas_spi",
+               .pm = DEV_PM_OPS,
                .of_match_table = of_match_ptr(rspi_of_match),
        },
 };
index 539d6d1..101cd6a 100644 (file)
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
 
 static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
 {
-       sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+       sh_msiof_write(p, STR,
+                      sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
 }
 
 static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, spi_driver_ids);
 
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+       return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+                        sh_msiof_spi_resume);
+#define DEV_PM_OPS     &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS     NULL
+#endif /* CONFIG_PM_SLEEP */
+
 static struct platform_driver sh_msiof_spi_drv = {
        .probe          = sh_msiof_spi_probe,
        .remove         = sh_msiof_spi_remove,
        .id_table       = spi_driver_ids,
        .driver         = {
                .name           = "spi_sh_msiof",
+               .pm             = DEV_PM_OPS,
                .of_match_table = of_match_ptr(sh_msiof_match),
        },
 };
index 6f7b946..1427f34 100644 (file)
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
                goto exit_free_master;
        }
 
+       /* disabled clock may cause interrupt storm upon request */
+       tspi->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(tspi->clk)) {
+               ret = PTR_ERR(tspi->clk);
+               dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_prepare(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+               goto exit_free_master;
+       }
+       ret = clk_enable(tspi->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+               goto exit_free_master;
+       }
+
        spi_irq = platform_get_irq(pdev, 0);
        tspi->irq = spi_irq;
        ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
                                        tspi->irq);
-               goto exit_free_master;
-       }
-
-       tspi->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(tspi->clk)) {
-               dev_err(&pdev->dev, "can not get clock\n");
-               ret = PTR_ERR(tspi->clk);
-               goto exit_free_irq;
+               goto exit_clk_disable;
        }
 
        tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
        tegra_slink_deinit_dma_param(tspi, true);
 exit_free_irq:
        free_irq(spi_irq, tspi);
+exit_clk_disable:
+       clk_disable(tspi->clk);
 exit_free_master:
        spi_master_put(master);
        return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
 
        free_irq(tspi->irq, tspi);
 
+       clk_disable(tspi->clk);
+
        if (tspi->tx_dma_chan)
                tegra_slink_deinit_dma_param(tspi, false);
 
index ec395a6..9da0bc5 100644 (file)
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
         */
        if (ctlr->num_chipselect == 0)
                return -EINVAL;
-       /* allocate dynamic bus number using Linux idr */
-       if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
+       if (ctlr->bus_num >= 0) {
+               /* devices with a fixed bus num must check-in with the num */
+               mutex_lock(&board_lock);
+               id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
+                       ctlr->bus_num + 1, GFP_KERNEL);
+               mutex_unlock(&board_lock);
+               if (WARN(id < 0, "couldn't get idr"))
+                       return id == -ENOSPC ? -EBUSY : id;
+               ctlr->bus_num = id;
+       } else if (ctlr->dev.of_node) {
+               /* allocate dynamic bus number using Linux idr */
                id = of_alias_get_id(ctlr->dev.of_node, "spi");
                if (id >= 0) {
                        ctlr->bus_num = id;
index 96f6149..663b755 100644 (file)
@@ -2,7 +2,7 @@
 
 config EROFS_FS
        tristate "EROFS filesystem support"
-       depends on BROKEN
+       depends on BLOCK
        help
          EROFS(Enhanced Read-Only File System) is a lightweight
          read-only file system with modern designs (eg. page-sized
index 1aec509..2df9768 100644 (file)
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
                goto err_sbread;
 
        sb->s_magic = EROFS_SUPER_MAGIC;
-       sb->s_flags |= MS_RDONLY | MS_NOATIME;
+       sb->s_flags |= SB_RDONLY | SB_NOATIME;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_time_gran = 1;
 
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
 {
        BUG_ON(!sb_rdonly(sb));
 
-       *flags |= MS_RDONLY;
+       *flags |= SB_RDONLY;
        return 0;
 }
 
index 7e64c7e..a9f4802 100644 (file)
@@ -2,3 +2,7 @@
   GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
   lines from device tree, ACPI or board files, board files should
   use <linux/gpio/machine.h>
+
+* convert all these over to drm_simple_display_pipe and submit for inclusion
+  into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
+  drivers anymore.
index 6ff8e01..5b1865f 100644 (file)
@@ -1,9 +1,22 @@
 This is a list of things that need to be done to get this driver out of the
 staging directory.
+
+- Implement the gasket framework's functionality through UIO instead of
+  introducing a new user-space drivers framework that is quite similar.
+
+  UIO provides the necessary bits to implement user-space drivers. Meanwhile
+  the gasket APIs adds some extra conveniences like PCI BAR mapping, and
+  MSI interrupts. Add these features to the UIO subsystem, then re-implement
+  the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h)
+
 - Document sysfs files with Documentation/ABI/ entries.
+
 - Use misc interface instead of major number for driver version description.
+
 - Add descriptions of module_param's
+
 - apex_get_status() should actually check status.
+
 - "drivers" should never be dealing with "raw" sysfs calls or mess around with
   kobjects at all. The driver core should handle all of this for you
   automaically. There should not be a need for raw attribute macros.
index f48e06a..9a58aaf 100644 (file)
@@ -1,9 +1,3 @@
-config SOC_CAMERA_IMX074
-       tristate "imx074 support (DEPRECATED)"
-       depends on SOC_CAMERA && I2C
-       help
-         This driver supports IMX074 cameras from Sony
-
 config SOC_CAMERA_MT9T031
        tristate "mt9t031 support (DEPRECATED)"
        depends on SOC_CAMERA && I2C
index da92c49..69cc508 100644 (file)
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                ret = PTR_ERR(dev);
                goto err_drv_alloc;
        }
+
+       ret = pci_enable_device(pdev);
+       if (ret)
+               goto err_pci_enable;
+
        dev->pdev = pdev;
        pci_set_drvdata(pdev, dev);
 
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  err_drv_dev_register:
        vbox_driver_unload(dev);
  err_vbox_driver_load:
+       pci_disable_device(pdev);
+ err_pci_enable:
        drm_dev_put(dev);
  err_drv_alloc:
        return ret;
index a83eac8..79836c8 100644 (file)
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
        if (rc)
                return rc;
 
+       mutex_lock(&vbox->hw_mutex);
+       vbox_set_view(crtc);
+       vbox_do_modeset(crtc, &crtc->mode);
+       mutex_unlock(&vbox->hw_mutex);
+
        spin_lock_irqsave(&drm->event_lock, flags);
 
        if (event)
index f7b07c0..ee7e26b 100644 (file)
@@ -1,4 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_WILC1000) += wilc1000.o
 
 ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
                -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
                        wilc_wlan.o
 
 obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += $(wilc1000-objs)
 wilc1000-sdio-objs += wilc_sdio.o
 
 obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += $(wilc1000-objs)
 wilc1000-spi-objs += wilc_spi.o
index 01cf4bd..3b8d237 100644 (file)
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
        }
 
        kfree(wilc);
-       wilc_debugfs_remove();
 }
+EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
 
 static const struct net_device_ops wilc_netdev_ops = {
        .ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
        if (!wl)
                return -ENOMEM;
 
-       wilc_debugfs_init();
        *wilc = wl;
        wl->io_type = io_type;
        wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(wilc_netdev_init);
+
+MODULE_LICENSE("GPL");
index edc7287..8001df6 100644 (file)
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
 
 #define DBG_LEVEL_ALL  (DEBUG | INFO | WRN | ERR)
 static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
+EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
 
 static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
                                     size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
        },
 };
 
-int wilc_debugfs_init(void)
+static int __init wilc_debugfs_init(void)
 {
        int i;
        struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
        }
        return 0;
 }
+module_init(wilc_debugfs_init);
 
-void wilc_debugfs_remove(void)
+static void __exit wilc_debugfs_remove(void)
 {
        debugfs_remove_recursive(wilc_dir);
 }
+module_exit(wilc_debugfs_remove);
 
 #endif
index 6787b6e..8b184aa 100644 (file)
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
        wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
 }
+EXPORT_SYMBOL_GPL(chip_allow_sleep);
 
 void chip_wakeup(struct wilc *wilc)
 {
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
        }
        chip_ps_state = CHIP_WAKEDUP;
 }
+EXPORT_SYMBOL_GPL(chip_wakeup);
 
 void wilc_chip_sleep_manually(struct wilc *wilc)
 {
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
        chip_ps_state = CHIP_SLEEPING_MANUAL;
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
 
 void host_wakeup_notify(struct wilc *wilc)
 {
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(host_wakeup_notify);
 
 void host_sleep_notify(struct wilc *wilc)
 {
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
        wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
        release_bus(wilc, RELEASE_ONLY);
 }
+EXPORT_SYMBOL_GPL(host_sleep_notify);
 
 int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
 {
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
 
        release_bus(wilc, RELEASE_ALLOW_SLEEP);
 }
+EXPORT_SYMBOL_GPL(wilc_handle_isr);
 
 int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
                                u32 buffer_size)
index 00d13b1..b81a73b 100644 (file)
@@ -831,6 +831,4 @@ struct wilc;
 int wilc_wlan_init(struct net_device *dev);
 u32 wilc_get_chipid(struct wilc *wilc, bool update);
 
-int wilc_debugfs_init(void);
-void wilc_debugfs_remove(void);
 #endif
index 768cce0..76a2626 100644 (file)
@@ -207,8 +207,8 @@ cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
        ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
        sgl->offset = sg_offset;
        if (!ret) {
-               pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
-                       __func__, 0, xferlen, sgcnt);
+               pr_debug("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+                        __func__, 0, xferlen, sgcnt);
                goto rel_ppods;
        }
 
@@ -250,8 +250,8 @@ cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
        if (ret < 0) {
-               pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
-                       csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+               pr_debug("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+                        csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
 
                ttinfo->sgl = NULL;
                ttinfo->nents = 0;
index 94bad43..cc756a1 100644 (file)
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
 
        sg_init_table(sg, ARRAY_SIZE(sg));
        sg_set_buf(sg, buf, payload_length);
-       sg_set_buf(sg + 1, pad_bytes, padding);
+       if (padding)
+               sg_set_buf(sg + 1, pad_bytes, padding);
 
        ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
 
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
 static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 {
        int ret;
-       u8 buffer[ISCSI_HDR_LEN], opcode;
+       u8 *buffer, opcode;
        u32 checksum = 0, digest = 0;
        struct kvec iov;
 
+       buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+       if (!buffer)
+               return;
+
        while (!kthread_should_stop()) {
                /*
                 * Ensure that both TX and RX per connection kthreads
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                 */
                iscsit_thread_check_cpumask(conn, current, 0);
 
-               memset(buffer, 0, ISCSI_HDR_LEN);
                memset(&iov, 0, sizeof(struct kvec));
 
                iov.iov_base    = buffer;
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
                if (ret != ISCSI_HDR_LEN) {
                        iscsit_rx_thread_wait_for_tcp(conn);
-                       return;
+                       break;
                }
 
                if (conn->conn_ops->HeaderDigest) {
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                        ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
                        if (ret != ISCSI_CRC_LEN) {
                                iscsit_rx_thread_wait_for_tcp(conn);
-                               return;
+                               break;
                        }
 
                        iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                }
 
                if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
-                       return;
+                       break;
 
                opcode = buffer[0] & ISCSI_OPCODE_MASK;
 
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                        " while in Discovery Session, rejecting.\n", opcode);
                        iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
                                          buffer);
-                       return;
+                       break;
                }
 
                ret = iscsi_target_rx_opcode(conn, buffer);
                if (ret < 0)
-                       return;
+                       break;
        }
+
+       kfree(buffer);
 }
 
 int iscsi_target_rx_thread(void *arg)
@@ -4208,22 +4214,15 @@ int iscsit_close_connection(
                crypto_free_ahash(tfm);
        }
 
-       free_cpumask_var(conn->conn_cpumask);
-
-       kfree(conn->conn_ops);
-       conn->conn_ops = NULL;
-
        if (conn->sock)
                sock_release(conn->sock);
 
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
-       iscsit_put_transport(conn->conn_transport);
-
        pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
        conn->conn_state = TARG_CONN_STATE_FREE;
-       kfree(conn);
+       iscsit_free_conn(conn);
 
        spin_lock_bh(&sess->conn_lock);
        atomic_dec(&sess->nconn);
index 9518ffd..4e680d7 100644 (file)
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
-       int j = DIV_ROUND_UP(len, 2), rc;
-
-       rc = hex2bin(dst, src, j);
-       if (rc < 0)
-               pr_debug("CHAP string contains non hex digit symbols\n");
-
-       dst[j] = '\0';
-       return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-       int i;
-
-       for (i = 0; i < src_len; i++) {
-               sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-       }
-}
-
 static int chap_gen_challenge(
        struct iscsi_conn *conn,
        int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
        ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
        if (unlikely(ret))
                return ret;
-       chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+       bin2hex(challenge_asciihex, chap->challenge,
                                CHAP_CHALLENGE_LENGTH);
        /*
         * Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
                pr_err("Could not find CHAP_R.\n");
                goto out;
        }
+       if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+               pr_err("Malformed CHAP_R\n");
+               goto out;
+       }
+       if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+               pr_err("Malformed CHAP_R\n");
+               goto out;
+       }
 
        pr_debug("[server] Got CHAP_R=%s\n", chap_r);
-       chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
        tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+       bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
        pr_debug("[server] MD5 Server Digest: %s\n", response);
 
        if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
                pr_err("Could not find CHAP_C.\n");
                goto out;
        }
-       pr_debug("[server] Got CHAP_C=%s\n", challenge);
-       challenge_len = chap_string_to_hex(challenge_binhex, challenge,
-                               strlen(challenge));
+       challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
        if (!challenge_len) {
                pr_err("Unable to convert incoming challenge\n");
                goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
                pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
                goto out;
        }
+       if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+               pr_err("Malformed CHAP_C\n");
+               goto out;
+       }
+       pr_debug("[server] Got CHAP_C=%s\n", challenge);
        /*
         * During mutual authentication, the CHAP_C generated by the
         * initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
        /*
         * Convert response from binary hex to ascii hext.
         */
-       chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+       bin2hex(response, digest, MD5_SIGNATURE_SIZE);
        *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
                        response);
        *nr_out_len += 1;
index 9e74f8b..bb90c80 100644 (file)
@@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
                goto out_req_buf;
        }
 
-       conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
-       if (!conn->conn_ops) {
-               pr_err("Unable to allocate memory for"
-                       " struct iscsi_conn_ops.\n");
-               goto out_rsp_buf;
-       }
-
-       init_waitqueue_head(&conn->queues_wq);
-       INIT_LIST_HEAD(&conn->conn_list);
-       INIT_LIST_HEAD(&conn->conn_cmd_list);
-       INIT_LIST_HEAD(&conn->immed_queue_list);
-       INIT_LIST_HEAD(&conn->response_queue_list);
-       init_completion(&conn->conn_post_wait_comp);
-       init_completion(&conn->conn_wait_comp);
-       init_completion(&conn->conn_wait_rcfr_comp);
-       init_completion(&conn->conn_waiting_on_uc_comp);
-       init_completion(&conn->conn_logout_comp);
-       init_completion(&conn->rx_half_close_comp);
-       init_completion(&conn->tx_half_close_comp);
-       init_completion(&conn->rx_login_comp);
-       spin_lock_init(&conn->cmd_lock);
-       spin_lock_init(&conn->conn_usage_lock);
-       spin_lock_init(&conn->immed_queue_lock);
-       spin_lock_init(&conn->nopin_timer_lock);
-       spin_lock_init(&conn->response_queue_lock);
-       spin_lock_init(&conn->state_lock);
-
-       if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
-               pr_err("Unable to allocate conn->conn_cpumask\n");
-               goto out_conn_ops;
-       }
        conn->conn_login = login;
 
        return login;
 
-out_conn_ops:
-       kfree(conn->conn_ops);
-out_rsp_buf:
-       kfree(login->rsp_buf);
 out_req_buf:
        kfree(login->req_buf);
 out_login:
@@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1(
                return -ENOMEM;
        }
 
-       ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
-       if (unlikely(ret)) {
-               kfree(sess);
-               return ret;
-       }
+       if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
+               goto free_sess;
+
        sess->init_task_tag     = pdu->itt;
        memcpy(&sess->isid, pdu->isid, 6);
        sess->exp_cmd_sn        = be32_to_cpu(pdu->cmdsn);
@@ -1149,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
        return 0;
 }
 
+static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
+{
+       struct iscsi_conn *conn;
+
+       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       if (!conn) {
+               pr_err("Could not allocate memory for new connection\n");
+               return NULL;
+       }
+       pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+       conn->conn_state = TARG_CONN_STATE_FREE;
+
+       init_waitqueue_head(&conn->queues_wq);
+       INIT_LIST_HEAD(&conn->conn_list);
+       INIT_LIST_HEAD(&conn->conn_cmd_list);
+       INIT_LIST_HEAD(&conn->immed_queue_list);
+       INIT_LIST_HEAD(&conn->response_queue_list);
+       init_completion(&conn->conn_post_wait_comp);
+       init_completion(&conn->conn_wait_comp);
+       init_completion(&conn->conn_wait_rcfr_comp);
+       init_completion(&conn->conn_waiting_on_uc_comp);
+       init_completion(&conn->conn_logout_comp);
+       init_completion(&conn->rx_half_close_comp);
+       init_completion(&conn->tx_half_close_comp);
+       init_completion(&conn->rx_login_comp);
+       spin_lock_init(&conn->cmd_lock);
+       spin_lock_init(&conn->conn_usage_lock);
+       spin_lock_init(&conn->immed_queue_lock);
+       spin_lock_init(&conn->nopin_timer_lock);
+       spin_lock_init(&conn->response_queue_lock);
+       spin_lock_init(&conn->state_lock);
+
+       timer_setup(&conn->nopin_response_timer,
+                   iscsit_handle_nopin_response_timeout, 0);
+       timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+
+       if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
+               goto free_conn;
+
+       conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+       if (!conn->conn_ops) {
+               pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
+               goto put_transport;
+       }
+
+       if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+               pr_err("Unable to allocate conn->conn_cpumask\n");
+               goto free_mask;
+       }
+
+       return conn;
+
+free_mask:
+       free_cpumask_var(conn->conn_cpumask);
+put_transport:
+       iscsit_put_transport(conn->conn_transport);
+free_conn:
+       kfree(conn);
+       return NULL;
+}
+
+void iscsit_free_conn(struct iscsi_conn *conn)
+{
+       free_cpumask_var(conn->conn_cpumask);
+       kfree(conn->conn_ops);
+       iscsit_put_transport(conn->conn_transport);
+       kfree(conn);
+}
+
 void iscsi_target_login_sess_out(struct iscsi_conn *conn,
                struct iscsi_np *np, bool zero_tsih, bool new_sess)
 {
@@ -1198,10 +1230,6 @@ old_sess_out:
                crypto_free_ahash(tfm);
        }
 
-       free_cpumask_var(conn->conn_cpumask);
-
-       kfree(conn->conn_ops);
-
        if (conn->param_list) {
                iscsi_release_param_list(conn->param_list);
                conn->param_list = NULL;
@@ -1219,8 +1247,7 @@ old_sess_out:
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
-       iscsit_put_transport(conn->conn_transport);
-       kfree(conn);
+       iscsit_free_conn(conn);
 }
 
 static int __iscsi_target_login_thread(struct iscsi_np *np)
@@ -1250,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        }
        spin_unlock_bh(&np->np_thread_lock);
 
-       conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+       conn = iscsit_alloc_conn(np);
        if (!conn) {
-               pr_err("Could not allocate memory for"
-                       " new connection\n");
                /* Get another socket */
                return 1;
        }
-       pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
-       conn->conn_state = TARG_CONN_STATE_FREE;
-
-       timer_setup(&conn->nopin_response_timer,
-                   iscsit_handle_nopin_response_timeout, 0);
-       timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
-
-       if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
-               kfree(conn);
-               return 1;
-       }
 
        rc = np->np_transport->iscsit_accept_np(np, conn);
        if (rc == -ENOSYS) {
                complete(&np->np_restart_comp);
-               iscsit_put_transport(conn->conn_transport);
-               kfree(conn);
-               conn = NULL;
+               iscsit_free_conn(conn);
                goto exit;
        } else if (rc < 0) {
                spin_lock_bh(&np->np_thread_lock);
@@ -1282,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                        np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                        spin_unlock_bh(&np->np_thread_lock);
                        complete(&np->np_restart_comp);
-                       iscsit_put_transport(conn->conn_transport);
-                       kfree(conn);
-                       conn = NULL;
+                       iscsit_free_conn(conn);
                        /* Get another socket */
                        return 1;
                }
                spin_unlock_bh(&np->np_thread_lock);
-               iscsit_put_transport(conn->conn_transport);
-               kfree(conn);
-               conn = NULL;
-               goto out;
+               iscsit_free_conn(conn);
+               return 1;
        }
        /*
         * Perform the remaining iSCSI connection initialization items..
@@ -1442,7 +1450,6 @@ old_sess_out:
                tpg_np = NULL;
        }
 
-out:
        return 1;
 
 exit:
index 74ac3ab..3b8e363 100644 (file)
@@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *,
 extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
 extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
 extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern void iscsit_free_conn(struct iscsi_conn *);
 extern int iscsit_start_kthreads(struct iscsi_conn *);
 extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
 extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
index 977a830..4f28165 100644 (file)
@@ -260,10 +260,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
 
        mutex_lock(&tz->lock);
 
-       if (mode == THERMAL_DEVICE_ENABLED)
+       if (mode == THERMAL_DEVICE_ENABLED) {
                tz->polling_delay = data->polling_delay;
-       else
+               tz->passive_delay = data->passive_delay;
+       } else {
                tz->polling_delay = 0;
+               tz->passive_delay = 0;
+       }
 
        mutex_unlock(&tz->lock);
 
index c866cc1..450ed66 100644 (file)
@@ -1,16 +1,6 @@
-/*
- * Copyright 2016 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright 2016 Freescale Semiconductor, Inc.
 
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -197,7 +187,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
        int ret;
        struct qoriq_tmu_data *data;
        struct device_node *np = pdev->dev.of_node;
-       u32 site = 0;
+       u32 site;
 
        if (!np) {
                dev_err(&pdev->dev, "Device OF-Node is NULL");
@@ -233,8 +223,9 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_tmu;
 
-       data->tz = thermal_zone_of_sensor_register(&pdev->dev, data->sensor_id,
-                               data, &tmu_tz_ops);
+       data->tz = devm_thermal_zone_of_sensor_register(&pdev->dev,
+                                                       data->sensor_id,
+                                                       data, &tmu_tz_ops);
        if (IS_ERR(data->tz)) {
                ret = PTR_ERR(data->tz);
                dev_err(&pdev->dev,
@@ -243,7 +234,7 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
        }
 
        /* Enable monitoring */
-       site |= 0x1 << (15 - data->sensor_id);
+       site = 0x1 << (15 - data->sensor_id);
        tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr);
 
        return 0;
@@ -261,8 +252,6 @@ static int qoriq_tmu_remove(struct platform_device *pdev)
 {
        struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
 
-       thermal_zone_of_sensor_unregister(&pdev->dev, data->tz);
-
        /* Disable monitoring */
        tmu_write(data, TMR_DISABLE, &data->regs->tmr);
 
index 766521e..7aed533 100644 (file)
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  R-Car Gen3 THS thermal sensor driver
  *  Based on rcar_thermal.c and work from Hien Dang and Khiem Nguyen.
  *
  * Copyright (C) 2016 Renesas Electronics Corporation.
  * Copyright (C) 2016 Sang Engineering
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
  */
 #include <linux/delay.h>
 #include <linux/err.h>
index e77e630..78f9328 100644 (file)
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  *  R-Car THS/TSC thermal sensor driver
  *
  * Copyright (C) 2012 Renesas Solutions Corp.
  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
  */
 #include <linux/delay.h>
 #include <linux/err.h>
@@ -660,6 +648,6 @@ static struct platform_driver rcar_thermal_driver = {
 };
 module_platform_driver(rcar_thermal_driver);
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("R-Car THS/TSC thermal sensor driver");
 MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
index e1e264a..28fc4ce 100644 (file)
@@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
        u8 link, depth;
        u64 route;
 
-       /*
-        * After NVM upgrade adding root switch device fails because we
-        * initiated reset. During that time ICM might still send
-        * XDomain connected message which we ignore here.
-        */
-       if (!tb->root_switch)
-               return;
-
        link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
        depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
                ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
        if (pkg->hdr.packet_id)
                return;
 
-       /*
-        * After NVM upgrade adding root switch device fails because we
-        * initiated reset. During that time ICM might still send device
-        * connected message which we ignore here.
-        */
-       if (!tb->root_switch)
-               return;
-
        route = get_route(pkg->route_hi, pkg->route_lo);
        authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
        security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work)
 
        mutex_lock(&tb->lock);
 
-       switch (n->pkg->code) {
-       case ICM_EVENT_DEVICE_CONNECTED:
-               icm->device_connected(tb, n->pkg);
-               break;
-       case ICM_EVENT_DEVICE_DISCONNECTED:
-               icm->device_disconnected(tb, n->pkg);
-               break;
-       case ICM_EVENT_XDOMAIN_CONNECTED:
-               icm->xdomain_connected(tb, n->pkg);
-               break;
-       case ICM_EVENT_XDOMAIN_DISCONNECTED:
-               icm->xdomain_disconnected(tb, n->pkg);
-               break;
+       /*
+        * When the domain is stopped we flush its workqueue but before
+        * that the root switch is removed. In that case we should treat
+        * the queued events as being canceled.
+        */
+       if (tb->root_switch) {
+               switch (n->pkg->code) {
+               case ICM_EVENT_DEVICE_CONNECTED:
+                       icm->device_connected(tb, n->pkg);
+                       break;
+               case ICM_EVENT_DEVICE_DISCONNECTED:
+                       icm->device_disconnected(tb, n->pkg);
+                       break;
+               case ICM_EVENT_XDOMAIN_CONNECTED:
+                       icm->xdomain_connected(tb, n->pkg);
+                       break;
+               case ICM_EVENT_XDOMAIN_DISCONNECTED:
+                       icm->xdomain_disconnected(tb, n->pkg);
+                       break;
+               }
        }
 
        mutex_unlock(&tb->lock);
index 88cff05..5cd6bdf 100644 (file)
@@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void)
        tb_domain_exit();
 }
 
-fs_initcall(nhi_init);
+rootfs_initcall(nhi_init);
 module_exit(nhi_unload);
index 5414c4a..27284a2 100644 (file)
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                return -EIO;
 
        while (count > 0) {
+               int ret = 0;
+
                spin_lock_irqsave(&hp->lock, flags);
 
                rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
                }
 
                if (hp->n_outbuf > 0)
-                       hvc_push(hp);
+                       ret = hvc_push(hp);
 
                spin_unlock_irqrestore(&hp->lock, flags);
 
+               if (!ret)
+                       break;
+
                if (count) {
                        if (hp->n_outbuf > 0)
                                hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
 #define MAX_TIMEOUT            (2000)
 static u32 timeout = MIN_TIMEOUT;
 
+/*
+ * Maximum number of bytes to get from the console driver if hvc_poll is
+ * called from driver (and can't sleep). Any more than this and we break
+ * and start polling with khvcd. This value was derived from an OpenBMC
+ * console with the OPAL driver that results in about 0.25ms interrupts off
+ * latency.
+ */
+#define HVC_ATOMIC_READ_MAX    128
+
 #define HVC_POLL_READ  0x00000001
 #define HVC_POLL_WRITE 0x00000002
 
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
        if (!hp->irq_requested)
                poll_mask |= HVC_POLL_READ;
 
+ read_again:
        /* Read data if any */
-
        count = tty_buffer_request_room(&hp->port, N_INBUF);
 
        /* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
 #endif /* CONFIG_MAGIC_SYSRQ */
                tty_insert_flip_char(&hp->port, buf[i], 0);
        }
-       if (n == count)
-               poll_mask |= HVC_POLL_READ;
-       read_total = n;
+       read_total += n;
+
+       if (may_sleep) {
+               /* Keep going until the flip is full */
+               spin_unlock_irqrestore(&hp->lock, flags);
+               cond_resched();
+               spin_lock_irqsave(&hp->lock, flags);
+               goto read_again;
+       } else if (read_total < HVC_ATOMIC_READ_MAX) {
+               /* Break and defer if it's a large read in atomic */
+               goto read_again;
+       }
+
+       /*
+        * Latency break, schedule another poll immediately.
+        */
+       poll_mask |= HVC_POLL_READ;
 
  out:
        /* Wakeup write queue if necessary */
index fa8dcb4..d31b975 100644 (file)
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
        if (!data->skip_autocfg)
                dw8250_setup_port(p);
 
-#ifdef CONFIG_PM
-       uart.capabilities |= UART_CAP_RPM;
-#endif
-
        /* If we have a valid fifosize, try hooking up DMA */
        if (p->fifosize) {
                data->dma.rxconf.src_maxburst = p->fifosize / 4;
index 24a5f05..e538959 100644 (file)
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
        /* Get the address of the host memory buffer.
         */
        bdp = pinfo->rx_cur;
-       while (bdp->cbd_sc & BD_SC_EMPTY)
-               ;
+       if (bdp->cbd_sc & BD_SC_EMPTY)
+               return NO_POLL_CHAR;
 
        /* If the buffer address is in the CPM DPRAM, don't
         * convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
                poll_chars = 0;
        }
        if (poll_chars <= 0) {
-               poll_chars = poll_wait_key(poll_buf, pinfo);
+               int ret = poll_wait_key(poll_buf, pinfo);
+
+               if (ret == NO_POLL_CHAR)
+                       return ret;
+               poll_chars = ret;
                pollp = poll_buf;
        }
        poll_chars--;
index 51e47a6..3f8d127 100644 (file)
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
        struct circ_buf *ring = &sport->rx_ring;
        int ret, nent;
        int bits, baud;
-       struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+       struct tty_port *port = &sport->port.state->port;
+       struct tty_struct *tty = port->tty;
        struct ktermios *termios = &tty->termios;
 
        baud = tty_get_baud_rate(tty);
index 239c0fa..0f67197 100644 (file)
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
                                ret);
                        return ret;
                }
+
+               ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+                                      dev_name(&pdev->dev), sport);
+               if (ret) {
+                       dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+                               ret);
+                       return ret;
+               }
        } else {
                ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
                                       dev_name(&pdev->dev), sport);
index d04b5ee..170e446 100644 (file)
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
                termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
                termios->c_cflag &= CREAD | CBAUD;
                termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+               termios->c_cflag |= CS8;
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
index ac4424b..ab3f6e9 100644 (file)
@@ -292,6 +292,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
        },
 
        /*
+        * The "SCIFA" that is in RZ/T and RZ/A2.
+        * It looks like a normal SCIF with FIFO data, but with a
+        * compressed address space. Also, the break out of interrupts
+        * are different: ERI/BRI, RXI, TXI, TEI, DRI.
+        */
+       [SCIx_RZ_SCIFA_REGTYPE] = {
+               .regs = {
+                       [SCSMR]         = { 0x00, 16 },
+                       [SCBRR]         = { 0x02,  8 },
+                       [SCSCR]         = { 0x04, 16 },
+                       [SCxTDR]        = { 0x06,  8 },
+                       [SCxSR]         = { 0x08, 16 },
+                       [SCxRDR]        = { 0x0A,  8 },
+                       [SCFCR]         = { 0x0C, 16 },
+                       [SCFDR]         = { 0x0E, 16 },
+                       [SCSPTR]        = { 0x10, 16 },
+                       [SCLSR]         = { 0x12, 16 },
+               },
+               .fifosize = 16,
+               .overrun_reg = SCLSR,
+               .overrun_mask = SCLSR_ORER,
+               .sampling_rate_mask = SCI_SR(32),
+               .error_mask = SCIF_DEFAULT_ERROR_MASK,
+               .error_clear = SCIF_ERROR_CLEAR,
+       },
+
+       /*
         * Common SH-3 SCIF definitions.
         */
        [SCIx_SH3_SCIF_REGTYPE] = {
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
        [SCIx_SH4_SCIF_REGTYPE] = {
                .regs = {
                        [SCSMR]         = { 0x00, 16 },
-                       [SCBRR]         = { 0x02,  8 },
-                       [SCSCR]         = { 0x04, 16 },
-                       [SCxTDR]        = { 0x06,  8 },
-                       [SCxSR]         = { 0x08, 16 },
-                       [SCxRDR]        = { 0x0a,  8 },
-                       [SCFCR]         = { 0x0c, 16 },
-                       [SCFDR]         = { 0x0e, 16 },
-                       [SCSPTR]        = { 0x10, 16 },
-                       [SCLSR]         = { 0x12, 16 },
+                       [SCBRR]         = { 0x04,  8 },
+                       [SCSCR]         = { 0x08, 16 },
+                       [SCxTDR]        = { 0x0c,  8 },
+                       [SCxSR]         = { 0x10, 16 },
+                       [SCxRDR]        = { 0x14,  8 },
+                       [SCFCR]         = { 0x18, 16 },
+                       [SCFDR]         = { 0x1c, 16 },
+                       [SCSPTR]        = { 0x20, 16 },
+                       [SCLSR]         = { 0x24, 16 },
                },
                .fifosize = 16,
                .overrun_reg = SCLSR,
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
 {
        struct uart_port *port = &sci_port->port;
        const struct resource *res;
-       unsigned int i, regtype;
+       unsigned int i;
        int ret;
 
        sci_port->cfg   = p;
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
        if (unlikely(sci_port->params == NULL))
                return -EINVAL;
 
-       regtype = sci_port->params - sci_port_params;
        switch (p->type) {
        case PORT_SCIFB:
                sci_port->rx_trigger = 48;
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
                        port->regshift = 1;
        }
 
-       if (regtype == SCIx_SH4_SCIF_REGTYPE)
-               if (sci_port->reg_size >= 0x20)
-                       port->regshift = 1;
-
        /*
         * The UART port needs an IRQ value, so we peg this to the RX IRQ
         * for the multi-IRQ ports, which is where we are primarily
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
                .compatible = "renesas,scif-r7s72100",
                .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
        },
+       {
+               .compatible = "renesas,scif-r7s9210",
+               .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+       },
        /* Family-specific types */
        {
                .compatible = "renesas,rcar-gen1-scif",
index 32bc3e3..5e5da9a 100644 (file)
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
 static int tty_reopen(struct tty_struct *tty)
 {
        struct tty_driver *driver = tty->driver;
+       int retval;
 
        if (driver->type == TTY_DRIVER_TYPE_PTY &&
            driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty)
 
        tty->count++;
 
-       if (!tty->ldisc)
-               return tty_ldisc_reinit(tty, tty->termios.c_line);
+       if (tty->ldisc)
+               return 0;
 
-       return 0;
+       retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+       if (retval)
+               tty->count--;
+
+       return retval;
 }
 
 /**
index a78ad10..73cdc0d 100644 (file)
@@ -32,6 +32,8 @@
 #include <asm/io.h>
 #include <linux/uaccess.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
                if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
                        ret = -ENXIO;
                else {
+                       vsa.console = array_index_nospec(vsa.console,
+                                                        MAX_NR_CONSOLES + 1);
                        vsa.console--;
                        console_lock();
                        ret = vc_allocate(vsa.console);
index 27346d6..bc03b0a 100644 (file)
@@ -780,20 +780,9 @@ static int acm_tty_write(struct tty_struct *tty,
        }
 
        if (acm->susp_count) {
-               if (acm->putbuffer) {
-                       /* now to preserve order */
-                       usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
-                       acm->putbuffer = NULL;
-               }
                usb_anchor_urb(wb->urb, &acm->delayed);
                spin_unlock_irqrestore(&acm->write_lock, flags);
                return count;
-       } else {
-               if (acm->putbuffer) {
-                       /* at this point there is no good way to handle errors */
-                       acm_start_wb(acm, acm->putbuffer);
-                       acm->putbuffer = NULL;
-               }
        }
 
        stat = acm_start_wb(acm, wb);
@@ -804,66 +793,6 @@ static int acm_tty_write(struct tty_struct *tty,
        return count;
 }
 
-static void acm_tty_flush_chars(struct tty_struct *tty)
-{
-       struct acm *acm = tty->driver_data;
-       struct acm_wb *cur;
-       int err;
-       unsigned long flags;
-
-       spin_lock_irqsave(&acm->write_lock, flags);
-
-       cur = acm->putbuffer;
-       if (!cur) /* nothing to do */
-               goto out;
-
-       acm->putbuffer = NULL;
-       err = usb_autopm_get_interface_async(acm->control);
-       if (err < 0) {
-               cur->use = 0;
-               acm->putbuffer = cur;
-               goto out;
-       }
-
-       if (acm->susp_count)
-               usb_anchor_urb(cur->urb, &acm->delayed);
-       else
-               acm_start_wb(acm, cur);
-out:
-       spin_unlock_irqrestore(&acm->write_lock, flags);
-       return;
-}
-
-static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
-{
-       struct acm *acm = tty->driver_data;
-       struct acm_wb *cur;
-       int wbn;
-       unsigned long flags;
-
-overflow:
-       cur = acm->putbuffer;
-       if (!cur) {
-               spin_lock_irqsave(&acm->write_lock, flags);
-               wbn = acm_wb_alloc(acm);
-               if (wbn >= 0) {
-                       cur = &acm->wb[wbn];
-                       acm->putbuffer = cur;
-               }
-               spin_unlock_irqrestore(&acm->write_lock, flags);
-               if (!cur)
-                       return 0;
-       }
-
-       if (cur->len == acm->writesize) {
-               acm_tty_flush_chars(tty);
-               goto overflow;
-       }
-
-       cur->buf[cur->len++] = ch;
-       return 1;
-}
-
 static int acm_tty_write_room(struct tty_struct *tty)
 {
        struct acm *acm = tty->driver_data;
@@ -1585,6 +1514,7 @@ static void acm_disconnect(struct usb_interface *intf)
 {
        struct acm *acm = usb_get_intfdata(intf);
        struct tty_struct *tty;
+       int i;
 
        /* sibling interface is already cleaning up */
        if (!acm)
@@ -1615,6 +1545,11 @@ static void acm_disconnect(struct usb_interface *intf)
 
        tty_unregister_device(acm_tty_driver, acm->minor);
 
+       usb_free_urb(acm->ctrlurb);
+       for (i = 0; i < ACM_NW; i++)
+               usb_free_urb(acm->wb[i].urb);
+       for (i = 0; i < acm->rx_buflimit; i++)
+               usb_free_urb(acm->read_urbs[i]);
        acm_write_buffers_free(acm);
        usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
        acm_read_buffers_free(acm);
@@ -1987,8 +1922,6 @@ static const struct tty_operations acm_ops = {
        .cleanup =              acm_tty_cleanup,
        .hangup =               acm_tty_hangup,
        .write =                acm_tty_write,
-       .put_char =             acm_tty_put_char,
-       .flush_chars =          acm_tty_flush_chars,
        .write_room =           acm_tty_write_room,
        .ioctl =                acm_tty_ioctl,
        .throttle =             acm_tty_throttle,
index eacc116..ca06b20 100644 (file)
@@ -96,7 +96,6 @@ struct acm {
        unsigned long read_urbs_free;
        struct urb *read_urbs[ACM_NR];
        struct acm_rb read_buffers[ACM_NR];
-       struct acm_wb *putbuffer;                       /* for acm_tty_put_char() */
        int rx_buflimit;
        spinlock_t read_lock;
        u8 *notification_buffer;                        /* to reassemble fragmented notifications */
index 50a2362..48277bb 100644 (file)
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
 }
 EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
 
+/**
+ * usb_of_get_companion_dev - Find the companion device
+ * @dev: the device pointer to find a companion
+ *
+ * Find the companion device from platform bus.
+ *
+ * Takes a reference to the returned struct device which needs to be dropped
+ * after use.
+ *
+ * Return: On success, a pointer to the companion device, %NULL on failure.
+ */
+struct device *usb_of_get_companion_dev(struct device *dev)
+{
+       struct device_node *node;
+       struct platform_device *pdev = NULL;
+
+       node = of_parse_phandle(dev->of_node, "companion", 0);
+       if (node)
+               pdev = of_find_device_by_node(node);
+
+       of_node_put(node);
+
+       return pdev ? &pdev->dev : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
 #endif
 
 MODULE_LICENSE("GPL");
index 15cc76e..99116af 100644 (file)
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
  */
 struct usb_role_switch *usb_role_switch_get(struct device *dev)
 {
-       return device_connection_find_match(dev, "usb-role-switch", NULL,
-                                           usb_role_switch_match);
+       struct usb_role_switch *sw;
+
+       sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+                                         usb_role_switch_match);
+
+       if (!IS_ERR_OR_NULL(sw))
+               WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+       return sw;
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_get);
 
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
  */
 void usb_role_switch_put(struct usb_role_switch *sw)
 {
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
                put_device(&sw->dev);
+               module_put(sw->dev.parent->driver->owner);
+       }
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_put);
 
index 6ce77b3..244417d 100644 (file)
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        struct async *as = NULL;
        struct usb_ctrlrequest *dr = NULL;
        unsigned int u, totlen, isofrmlen;
-       int i, ret, is_in, num_sgs = 0, ifnum = -1;
+       int i, ret, num_sgs = 0, ifnum = -1;
        int number_of_packets = 0;
        unsigned int stream_id = 0;
        void *buf;
+       bool is_in;
+       bool allow_short = false;
+       bool allow_zero = false;
        unsigned long mask =    USBDEVFS_URB_SHORT_NOT_OK |
                                USBDEVFS_URB_BULK_CONTINUATION |
                                USBDEVFS_URB_NO_FSBR |
@@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = 0;
        switch (uurb->type) {
        case USBDEVFS_URB_TYPE_CONTROL:
+               if (is_in)
+                       allow_short = true;
                if (!usb_endpoint_xfer_control(&ep->desc))
                        return -EINVAL;
                /* min 8 byte setup packet */
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                break;
 
        case USBDEVFS_URB_TYPE_BULK:
+               if (!is_in)
+                       allow_zero = true;
+               else
+                       allow_short = true;
                switch (usb_endpoint_type(&ep->desc)) {
                case USB_ENDPOINT_XFER_CONTROL:
                case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                if (!usb_endpoint_xfer_int(&ep->desc))
                        return -EINVAL;
  interrupt_urb:
+               if (!is_in)
+                       allow_zero = true;
+               else
+                       allow_short = true;
                break;
 
        case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
        u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
        if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
                u |= URB_ISO_ASAP;
-       if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+       if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
                u |= URB_SHORT_NOT_OK;
-       if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+       if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
                u |= URB_ZERO_PACKET;
        if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
                u |= URB_NO_INTERRUPT;
        as->urb->transfer_flags = u;
 
+       if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+               dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+       if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+               dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
        as->urb->transfer_buffer_length = uurb->buffer_length;
        as->urb->setup_packet = (unsigned char *)dr;
        dr = NULL;
index e76e95f..a1f225f 100644 (file)
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        struct device *dev;
        struct usb_device *udev;
        int retval = 0;
-       int lpm_disable_error = -ENODEV;
 
        if (!iface)
                return -ENODEV;
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 
        iface->condition = USB_INTERFACE_BOUND;
 
-       /* See the comment about disabling LPM in usb_probe_interface(). */
-       if (driver->disable_hub_initiated_lpm) {
-               lpm_disable_error = usb_unlocked_disable_lpm(udev);
-               if (lpm_disable_error) {
-                       dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
-                               __func__, driver->name);
-                       return -ENOMEM;
-               }
-       }
-
        /* Claimed interfaces are initially inactive (suspended) and
         * runtime-PM-enabled, but only if the driver has autosuspend
         * support.  Otherwise they are marked active, to prevent the
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
        if (device_is_registered(dev))
                retval = device_bind_driver(dev);
 
-       /* Attempt to re-enable USB3 LPM, if the disable was successful. */
-       if (!lpm_disable_error)
-               usb_unlocked_enable_lpm(udev);
+       if (retval) {
+               dev->driver = NULL;
+               usb_set_intfdata(iface, NULL);
+               iface->needs_remote_wakeup = 0;
+               iface->condition = USB_INTERFACE_UNBOUND;
+
+               /*
+                * Unbound interfaces are always runtime-PM-disabled
+                * and runtime-PM-suspended
+                */
+               if (driver->supports_autosuspend)
+                       pm_runtime_disable(dev);
+               pm_runtime_set_suspended(dev);
+       }
 
        return retval;
 }
index 66fe1b7..0343246 100644 (file)
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
                                event == PM_EVENT_RESTORE);
                if (retval) {
                        dev_err(dev, "PCI post-resume error %d!\n", retval);
-                       if (hcd->shared_hcd)
-                               usb_hc_died(hcd->shared_hcd);
                        usb_hc_died(hcd);
                }
        }
index 228672f..bfa5eda 100644 (file)
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
  * is submitted that needs that bandwidth.  Some other operating systems
  * allocate bandwidth early, when a configuration is chosen.
  *
+ * xHCI reserves bandwidth and configures the alternate setting in
+ * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
+ * may be disabled. Drivers cannot rely on any particular alternate
+ * setting being in effect after a failure.
+ *
  * This call is synchronous, and may not be used in an interrupt context.
  * Also, drivers must not change altsettings while urbs are scheduled for
  * endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
                         alternate);
                return -EINVAL;
        }
+       /*
+        * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
+        * including freeing dropped endpoint ring buffers.
+        * Make sure the interface endpoints are flushed before that
+        */
+       usb_disable_interface(dev, iface, false);
 
        /* Make sure we have enough bandwidth for this alternate interface.
         * Remove the current alt setting and add the new alt setting.
index fd77442..651708d 100644 (file)
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
        return NULL;
 }
 EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
-
-/**
- * usb_of_get_companion_dev - Find the companion device
- * @dev: the device pointer to find a companion
- *
- * Find the companion device from platform bus.
- *
- * Takes a reference to the returned struct device which needs to be dropped
- * after use.
- *
- * Return: On success, a pointer to the companion device, %NULL on failure.
- */
-struct device *usb_of_get_companion_dev(struct device *dev)
-{
-       struct device_node *node;
-       struct platform_device *pdev = NULL;
-
-       node = of_parse_phandle(dev->of_node, "companion", 0);
-       if (node)
-               pdev = of_find_device_by_node(node);
-
-       of_node_put(node);
-
-       return pdev ? &pdev->dev : NULL;
-}
-EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
index 097057d..178d6c6 100644 (file)
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
        quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
                             GFP_KERNEL);
        if (!quirk_list) {
+               quirk_count = 0;
                mutex_unlock(&quirk_mutex);
                return -ENOMEM;
        }
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
        .string = quirks_param,
 };
 
-module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
 MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
 
 /* Lists of quirky USB devices, split in device quirks and interface quirks.
@@ -178,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* CBM - Flash disk */
        { USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
+       { USB_DEVICE(0x0218, 0x0201), .driver_info =
+                       USB_QUIRK_CONFIG_INTF_STRINGS },
+
        /* WORLDE easy key (easykey.25) MIDI controller  */
        { USB_DEVICE(0x0218, 0x0401), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +411,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x2040, 0x7200), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* DJI CineSSD */
+       { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
        /* INTEL VALUE SSD */
        { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
 
index 623be31..79d8bd7 100644 (file)
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
        struct usb_interface_cache *intf_cache = NULL;
        int i;
 
+       if (!config)
+               return NULL;
        for (i = 0; i < config->desc.bNumInterfaces; i++) {
                if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
                                == iface_num) {
index 9a53a58..5776428 100644 (file)
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
        dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
                (unsigned long)res->start, hsotg->regs);
 
-       hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
-
        retval = dwc2_lowlevel_hw_init(hsotg);
        if (retval)
                return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (retval)
                return retval;
 
+       hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
+
        retval = dwc2_get_dr_mode(hsotg);
        if (retval)
                goto error;
index 40bf9e0..4c2771c 100644 (file)
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int dwc3_of_simple_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
 {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
        int                     i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
 {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
        int                     ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
 {
        struct dwc3_of_simple *simple = dev_get_drvdata(dev);
 
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
        return 0;
 }
 
-static int dwc3_of_simple_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
 {
        struct dwc3_of_simple *simple = dev_get_drvdata(dev);
 
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
index 5edd794..1286076 100644 (file)
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
        u32             value;
 
        reg = pcim_iomap(pci, GP_RWBAR, 0);
-       if (IS_ERR(reg))
-               return PTR_ERR(reg);
+       if (!reg)
+               return -ENOMEM;
 
        value = readl(reg + GP_RWREG1);
        if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
index 032ea7d..2b53194 100644 (file)
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
 
 /**
  * dwc3_gadget_start_config - configure ep resources
- * @dwc: pointer to our controller context structure
  * @dep: endpoint that is being enabled
  *
  * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
index 53a48f5..587c503 100644 (file)
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
 static int fotg210_udc_remove(struct platform_device *pdev)
 {
        struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+       int i;
 
        usb_del_gadget_udc(&fotg210->gadget);
        iounmap(fotg210->reg);
        free_irq(platform_get_irq(pdev, 0), fotg210);
 
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
        return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        /* initialize udc */
        fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
        if (fotg210 == NULL)
-               goto err_alloc;
+               goto err;
 
        for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
                _ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->reg = ioremap(res->start, resource_size(res));
        if (fotg210->reg == NULL) {
                pr_err("ioremap error.\n");
-               goto err_map;
+               goto err_alloc;
        }
 
        spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
        fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
                                GFP_KERNEL);
        if (fotg210->ep0_req == NULL)
-               goto err_req;
+               goto err_map;
 
        fotg210_init(fotg210);
 
@@ -1187,12 +1190,14 @@ err_req:
        fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
 
 err_map:
-       if (fotg210->reg)
-               iounmap(fotg210->reg);
+       iounmap(fotg210->reg);
 
 err_alloc:
+       for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+               kfree(fotg210->ep[i]);
        kfree(fotg210);
 
+err:
        return ret;
 }
 
index 318246d..b02ab2a 100644 (file)
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
                writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
        } else {
                writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
-               stop_activity(dev, dev->driver);
+               stop_activity(dev, NULL);
        }
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
+       if (!is_on && dev->driver)
+               dev->driver->disconnect(&dev->gadget);
+
        return 0;
 }
 
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
                nuke(&dev->ep[i]);
 
        /* report disconnect; the driver is already quiesced */
-       if (driver)
+       if (driver) {
+               spin_unlock(&dev->lock);
                driver->disconnect(&dev->gadget);
+               spin_lock(&dev->lock);
+       }
 
        usb_reinit(dev);
 }
@@ -3341,6 +3347,8 @@ next_endpoints:
                BIT(PCI_RETRY_ABORT_INTERRUPT))
 
 static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
 {
        struct net2280_ep       *ep;
        u32                     tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
                        if (disconnect || reset) {
                                stop_activity(dev, dev->driver);
                                ep0_start(dev);
+                               spin_unlock(&dev->lock);
                                if (reset)
                                        usb_gadget_udc_reset
                                                (&dev->gadget, dev->driver);
                                else
                                        (dev->driver->disconnect)
                                                (&dev->gadget);
+                               spin_lock(&dev->lock);
                                return;
                        }
                }
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
        tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
        if (stat & tmp) {
                writel(tmp, &dev->regs->irqstat1);
+               spin_unlock(&dev->lock);
                if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
                        if (dev->driver->suspend)
                                dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
                                dev->driver->resume(&dev->gadget);
                        /* at high speed, note erratum 0133 */
                }
+               spin_lock(&dev->lock);
                stat &= ~tmp;
        }
 
index 1f879b3..e1656f3 100644 (file)
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
        switch (speed) {
        case USB_STA_SPEED_SS:
                usb3->gadget.speed = USB_SPEED_SUPER;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
                break;
        case USB_STA_SPEED_HS:
                usb3->gadget.speed = USB_SPEED_HIGH;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
                break;
        case USB_STA_SPEED_FS:
                usb3->gadget.speed = USB_SPEED_FULL;
+               usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
                break;
        default:
                usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
                        /* for control pipe */
                        usb3->gadget.ep0 = &usb3_ep->ep;
                        usb_ep_set_maxpacket_limit(&usb3_ep->ep,
-                                               USB3_EP0_HSFS_MAX_PACKET_SIZE);
+                                               USB3_EP0_SS_MAX_PACKET_SIZE);
                        usb3_ep->ep.caps.type_control = true;
                        usb3_ep->ep.caps.dir_in = true;
                        usb3_ep->ep.caps.dir_out = true;
index 072bd5d..5b8a3d9 100644 (file)
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
        } else {
                int frame = 0;
                dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
-               msleep(100);
+               mdelay(100);
                return frame;
        }
 }
index ef350c3..b1f27aa 100644 (file)
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
        in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
        in_ep_ctx->deq = out_ep_ctx->deq;
        in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+       if (xhci->quirks & XHCI_MTK_HOST) {
+               in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
+               in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
+       }
 }
 
 /* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
index 7334da9..71d0d33 100644 (file)
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
        xhci_mtk_host_enable(mtk);
 
        xhci_dbg(xhci, "%s: restart port polling\n", __func__);
-       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
-       usb_hcd_poll_rh_status(hcd);
        set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
        usb_hcd_poll_rh_status(xhci->shared_hcd);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
        return 0;
 }
 
index 6372edf..722860e 100644 (file)
@@ -185,6 +185,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+            pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
             pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
                xhci->quirks |= XHCI_MISSING_CAS;
index 8dc77e3..94e9392 100644 (file)
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 {
        const struct xhci_plat_priv *priv_match;
        const struct hc_driver  *driver;
-       struct device           *sysdev;
+       struct device           *sysdev, *tmpdev;
        struct xhci_hcd         *xhci;
        struct resource         *res;
        struct usb_hcd          *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
                goto disable_clk;
        }
 
-       if (device_property_read_bool(sysdev, "usb2-lpm-disable"))
-               xhci->quirks |= XHCI_HW_LPM_DISABLE;
+       /* imod_interval is the interrupt moderation value in nanoseconds. */
+       xhci->imod_interval = 40000;
 
-       if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
-               xhci->quirks |= XHCI_LPM_SUPPORT;
+       /* Iterate over all parent nodes for finding quirks */
+       for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
 
-       if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
-               xhci->quirks |= XHCI_BROKEN_PORT_PED;
+               if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
+                       xhci->quirks |= XHCI_HW_LPM_DISABLE;
 
-       /* imod_interval is the interrupt moderation value in nanoseconds. */
-       xhci->imod_interval = 40000;
-       device_property_read_u32(sysdev, "imod-interval-ns",
-                                &xhci->imod_interval);
+               if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
+                       xhci->quirks |= XHCI_LPM_SUPPORT;
+
+               if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+                       xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
+               device_property_read_u32(tmpdev, "imod-interval-ns",
+                                        &xhci->imod_interval);
+       }
 
        hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
        if (IS_ERR(hcd->usb_phy)) {
index 61f48b1..0420eef 100644 (file)
@@ -37,6 +37,21 @@ static unsigned long long quirks;
 module_param(quirks, ullong, S_IRUGO);
 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
 
+static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+{
+       struct xhci_segment *seg = ring->first_seg;
+
+       if (!td || !td->start_seg)
+               return false;
+       do {
+               if (seg == td->start_seg)
+                       return true;
+               seg = seg->next;
+       } while (seg && seg != ring->first_seg);
+
+       return false;
+}
+
 /* TODO: copied from ehci-hcd.c - can this be refactored? */
 /*
  * xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                goto done;
        }
 
+       /*
+        * check ring is not re-allocated since URB was enqueued. If it is, then
+        * make sure none of the ring related pointers in this URB private data
+        * are touched, such as td_list, otherwise we overwrite freed data
+        */
+       if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
+               xhci_err(xhci, "Canceled URB td not found on endpoint ring");
+               for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
+                       td = &urb_priv->td[i];
+                       if (!list_empty(&td->cancelled_td_list))
+                               list_del_init(&td->cancelled_td_list);
+               }
+               goto err_giveback;
+       }
+
        if (xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HC halted, freeing TD manually.");
index 82f2206..b5d6616 100644 (file)
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
        mask &= 0x0f;
        val &= 0x0f;
        d = (priv->reg[1] & (~mask)) ^ val;
-       if (set_1284_register(pp, 2, d, GFP_KERNEL))
+       if (set_1284_register(pp, 2, d, GFP_ATOMIC))
                return 0;
        priv->reg[1] = d;
        return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
 {
        unsigned char ret;
 
-       if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
+       if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
                return 0;
        return ret & 0xf8;
 }
index 3be40ea..6d9fd5f 100644 (file)
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        spin_unlock_irqrestore(&dev->lock, flags);
        mutex_unlock(&dev->io_mutex);
 
+       if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+               return -EIO;
+
        return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
 {
        struct usb_yurex *dev;
        int i, set = 0, retval = 0;
-       char buffer[16];
+       char buffer[16 + 1];
        char *data = buffer;
        unsigned long long c, c2 = 0;
        signed long timeout = 0;
        DEFINE_WAIT(wait);
 
-       count = min(sizeof(buffer), count);
+       count = min(sizeof(buffer) - 1, count);
        dev = file->private_data;
 
        /* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                retval = -EFAULT;
                goto error;
        }
+       buffer[count] = 0;
        memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
 
        switch (buffer[0]) {
index eecfd06..d045d84 100644 (file)
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
                (SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
                SSUSB_U2_PORT_HOST_SEL));
 
-       if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+       if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
                mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+               if (mtu->is_u3_ip)
+                       mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+                                    SSUSB_U3_PORT_DUAL_MODE);
+       }
 
        return ssusb_check_clocks(mtu->ssusb, check_clk);
 }
index 6ee3714..a45bb25 100644 (file)
 
 /* U3D_SSUSB_U3_CTRL_0P */
 #define SSUSB_U3_PORT_SSP_SPEED        BIT(9)
+#define SSUSB_U3_PORT_DUAL_MODE        BIT(7)
 #define SSUSB_U3_PORT_HOST_SEL         BIT(2)
 #define SSUSB_U3_PORT_PDN              BIT(1)
 #define SSUSB_U3_PORT_DIS              BIT(0)
index df827ff..23a0df7 100644 (file)
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
        return controller;
 }
 
-static void dsps_dma_controller_destroy(struct dma_controller *c)
-{
-       struct musb *musb = c->musb;
-       struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
-       void __iomem *usbss_base = glue->usbss_base;
-
-       musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
-       cppi41_dma_controller_destroy(c);
-}
-
 #ifdef CONFIG_PM_SLEEP
 static void dsps_dma_controller_suspend(struct dsps_glue *glue)
 {
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
 
 #ifdef CONFIG_USB_TI_CPPI41_DMA
        .dma_init       = dsps_dma_controller_create,
-       .dma_exit       = dsps_dma_controller_destroy,
+       .dma_exit       = cppi41_dma_controller_destroy,
 #endif
        .enable         = dsps_musb_enable,
        .disable        = dsps_musb_disable,
index e53c682..9bbcee3 100644 (file)
@@ -173,7 +173,7 @@ struct ump_interrupt {
 }  __attribute__((packed));
 
 
-#define TIUMP_GET_PORT_FROM_CODE(c)    (((c) >> 4) - 3)
+#define TIUMP_GET_PORT_FROM_CODE(c)    (((c) >> 6) & 0x01)
 #define TIUMP_GET_FUNC_FROM_CODE(c)    ((c) & 0x0f)
 #define TIUMP_INTERRUPT_CODE_LSR       0x03
 #define TIUMP_INTERRUPT_CODE_MSR       0x04
index 0215b70..e72ad9f 100644 (file)
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
 /* Interface is reserved */
 #define RSVD(ifnum)    ((BIT(ifnum) & 0xff) << 0)
 
+/* Interface must have two endpoints */
+#define NUMEP2         BIT(16)
+
 
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
          .driver_info = RSVD(4) },
-       { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
-         .driver_info = RSVD(4) | RSVD(5) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial,
        if (device_flags & RSVD(iface_desc->bInterfaceNumber))
                return -ENODEV;
 
+       /*
+        * Allow matching on bNumEndpoints for devices whose interface numbers
+        * can change (e.g. Quectel EP06).
+        */
+       if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
+               return -ENODEV;
+
        /* Store the device flags so we can use them during attach. */
        usb_set_serial_data(serial, (void *)device_flags);
 
index 3010878..e3c5832 100644 (file)
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
 
 static int ti_get_port_from_code(unsigned char code)
 {
-       return (code >> 4) - 3;
+       return (code >> 6) & 0x01;
 }
 
 static int ti_get_func_from_code(unsigned char code)
index 40864c2..4d02735 100644 (file)
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
 
 /* Motorola Tetra driver */
 #define MOTOROLA_TETRA_IDS()                   \
-       { USB_DEVICE(0x0cad, 0x9011) }  /* Motorola Solutions TETRA PEI */
+       { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+       { USB_DEVICE(0x0cad, 0x9012) }  /* MTP6550 */
 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
 
 /* Novatel Wireless GPS driver */
index c267f28..e227bb5 100644 (file)
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
                return 0;
        }
 
+       if ((us->fflags & US_FL_NO_ATA_1X) &&
+                       (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
+               memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
+                      sizeof(usb_stor_sense_invalidCDB));
+               srb->result = SAM_STAT_CHECK_CONDITION;
+               done(srb);
+               return 0;
+       }
+
        /* enqueue the command and wake up the control thread */
        srb->scsi_done = done;
        us->srb = srb;
index 9e9de54..1f7b401 100644 (file)
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
                sdev->skip_ms_page_8 = 1;
                sdev->wce_default_on = 1;
        }
+
+       /*
+        * Some disks return the total number of blocks in response
+        * to READ CAPACITY rather than the highest block number.
+        * If this device makes that mistake, tell the sd driver.
+        */
+       if (devinfo->flags & US_FL_FIX_CAPACITY)
+               sdev->fix_capacity = 1;
+
+       /*
+        * Some devices don't like MODE SENSE with page=0x3f,
+        * which is the command used for checking if a device
+        * is write-protected.  Now that we tell the sd driver
+        * to do a 192-byte transfer with this command the
+        * majority of devices work fine, but a few still can't
+        * handle it.  The sd driver will simply assume those
+        * devices are write-enabled.
+        */
+       if (devinfo->flags & US_FL_NO_WP_DETECT)
+               sdev->skip_ms_page_3f = 1;
+
        scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
index 22fcfcc..f7f83b2 100644 (file)
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV(  0x2735, 0x100b, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_GO_SLOW ),
 
+/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
+UNUSUAL_DEV(  0x2ca3, 0x0031, 0x0000, 0x9999,
+               "DJI",
+               "CineSSD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_ATA_1X),
+
 /*
  * Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
  * Mio Moov 330
index 95a2b10..76299b6 100644 (file)
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
 /* API for the port drivers */
 
 /**
- * typec_match_altmode - Match SVID to an array of alternate modes
+ * typec_match_altmode - Match SVID and mode to an array of alternate modes
  * @altmodes: Array of alternate modes
- * @n: Number of elements in the array, or -1 for NULL termiated arrays
+ * @n: Number of elements in the array, or -1 for NULL terminated arrays
  * @svid: Standard or Vendor ID to match with
+ * @mode: Mode to match with
  *
- * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no
+ * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
  * match is found.
  */
 struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
index c202975..e61dffb 100644 (file)
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
  * typec_port_register_altmode - Register USB Type-C Port Alternate Mode
  * @port: USB Type-C Port that supports the alternate mode
  * @desc: Description of the alternate mode
- * @drvdata: Private pointer to driver specific info
  *
  * This routine is used to register an alternate mode that @port is capable of
  * supporting.
index ddaac63..d990aa5 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/device.h>
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/usb/typec_mux.h>
 
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
        mutex_lock(&switch_lock);
        sw = device_connection_find_match(dev, "typec-switch", NULL,
                                          typec_switch_match);
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
+               WARN_ON(!try_module_get(sw->dev->driver->owner));
                get_device(sw->dev);
+       }
        mutex_unlock(&switch_lock);
 
        return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
  */
 void typec_switch_put(struct typec_switch *sw)
 {
-       if (!IS_ERR_OR_NULL(sw))
+       if (!IS_ERR_OR_NULL(sw)) {
+               module_put(sw->dev->driver->owner);
                put_device(sw->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(typec_switch_put);
 
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
 
        mutex_lock(&mux_lock);
        mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
-       if (!IS_ERR_OR_NULL(mux))
+       if (!IS_ERR_OR_NULL(mux)) {
+               WARN_ON(!try_module_get(mux->dev->driver->owner));
                get_device(mux->dev);
+       }
        mutex_unlock(&mux_lock);
 
        return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
  */
 void typec_mux_put(struct typec_mux *mux)
 {
-       if (!IS_ERR_OR_NULL(mux))
+       if (!IS_ERR_OR_NULL(mux)) {
+               module_put(mux->dev->driver->owner);
                put_device(mux->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(typec_mux_put);
 
index 96c1d84..b13c6b4 100644 (file)
@@ -952,7 +952,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
        list_for_each_entry_safe(node, n, &d->pending_list, node) {
                struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
                if (msg->iova <= vq_msg->iova &&
-                   msg->iova + msg->size - 1 > vq_msg->iova &&
+                   msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
                        vhost_poll_queue(&node->vq->poll);
                        list_del(&node->node);
index 8235b28..d09bab3 100644 (file)
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
 extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
 extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
+extern const u8 aty_postdividers[8];
+
 
     /*
      *  Hardware cursor support
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
 
 extern void aty_reset_engine(const struct atyfb_par *par);
 extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8   aty_ld_pll_ct(int offset, const struct atyfb_par *par);
 
 void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
 void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
index a9a8272..05111e9 100644 (file)
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
                /*
                 * PLL Reference Divider M:
                 */
-               M = pll_regs[2];
+               M = pll_regs[PLL_REF_DIV];
 
                /*
                 * PLL Feedback Divider N (Dependent on CLOCK_CNTL):
                 */
-               N = pll_regs[7 + (clock_cntl & 3)];
+               N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
 
                /*
                 * PLL Post Divider P (Dependent on CLOCK_CNTL):
                 */
-               P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+               P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+                                    ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
 
                /*
                 * PLL Divider Q:
index 74a62aa..f87cc81 100644 (file)
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
  */
 
 #define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
 
 static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
 {
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
                pll->vclk_post_div += (q <  64*8);
                pll->vclk_post_div += (q <  32*8);
        }
-       pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+       pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
        //    pll->vclk_post_div <<= 6;
        pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
        pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                u8 mclk_fb_div, pll_ext_cntl;
                pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
                pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
-               pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+               pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
                mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
                if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
                        mclk_fb_div <<= 1;
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                xpost_div += (q <  64*8);
                xpost_div += (q <  32*8);
        }
-       pll->ct.xclk_post_div_real = postdividers[xpost_div];
+       pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
        pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
 
 #ifdef CONFIG_PPC
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
                        mpost_div += (q <  64*8);
                        mpost_div += (q <  32*8);
                }
-               sclk_post_div_real = postdividers[mpost_div];
+               sclk_post_div_real = aty_postdividers[mpost_div];
                pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
                pll->ct.spll_cntl2 = mpost_div << 4;
 #ifdef DEBUG
index 3946649..ba90687 100644 (file)
@@ -42,6 +42,7 @@ struct bmp_dib_header {
        u32 colors_important;
 } __packed;
 
+static bool use_bgrt = true;
 static bool request_mem_succeeded = false;
 static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
        void *bgrt_image = NULL;
        u8 *dst = info->screen_base;
 
+       if (!use_bgrt)
+               return;
+
        if (!bgrt_tab.image_address) {
                pr_info("efifb: No BGRT, not showing boot graphics\n");
                return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
                                screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                        else if (!strcmp(this_opt, "nowc"))
                                mem_flags &= ~EFI_MEMORY_WC;
+                       else if (!strcmp(this_opt, "nobgrt"))
+                               use_bgrt = false;
                }
        }
 
index ef69273..a3edb20 100644 (file)
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
        if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                return -EFAULT;
 
+       if (mr->w > 4096 || mr->h > 4096)
+               return -EINVAL;
+
        if (mr->w * mr->h * 3 > mr->buffer_size)
                return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                        mr->x, mr->y, mr->w, mr->h);
 
        if (r > 0) {
-               if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+               if (copy_to_user(mr->buffer, buf, r))
                        r = -EFAULT;
        }
 
index def3a50..d059d04 100644 (file)
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
        /*
         * enable controller clock
         */
-       clk_enable(fbi->clk);
+       clk_prepare_enable(fbi->clk);
 
        pxa168fb_set_par(info);
 
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
 failed_free_cmap:
        fb_dealloc_cmap(&info->cmap);
 failed_free_clk:
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 failed_free_fbmem:
        dma_free_coherent(fbi->dev, info->fix.smem_len,
                        info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
        dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
                    info->screen_base, info->fix.smem_start);
 
-       clk_disable(fbi->clk);
+       clk_disable_unprepare(fbi->clk);
 
        framebuffer_release(info);
 
index 045e8af..9e88e3f 100644 (file)
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                        dev_name);
                   goto out_err0;
                }
-               /* fall though */
+               /* fall through */
        case S9000_ID_ARTIST:
        case S9000_ID_HCRX:
        case S9000_ID_TIMBER:
index b459edf..90d387b 100644 (file)
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
          This value is used to allocate enough space in internal
          tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-       bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+       bool "Scrub pages before returning them to system by default"
        depends on XEN_BALLOON
        default y
        help
          Scrub pages before returning them to the system for reuse by
          other domains.  This makes sure that any confidential data
          is not accidentally visible to other domains.  Is it more
-         secure, but slightly less efficient.
+         secure, but slightly less efficient. This can be controlled with
+         xen_scrub_pages=0 parameter and
+         /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+         This option only sets the default value.
+
          If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
index d4265c8..b1357aa 100644 (file)
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-       if (cpu_online(cpu)) {
-               lock_device_hotplug();
+       if (!cpu_is_hotpluggable(cpu))
+               return;
+       lock_device_hotplug();
+       if (cpu_online(cpu))
                device_offline(get_cpu_device(cpu));
-               unlock_device_hotplug();
-       }
-       if (cpu_present(cpu))
+       if (!cpu_online(cpu) && cpu_present(cpu)) {
                xen_arch_unregister_cpu(cpu);
-
-       set_cpu_present(cpu, false);
+               set_cpu_present(cpu, false);
+       }
+       unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
index 08e4af0..e6c1934 100644 (file)
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
                clear_evtchn_to_irq_row(row);
        }
 
-       evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+       evtchn_to_irq[row][col] = irq;
        return 0;
 }
 
index 57390c7..b0b02a5 100644 (file)
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
        return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-                             unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+                             unsigned long start, unsigned long end,
+                             bool blockable)
 {
        unsigned long mstart, mend;
        int err;
 
+       if (!in_range(map, start, end))
+               return 0;
+
+       if (!blockable)
+               return -EAGAIN;
+
        mstart = max(start, map->vma->vm_start);
        mend   = min(end,   map->vma->vm_end);
        pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
                                (mstart - map->vma->vm_start) >> PAGE_SHIFT,
                                (mend - mstart) >> PAGE_SHIFT);
        WARN_ON(err);
+
+       return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
        struct gntdev_grant_map *map;
        int ret = 0;
 
-       /* TODO do we really need a mutex here? */
        if (blockable)
                mutex_lock(&priv->lock);
        else if (!mutex_trylock(&priv->lock))
                return -EAGAIN;
 
        list_for_each_entry(map, &priv->maps, next) {
-               if (in_range(map, start, end)) {
-                       ret = -EAGAIN;
+               ret = unmap_if_in_range(map, start, end, blockable);
+               if (ret)
                        goto out_unlock;
-               }
-               unmap_if_in_range(map, start, end);
        }
        list_for_each_entry(map, &priv->freeable_maps, next) {
-               if (in_range(map, start, end)) {
-                       ret = -EAGAIN;
+               ret = unmap_if_in_range(map, start, end, blockable);
+               if (ret)
                        goto out_unlock;
-               }
-               unmap_if_in_range(map, start, end);
        }
 
 out_unlock:
index 7bafa70..84575ba 100644 (file)
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                return ret;
 
        for (i = 0; i < count; i++) {
-               /* Retry eagain maps */
-               if (map_ops[i].status == GNTST_eagain)
-                       gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
-                                               &map_ops[i].status, __func__);
-
-               if (map_ops[i].status == GNTST_okay) {
+               switch (map_ops[i].status) {
+               case GNTST_okay:
+               {
                        struct xen_page_foreign *foreign;
 
                        SetPageForeign(pages[i]);
                        foreign = xen_page_foreign(pages[i]);
                        foreign->domid = map_ops[i].dom;
                        foreign->gref = map_ops[i].ref;
+                       break;
+               }
+
+               case GNTST_no_device_space:
+                       pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
+                       break;
+
+               case GNTST_eagain:
+                       /* Retry eagain maps */
+                       gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
+                                               map_ops + i,
+                                               &map_ops[i].status, __func__);
+                       /* Test status in next loop iteration. */
+                       i--;
+                       break;
+
+               default:
+                       break;
                }
        }
 
index c93d8ef..5bb01a6 100644 (file)
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
                /*
                 * The Xenstore watch fires directly after registering it and
                 * after a suspend/resume cycle. So ENOENT is no error but
-                * might happen in those cases.
+                * might happen in those cases. ERANGE is observed when we get
+                * an empty value (''), this happens when we acknowledge the
+                * request by writing '\0' below.
                 */
-               if (err != -ENOENT)
+               if (err != -ENOENT && err != -ERANGE)
                        pr_err("Error %d reading sysrq code in control/sysrq\n",
                               err);
                xenbus_transaction_end(xbt, 1);
index 084799c..3782cf0 100644 (file)
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
index 294f35c..63c1494 100644 (file)
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
                              char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
        &dev_attr_max_schedule_delay.attr.attr,
        &dev_attr_retry_count.attr.attr,
        &dev_attr_max_retry_count.attr.attr,
+       &dev_attr_scrub_pages.attr.attr,
        NULL
 };
 
index f208883..5b47188 100644 (file)
@@ -402,10 +402,19 @@ static ssize_t modalias_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(modalias);
 
+static ssize_t state_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n",
+                       xenbus_strstate(to_xenbus_device(dev)->state));
+}
+static DEVICE_ATTR_RO(state);
+
 static struct attribute *xenbus_dev_attrs[] = {
        &dev_attr_nodename.attr,
        &dev_attr_devtype.attr,
        &dev_attr_modalias.attr,
+       &dev_attr_state.attr,
        NULL,
 };
 
index 0c3285c..476dcbb 100644 (file)
@@ -98,13 +98,13 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
                goto inval;
 
        args = strchr(name, ' ');
-       if (!args)
-               goto inval;
-       do {
-               *args++ = 0;
-       } while(*args == ' ');
-       if (!*args)
-               goto inval;
+       if (args) {
+               do {
+                       *args++ = 0;
+               } while(*args == ' ');
+               if (!*args)
+                       goto inval;
+       }
 
        /* determine command to perform */
        _debug("cmd=%s name=%s args=%s", buf, name, args);
@@ -120,7 +120,6 @@ static int afs_proc_cells_write(struct file *file, char *buf, size_t size)
 
                if (test_and_set_bit(AFS_CELL_FL_NO_GC, &cell->flags))
                        afs_put_cell(net, cell);
-               printk("kAFS: Added new cell '%s'\n", name);
        } else {
                goto inval;
        }
index 53af9f5..2cddfe7 100644 (file)
@@ -1280,6 +1280,7 @@ struct btrfs_root {
        int send_in_progress;
        struct btrfs_subvolume_writers *subv_writers;
        atomic_t will_be_snapshotted;
+       atomic_t snapshot_force_cow;
 
        /* For qgroup metadata reserved space */
        spinlock_t qgroup_meta_rsv_lock;
@@ -3390,9 +3391,9 @@ do {                                                                      \
 #define btrfs_debug(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl_in_rcu(fs_info, fmt, args...) \
-       btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
+       btrfs_no_printk_in_rcu(fs_info, KERN_DEBUG fmt, ##args)
 #define btrfs_debug_rl(fs_info, fmt, args...) \
        btrfs_no_printk(fs_info, KERN_DEBUG fmt, ##args)
 #endif
@@ -3404,6 +3405,13 @@ do {                                                     \
        rcu_read_unlock();                              \
 } while (0)
 
+#define btrfs_no_printk_in_rcu(fs_info, fmt, args...)  \
+do {                                                   \
+       rcu_read_lock();                                \
+       btrfs_no_printk(fs_info, fmt, ##args);          \
+       rcu_read_unlock();                              \
+} while (0)
+
 #define btrfs_printk_ratelimited(fs_info, fmt, args...)                \
 do {                                                           \
        static DEFINE_RATELIMIT_STATE(_rs,                      \
index 5124c15..05dc3c1 100644 (file)
@@ -1187,6 +1187,7 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
        atomic_set(&root->log_batch, 0);
        refcount_set(&root->refs, 1);
        atomic_set(&root->will_be_snapshotted, 0);
+       atomic_set(&root->snapshot_force_cow, 0);
        root->log_transid = 0;
        root->log_transid_committed = -1;
        root->last_log_commit = 0;
index de6f75f..2d90742 100644 (file)
@@ -5800,7 +5800,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * root: the root of the parent directory
  * rsv: block reservation
  * items: the number of items that we need do reservation
- * qgroup_reserved: used to return the reserved size in qgroup
+ * use_global_rsv: allow fallback to the global block reservation
  *
  * This function is used to reserve the space for snapshot/subvolume
  * creation and deletion. Those operations are different with the
@@ -5810,10 +5810,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
  * the space reservation mechanism in start_transaction().
  */
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
-                                    struct btrfs_block_rsv *rsv,
-                                    int items,
+                                    struct btrfs_block_rsv *rsv, int items,
                                     bool use_global_rsv)
 {
+       u64 qgroup_num_bytes = 0;
        u64 num_bytes;
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
@@ -5821,12 +5821,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
 
        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
                /* One for parent inode, two for dir entries */
-               num_bytes = 3 * fs_info->nodesize;
-               ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
+               qgroup_num_bytes = 3 * fs_info->nodesize;
+               ret = btrfs_qgroup_reserve_meta_prealloc(root,
+                               qgroup_num_bytes, true);
                if (ret)
                        return ret;
-       } else {
-               num_bytes = 0;
        }
 
        num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
@@ -5838,8 +5837,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
        if (ret == -ENOSPC && use_global_rsv)
                ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
 
-       if (ret && num_bytes)
-               btrfs_qgroup_free_meta_prealloc(root, num_bytes);
+       if (ret && qgroup_num_bytes)
+               btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
 
        return ret;
 }
index 9357a19..3ea5339 100644 (file)
@@ -1271,7 +1271,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        u64 disk_num_bytes;
        u64 ram_bytes;
        int extent_type;
-       int ret, err;
+       int ret;
        int type;
        int nocow;
        int check_prev = 1;
@@ -1403,11 +1403,8 @@ next_slot:
                         * if there are pending snapshots for this root,
                         * we fall into common COW way.
                         */
-                       if (!nolock) {
-                               err = btrfs_start_write_no_snapshotting(root);
-                               if (!err)
-                                       goto out_check;
-                       }
+                       if (!nolock && atomic_read(&root->snapshot_force_cow))
+                               goto out_check;
                        /*
                         * force cow if csum exists in the range.
                         * this ensure that csum for a given extent are
@@ -1416,9 +1413,6 @@ next_slot:
                        ret = csum_exist_in_range(fs_info, disk_bytenr,
                                                  num_bytes);
                        if (ret) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
-
                                /*
                                 * ret could be -EIO if the above fails to read
                                 * metadata.
@@ -1431,11 +1425,8 @@ next_slot:
                                WARN_ON_ONCE(nolock);
                                goto out_check;
                        }
-                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
-                               if (!nolock)
-                                       btrfs_end_write_no_snapshotting(root);
+                       if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
                                goto out_check;
-                       }
                        nocow = 1;
                } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
                        extent_end = found_key.offset +
@@ -1448,8 +1439,6 @@ next_slot:
 out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
-                       if (!nolock && nocow)
-                               btrfs_end_write_no_snapshotting(root);
                        if (nocow)
                                btrfs_dec_nocow_writers(fs_info, disk_bytenr);
                        goto next_slot;
@@ -1471,8 +1460,6 @@ out_check:
                                             end, page_started, nr_written, 1,
                                             NULL);
                        if (ret) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1492,8 +1479,6 @@ out_check:
                                          ram_bytes, BTRFS_COMPRESS_NONE,
                                          BTRFS_ORDERED_PREALLOC);
                        if (IS_ERR(em)) {
-                               if (!nolock && nocow)
-                                       btrfs_end_write_no_snapshotting(root);
                                if (nocow)
                                        btrfs_dec_nocow_writers(fs_info,
                                                                disk_bytenr);
@@ -1532,8 +1517,6 @@ out_check:
                                             EXTENT_CLEAR_DATA_RESV,
                                             PAGE_UNLOCK | PAGE_SET_PRIVATE2);
 
-               if (!nolock && nocow)
-                       btrfs_end_write_no_snapshotting(root);
                cur_offset = extent_end;
 
                /*
@@ -6639,6 +6622,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                drop_inode = 1;
        } else {
                struct dentry *parent = dentry->d_parent;
+               int ret;
+
                err = btrfs_update_inode(trans, root, inode);
                if (err)
                        goto fail;
@@ -6652,7 +6637,12 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                                goto fail;
                }
                d_instantiate(dentry, inode);
-               btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
+                                        true, NULL);
+               if (ret == BTRFS_NEED_TRANS_COMMIT) {
+                       err = btrfs_commit_transaction(trans);
+                       trans = NULL;
+               }
        }
 
 fail:
@@ -9388,14 +9378,21 @@ static int btrfs_rename_exchange(struct inode *old_dir,
        u64 new_idx = 0;
        u64 root_objectid;
        int ret;
-       int ret2;
        bool root_log_pinned = false;
        bool dest_log_pinned = false;
+       struct btrfs_log_ctx ctx_root;
+       struct btrfs_log_ctx ctx_dest;
+       bool sync_log_root = false;
+       bool sync_log_dest = false;
+       bool commit_transaction = false;
 
        /* we only allow rename subvolume link between subvolumes */
        if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
                return -EXDEV;
 
+       btrfs_init_log_ctx(&ctx_root, old_inode);
+       btrfs_init_log_ctx(&ctx_dest, new_inode);
+
        /* close the race window with snapshot create/destroy ioctl */
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&fs_info->subvol_sem);
@@ -9542,15 +9539,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
 
        if (root_log_pinned) {
                parent = new_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx_root);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log_root = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                root_log_pinned = false;
        }
        if (dest_log_pinned) {
-               parent = old_dentry->d_parent;
-               btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
-                               parent);
+               if (!commit_transaction) {
+                       parent = old_dentry->d_parent;
+                       ret = btrfs_log_new_name(trans, BTRFS_I(new_inode),
+                                                BTRFS_I(new_dir), parent,
+                                                false, &ctx_dest);
+                       if (ret == BTRFS_NEED_LOG_SYNC)
+                               sync_log_dest = true;
+                       else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                               commit_transaction = true;
+                       ret = 0;
+               }
                btrfs_end_log_trans(dest);
                dest_log_pinned = false;
        }
@@ -9583,8 +9594,26 @@ out_fail:
                        dest_log_pinned = false;
                }
        }
-       ret2 = btrfs_end_transaction(trans);
-       ret = ret ? ret : ret2;
+       if (!ret && sync_log_root && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root,
+                                    &ctx_root);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (!ret && sync_log_dest && !commit_transaction) {
+               ret = btrfs_sync_log(trans, BTRFS_I(new_inode)->root,
+                                    &ctx_dest);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
@@ -9661,6 +9690,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        int ret;
        u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
        bool log_pinned = false;
+       struct btrfs_log_ctx ctx;
+       bool sync_log = false;
+       bool commit_transaction = false;
 
        if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return -EPERM;
@@ -9818,8 +9850,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (log_pinned) {
                struct dentry *parent = new_dentry->d_parent;
 
-               btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
-                               parent);
+               btrfs_init_log_ctx(&ctx, old_inode);
+               ret = btrfs_log_new_name(trans, BTRFS_I(old_inode),
+                                        BTRFS_I(old_dir), parent,
+                                        false, &ctx);
+               if (ret == BTRFS_NEED_LOG_SYNC)
+                       sync_log = true;
+               else if (ret == BTRFS_NEED_TRANS_COMMIT)
+                       commit_transaction = true;
+               ret = 0;
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
@@ -9856,7 +9895,19 @@ out_fail:
                btrfs_end_log_trans(root);
                log_pinned = false;
        }
-       btrfs_end_transaction(trans);
+       if (!ret && sync_log) {
+               ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
+               if (ret)
+                       commit_transaction = true;
+       }
+       if (commit_transaction) {
+               ret = btrfs_commit_transaction(trans);
+       } else {
+               int ret2;
+
+               ret2 = btrfs_end_transaction(trans);
+               ret = ret ? ret : ret2;
+       }
 out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&fs_info->subvol_sem);
index 63600dc..d60b6ca 100644 (file)
@@ -747,6 +747,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        struct btrfs_pending_snapshot *pending_snapshot;
        struct btrfs_trans_handle *trans;
        int ret;
+       bool snapshot_force_cow = false;
 
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return -EINVAL;
@@ -763,6 +764,11 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                goto free_pending;
        }
 
+       /*
+        * Force new buffered writes to reserve space even when NOCOW is
+        * possible. This is to avoid later writeback (running dealloc) to
+        * fallback to COW mode and unexpectedly fail with ENOSPC.
+        */
        atomic_inc(&root->will_be_snapshotted);
        smp_mb__after_atomic();
        /* wait for no snapshot writes */
@@ -773,6 +779,14 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
        if (ret)
                goto dec_and_free;
 
+       /*
+        * All previous writes have started writeback in NOCOW mode, so now
+        * we force future writes to fallback to COW mode during snapshot
+        * creation.
+        */
+       atomic_inc(&root->snapshot_force_cow);
+       snapshot_force_cow = true;
+
        btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
 
        btrfs_init_block_rsv(&pending_snapshot->block_rsv,
@@ -837,6 +851,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 fail:
        btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
 dec_and_free:
+       if (snapshot_force_cow)
+               atomic_dec(&root->snapshot_force_cow);
        if (atomic_dec_and_test(&root->will_be_snapshotted))
                wake_up_var(&root->will_be_snapshotted);
 free_pending:
@@ -3453,6 +3469,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
 
                same_lock_start = min_t(u64, loff, dst_loff);
                same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
+       } else {
+               /*
+                * If the source and destination inodes are different, the
+                * source's range end offset matches the source's i_size, that
+                * i_size is not a multiple of the sector size, and the
+                * destination range does not go past the destination's i_size,
+                * we must round down the length to the nearest sector size
+                * multiple. If we don't do this adjustment we end replacing
+                * with zeroes the bytes in the range that starts at the
+                * deduplication range's end offset and ends at the next sector
+                * size multiple.
+                */
+               if (loff + olen == i_size_read(src) &&
+                   dst_loff + len < i_size_read(dst)) {
+                       const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
+
+                       len = round_down(i_size_read(src), sz) - loff;
+                       olen = len;
+               }
        }
 
 again:
index 4353bb6..d4917c0 100644 (file)
@@ -1019,10 +1019,9 @@ out_add_root:
        spin_unlock(&fs_info->qgroup_lock);
 
        ret = btrfs_commit_transaction(trans);
-       if (ret) {
-               trans = NULL;
+       trans = NULL;
+       if (ret)
                goto out_free_path;
-       }
 
        ret = qgroup_rescan_init(fs_info, 0, 1);
        if (!ret) {
index 1650dc4..3c2ae0e 100644 (file)
@@ -6025,14 +6025,25 @@ void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
  * Call this after adding a new name for a file and it will properly
  * update the log to reflect the new name.
  *
- * It will return zero if all goes well, and it will return 1 if a
- * full transaction commit is required.
+ * @ctx can not be NULL when @sync_log is false, and should be NULL when it's
+ * true (because it's not used).
+ *
+ * Return value depends on whether @sync_log is true or false.
+ * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *            committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
+ *            otherwise.
+ * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
+ *             to sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
+ *             or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
+ *             committed (without attempting to sync the log).
  */
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent)
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx)
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
+       int ret;
 
        /*
         * this will force the logging code to walk the dentry chain
@@ -6047,9 +6058,34 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
         */
        if (inode->logged_trans <= fs_info->last_trans_committed &&
            (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
-               return 0;
+               return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
+                       BTRFS_DONT_NEED_LOG_SYNC;
+
+       if (sync_log) {
+               struct btrfs_log_ctx ctx2;
+
+               btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
+               ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                            LOG_INODE_EXISTS, &ctx2);
+               if (ret == BTRFS_NO_LOG_SYNC)
+                       return BTRFS_DONT_NEED_TRANS_COMMIT;
+               else if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+
+               ret = btrfs_sync_log(trans, inode->root, &ctx2);
+               if (ret)
+                       return BTRFS_NEED_TRANS_COMMIT;
+               return BTRFS_DONT_NEED_TRANS_COMMIT;
+       }
+
+       ASSERT(ctx);
+       ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
+                                    LOG_INODE_EXISTS, ctx);
+       if (ret == BTRFS_NO_LOG_SYNC)
+               return BTRFS_DONT_NEED_LOG_SYNC;
+       else if (ret)
+               return BTRFS_NEED_TRANS_COMMIT;
 
-       return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
-                                     LOG_INODE_EXISTS, NULL);
+       return BTRFS_NEED_LOG_SYNC;
 }
 
index 122e68b..7ab9bb8 100644 (file)
@@ -71,8 +71,16 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
                             int for_rename);
 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
                                   struct btrfs_inode *dir);
+/* Return values for btrfs_log_new_name() */
+enum {
+       BTRFS_DONT_NEED_TRANS_COMMIT,
+       BTRFS_NEED_TRANS_COMMIT,
+       BTRFS_DONT_NEED_LOG_SYNC,
+       BTRFS_NEED_LOG_SYNC,
+};
 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
                        struct btrfs_inode *inode, struct btrfs_inode *old_dir,
-                       struct dentry *parent);
+                       struct dentry *parent,
+                       bool sync_log, struct btrfs_log_ctx *ctx);
 
 #endif
index da86706..f4405e4 100644 (file)
@@ -4491,7 +4491,12 @@ again:
 
        /* Now btrfs_update_device() will change the on-disk size. */
        ret = btrfs_update_device(trans, device);
-       btrfs_end_transaction(trans);
+       if (ret < 0) {
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+       } else {
+               ret = btrfs_commit_transaction(trans);
+       }
 done:
        btrfs_free_path(path);
        if (ret) {
index 4cc679d..6f1ae3a 100644 (file)
@@ -39,7 +39,6 @@
 #include <linux/buffer_head.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/bio.h>
-#include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
 #include <linux/mpage.h>
index 43ca3b7..eab1359 100644 (file)
@@ -602,6 +602,8 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
 
 /*
  * create a new fs client
+ *
+ * Success or not, this function consumes @fsopt and @opt.
  */
 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                        struct ceph_options *opt)
@@ -609,17 +611,20 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
        struct ceph_fs_client *fsc;
        int page_count;
        size_t size;
-       int err = -ENOMEM;
+       int err;
 
        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
-       if (!fsc)
-               return ERR_PTR(-ENOMEM);
+       if (!fsc) {
+               err = -ENOMEM;
+               goto fail;
+       }
 
        fsc->client = ceph_create_client(opt, fsc);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
+       opt = NULL; /* fsc->client now owns this */
 
        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        fsc->client->osdc.abort_on_full = true;
@@ -677,6 +682,9 @@ fail_client:
        ceph_destroy_client(fsc->client);
 fail:
        kfree(fsc);
+       if (opt)
+               ceph_destroy_options(opt);
+       destroy_mount_options(fsopt);
        return ERR_PTR(err);
 }
 
@@ -1042,8 +1050,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
-               destroy_mount_options(fsopt);
-               ceph_destroy_options(opt);
                goto out_final;
        }
 
index 35c83fe..abcd78e 100644 (file)
@@ -6,6 +6,7 @@ config CIFS
        select CRYPTO_MD4
        select CRYPTO_MD5
        select CRYPTO_SHA256
+       select CRYPTO_SHA512
        select CRYPTO_CMAC
        select CRYPTO_HMAC
        select CRYPTO_ARC4
index b380e08..a2b2355 100644 (file)
@@ -105,9 +105,6 @@ convert_sfm_char(const __u16 src_char, char *target)
        case SFM_LESSTHAN:
                *target = '<';
                break;
-       case SFM_SLASH:
-               *target = '\\';
-               break;
        case SFM_SPACE:
                *target = ' ';
                break;
index 0c9ab62..9dcaed0 100644 (file)
@@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 
 /* Flags */
 #define   MID_WAIT_CANCELLED    1 /* Cancelled while waiting for response */
+#define   MID_DELETED            2 /* Mid has been dequeued/deleted */
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
index dc2f4cf..5657b79 100644 (file)
@@ -601,10 +601,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
        }
 
        count = 0;
+       /*
+        * We know that all the name entries in the protocols array
+        * are short (< 16 bytes anyway) and are NUL terminated.
+        */
        for (i = 0; i < CIFS_NUM_PROT; i++) {
-               strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
-               count += strlen(protocols[i].name) + 1;
-               /* null at end of source and target buffers anyway */
+               size_t len = strlen(protocols[i].name) + 1;
+
+               memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+               count += len;
        }
        inc_rfc1001_len(pSMB, count);
        pSMB->ByteCount = cpu_to_le16(count);
index c832a8a..52d71b6 100644 (file)
@@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
                mid->mid_state = MID_RESPONSE_RECEIVED;
        else
                mid->mid_state = MID_RESPONSE_MALFORMED;
-       list_del_init(&mid->qhead);
+       /*
+        * Trying to handle/dequeue a mid after the send_recv()
+        * function has finished processing it is a bug.
+        */
+       if (mid->mid_flags & MID_DELETED)
+               printk_once(KERN_WARNING
+                           "trying to dequeue a deleted mid\n");
+       else
+               list_del_init(&mid->qhead);
        spin_unlock(&GlobalMid_Lock);
 }
 
@@ -938,8 +946,7 @@ next_pdu:
                } else {
                        mids[0] = server->ops->find_mid(server, buf);
                        bufs[0] = buf;
-                       if (mids[0])
-                               num_mids = 1;
+                       num_mids = 1;
 
                        if (!mids[0] || !mids[0]->receive)
                                length = standard_receive3(server, mids[0]);
@@ -2547,7 +2554,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
        if (tcon == NULL)
                return -ENOMEM;
 
-       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
+       snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
 
        /* cannot fail */
        nls_codepage = load_nls_default();
index d32eaa4..6e8765f 100644 (file)
@@ -467,6 +467,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_READ;
        oparms.create_options = CREATE_NOT_DIR;
+       if (backup_cred(cifs_sb))
+               oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
        oparms.disposition = FILE_OPEN;
        oparms.path = path;
        oparms.fid = &fid;
index dacb2c0..6926685 100644 (file)
@@ -402,9 +402,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
                        (struct smb_com_transaction_change_notify_rsp *)buf;
                struct file_notify_information *pnotify;
                __u32 data_offset = 0;
+               size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
                if (get_bcc(buf) > sizeof(struct file_notify_information)) {
                        data_offset = le32_to_cpu(pSMBr->DataOffset);
 
+                       if (data_offset >
+                           len - sizeof(struct file_notify_information)) {
+                               cifs_dbg(FYI, "invalid data_offset %u\n",
+                                        data_offset);
+                               return true;
+                       }
                        pnotify = (struct file_notify_information *)
                                ((char *)&pSMBr->hdr.Protocol + data_offset);
                        cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
index eeab81c..e169e1a 100644 (file)
@@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
 
                new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
                                pfData->FileNameLength;
-       } else
-               new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+       } else {
+               u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+
+               if (old_entry + next_offset < old_entry) {
+                       cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+                       return NULL;
+               }
+               new_entry = old_entry + next_offset;
+       }
        cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
        /* validate that new_entry is not past end of SMB */
        if (new_entry >= end_of_smb) {
index db04536..6a9c475 100644 (file)
@@ -248,16 +248,20 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
                 * MacOS server pads after SMB2.1 write response with 3 bytes
                 * of junk. Other servers match RFC1001 len to actual
                 * SMB2/SMB3 frame length (header + smb2 response specific data)
-                * Some windows servers do too when compounding is used.
-                * Log the server error (once), but allow it and continue
+                * Some windows servers also pad up to 8 bytes when compounding.
+                * If pad is longer than eight bytes, log the server behavior
+                * (once), since it may indicate a problem but allow it and continue
                 * since the frame is parseable.
                 */
                if (clc_len < len) {
-                       printk_once(KERN_WARNING
-                               "SMB2 server sent bad RFC1001 len %d not %d\n",
-                               len, clc_len);
+                       pr_warn_once(
+                            "srv rsp padded more than expected. Length %d not %d for cmd:%d mid:%llu\n",
+                            len, clc_len, command, mid);
                        return 0;
                }
+               pr_warn_once(
+                       "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
+                       len, clc_len, command, mid);
 
                return 1;
        }
index 247a98e..89985a0 100644 (file)
@@ -630,7 +630,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -779,7 +782,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -858,7 +864,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_WRITE_EA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -1453,7 +1462,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = fid;
        oparms.reconnect = false;
 
@@ -1465,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        }
 
        srch_inf->entries_in_buffer = 0;
-       srch_inf->index_of_last_entry = 0;
+       srch_inf->index_of_last_entry = 2;
 
        rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
                                  fid->volatile_fid, 0, srch_inf);
@@ -1857,7 +1869,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        oparms.tcon = tcon;
        oparms.desired_access = FILE_READ_ATTRIBUTES;
        oparms.disposition = FILE_OPEN;
-       oparms.create_options = 0;
+       if (backup_cred(cifs_sb))
+               oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
+       else
+               oparms.create_options = 0;
        oparms.fid = &fid;
        oparms.reconnect = false;
 
@@ -3639,7 +3654,7 @@ struct smb_version_values smb21_values = {
 struct smb_version_values smb3any_values = {
        .version_string = SMB3ANY_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3660,7 +3675,7 @@ struct smb_version_values smb3any_values = {
 struct smb_version_values smbdefault_values = {
        .version_string = SMBDEFAULT_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3681,7 +3696,7 @@ struct smb_version_values smbdefault_values = {
 struct smb_version_values smb30_values = {
        .version_string = SMB30_VERSION_STRING,
        .protocol_id = SMB30_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3702,7 +3717,7 @@ struct smb_version_values smb30_values = {
 struct smb_version_values smb302_values = {
        .version_string = SMB302_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -3723,7 +3738,7 @@ struct smb_version_values smb302_values = {
 struct smb_version_values smb311_values = {
        .version_string = SMB311_VERSION_STRING,
        .protocol_id = SMB311_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
index 5740aa8..f54d07b 100644 (file)
@@ -2178,6 +2178,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
        if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
            *oplock == SMB2_OPLOCK_LEVEL_NONE)
                req->RequestedOplockLevel = *oplock;
+       else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
+                 (oparms->create_options & CREATE_NOT_FILE))
+               req->RequestedOplockLevel = *oplock; /* no srv lease support */
        else {
                rc = add_lease_context(server, iov, &n_iov,
                                       oparms->fid->lease_key, oplock);
@@ -2456,14 +2459,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        /* We check for obvious errors in the output buffer length and offset */
        if (*plen == 0)
                goto ioctl_exit; /* server returned no data */
-       else if (*plen > 0xFF00) {
+       else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
                cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
                *plen = 0;
                rc = -EIO;
                goto ioctl_exit;
        }
 
-       if (rsp_iov.iov_len < le32_to_cpu(rsp->OutputOffset) + *plen) {
+       if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
                cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
                        le32_to_cpu(rsp->OutputOffset));
                *plen = 0;
@@ -3574,33 +3577,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
        int len;
        unsigned int entrycount = 0;
        unsigned int next_offset = 0;
-       FILE_DIRECTORY_INFO *entryptr;
+       char *entryptr;
+       FILE_DIRECTORY_INFO *dir_info;
 
        if (bufstart == NULL)
                return 0;
 
-       entryptr = (FILE_DIRECTORY_INFO *)bufstart;
+       entryptr = bufstart;
 
        while (1) {
-               entryptr = (FILE_DIRECTORY_INFO *)
-                                       ((char *)entryptr + next_offset);
-
-               if ((char *)entryptr + size > end_of_buf) {
+               if (entryptr + next_offset < entryptr ||
+                   entryptr + next_offset > end_of_buf ||
+                   entryptr + next_offset + size > end_of_buf) {
                        cifs_dbg(VFS, "malformed search entry would overflow\n");
                        break;
                }
 
-               len = le32_to_cpu(entryptr->FileNameLength);
-               if ((char *)entryptr + len + size > end_of_buf) {
+               entryptr = entryptr + next_offset;
+               dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+
+               len = le32_to_cpu(dir_info->FileNameLength);
+               if (entryptr + len < entryptr ||
+                   entryptr + len > end_of_buf ||
+                   entryptr + len + size > end_of_buf) {
                        cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
                                 end_of_buf);
                        break;
                }
 
-               *lastentry = (char *)entryptr;
+               *lastentry = entryptr;
                entrycount++;
 
-               next_offset = le32_to_cpu(entryptr->NextEntryOffset);
+               next_offset = le32_to_cpu(dir_info->NextEntryOffset);
                if (!next_offset)
                        break;
        }
index 78f96fa..b48f439 100644 (file)
@@ -142,7 +142,8 @@ void
 cifs_delete_mid(struct mid_q_entry *mid)
 {
        spin_lock(&GlobalMid_Lock);
-       list_del(&mid->qhead);
+       list_del_init(&mid->qhead);
+       mid->mid_flags |= MID_DELETED;
        spin_unlock(&GlobalMid_Lock);
 
        DeleteMidQEntry(mid);
@@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
        return mid;
 }
 
+static void
+cifs_noop_callback(struct mid_q_entry *mid)
+{
+}
+
 int
 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
@@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                }
 
                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+               /*
+                * We don't invoke the callback for compounds unless it is the last
+                * request.
+                */
+               if (i < num_rqst - 1)
+                       midQ[i]->callback = cifs_noop_callback;
        }
-
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
        cifs_in_send_dec(ses->server);
@@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                        midQ[i]->resp_buf = NULL;
        }
 out:
+       /*
+        * This will dequeue all mids. After this it is important that the
+        * demultiplex_thread will not process any of these mids any further.
+        * This is prevented above by using a noop callback that will not
+        * wake this thread except for the very last PDU.
+        */
        for (i = 0; i < num_rqst; i++)
                cifs_delete_mid(midQ[i]);
        add_credits(ses->server, credits, optype);
index f32d712..4becbf1 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
                        xa_unlock_irq(&mapping->i_pages);
                        break;
                } else if (IS_ERR(entry)) {
+                       xa_unlock_irq(&mapping->i_pages);
                        WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
                        continue;
                }
@@ -1120,21 +1121,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
 {
        struct inode *inode = mapping->host;
        unsigned long vaddr = vmf->address;
-       vm_fault_t ret = VM_FAULT_NOPAGE;
-       struct page *zero_page;
-       pfn_t pfn;
-
-       zero_page = ZERO_PAGE(0);
-       if (unlikely(!zero_page)) {
-               ret = VM_FAULT_OOM;
-               goto out;
-       }
+       pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+       vm_fault_t ret;
 
-       pfn = page_to_pfn_t(zero_page);
        dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
                        false);
        ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-out:
        trace_dax_load_hole(inode, vmf, ret);
        return ret;
 }
index 7f7ee18..e4bb938 100644 (file)
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
        }
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+       ext2_set_inode_flags(inode);
        ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
        ei->i_frag_no = raw_inode->i_frag;
        ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse (bh);
-       ext2_set_inode_flags(inode);
        unlock_new_inode(inode);
        return inode;
        
index e2902d3..f93f988 100644 (file)
@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
        else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
                error_msg = "rec_len is too small for name_len";
        else if (unlikely(((char *) de - buf) + rlen > size))
-               error_msg = "directory entry across range";
+               error_msg = "directory entry overrun";
        else if (unlikely(le32_to_cpu(de->inode) >
                        le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
                error_msg = "inode out of bounds";
@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
 
        if (filp)
                ext4_error_file(filp, function, line, bh->b_blocknr,
-                               "bad entry in directory: %s - offset=%u(%u), "
-                               "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % size),
-                               offset, le32_to_cpu(de->inode),
-                               rlen, de->name_len);
+                               "bad entry in directory: %s - offset=%u, "
+                               "inode=%u, rec_len=%d, name_len=%d, size=%d",
+                               error_msg, offset, le32_to_cpu(de->inode),
+                               rlen, de->name_len, size);
        else
                ext4_error_inode(dir, function, line, bh->b_blocknr,
-                               "bad entry in directory: %s - offset=%u(%u), "
-                               "inode=%u, rec_len=%d, name_len=%d",
-                               error_msg, (unsigned) (offset % size),
-                               offset, le32_to_cpu(de->inode),
-                               rlen, de->name_len);
+                               "bad entry in directory: %s - offset=%u, "
+                               "inode=%u, rec_len=%d, name_len=%d, size=%d",
+                                error_msg, offset, le32_to_cpu(de->inode),
+                                rlen, de->name_len, size);
 
        return 1;
 }
index 0f0edd1..caff935 100644 (file)
 #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 
+#include <linux/compiler.h>
+
+/* Until this gets included into linux/compiler-gcc.h */
+#ifndef __nonstring
+#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
+#define __nonstring __attribute__((nonstring))
+#else
+#define __nonstring
+#endif
+#endif
+
 /*
  * The fourth extended filesystem constants/structures
  */
@@ -675,6 +686,9 @@ enum {
 /* Max physical block we can address w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS       0xFFFFFFFF
 
+/* Max logical block we can support */
+#define EXT4_MAX_LOGICAL_BLOCK         0xFFFFFFFF
+
 /*
  * Structure of an inode on the disk
  */
@@ -1226,7 +1240,7 @@ struct ext4_super_block {
        __le32  s_feature_ro_compat;    /* readonly-compatible feature set */
 /*68*/ __u8    s_uuid[16];             /* 128-bit uuid for volume */
 /*78*/ char    s_volume_name[16];      /* volume name */
-/*88*/ char    s_last_mounted[64];     /* directory where last mounted */
+/*88*/ char    s_last_mounted[64] __nonstring; /* directory where last mounted */
 /*C8*/ __le32  s_algorithm_usage_bitmap; /* For compression */
        /*
         * Performance hints.  Directory preallocation should only
@@ -1277,13 +1291,13 @@ struct ext4_super_block {
        __le32  s_first_error_time;     /* first time an error happened */
        __le32  s_first_error_ino;      /* inode involved in first error */
        __le64  s_first_error_block;    /* block involved of first error */
-       __u8    s_first_error_func[32]; /* function where the error happened */
+       __u8    s_first_error_func[32] __nonstring;     /* function where the error happened */
        __le32  s_first_error_line;     /* line number where error happened */
        __le32  s_last_error_time;      /* most recent time of an error */
        __le32  s_last_error_ino;       /* inode involved in last error */
        __le32  s_last_error_line;      /* line number where error happened */
        __le64  s_last_error_block;     /* block involved of last error */
-       __u8    s_last_error_func[32];  /* function where the error happened */
+       __u8    s_last_error_func[32] __nonstring;      /* function where the error happened */
 #define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
        __u8    s_mount_opts[64];
        __le32  s_usr_quota_inum;       /* inode for tracking user quota */
index 3543fe8..7b47360 100644 (file)
@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
 {
        int err, inline_size;
        struct ext4_iloc iloc;
+       size_t inline_len;
        void *inline_pos;
        unsigned int offset;
        struct ext4_dir_entry_2 *de;
@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
                goto out;
        }
 
+       inline_len = ext4_get_inline_size(dir);
        offset = EXT4_INLINE_DOTDOT_SIZE;
-       while (offset < dir->i_size) {
+       while (offset < inline_len) {
                de = ext4_get_inline_entry(dir, &iloc, offset,
                                           &inline_pos, &inline_size);
                if (ext4_check_dir_entry(dir, NULL, de,
index d0dd585..d767e99 100644 (file)
@@ -3413,12 +3413,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        unsigned int blkbits = inode->i_blkbits;
-       unsigned long first_block = offset >> blkbits;
-       unsigned long last_block = (offset + length - 1) >> blkbits;
+       unsigned long first_block, last_block;
        struct ext4_map_blocks map;
        bool delalloc = false;
        int ret;
 
+       if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+               return -EINVAL;
+       first_block = offset >> blkbits;
+       last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
+                          EXT4_MAX_LOGICAL_BLOCK);
 
        if (flags & IOMAP_REPORT) {
                if (ext4_has_inline_data(inode)) {
@@ -3948,6 +3952,7 @@ static const struct address_space_operations ext4_dax_aops = {
        .writepages             = ext4_dax_writepages,
        .direct_IO              = noop_direct_IO,
        .set_page_dirty         = noop_set_page_dirty,
+       .bmap                   = ext4_bmap,
        .invalidatepage         = noop_invalidatepage,
 };
 
@@ -4192,9 +4197,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
        return 0;
 }
 
-static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock)
+static void ext4_wait_dax_page(struct ext4_inode_info *ei)
 {
-       *did_unlock = true;
        up_write(&ei->i_mmap_sem);
        schedule();
        down_write(&ei->i_mmap_sem);
@@ -4204,14 +4208,12 @@ int ext4_break_layouts(struct inode *inode)
 {
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct page *page;
-       bool retry;
        int error;
 
        if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
                return -EINVAL;
 
        do {
-               retry = false;
                page = dax_layout_busy_page(inode->i_mapping);
                if (!page)
                        return 0;
@@ -4219,8 +4221,8 @@ int ext4_break_layouts(struct inode *inode)
                error = ___wait_var_event(&page->_refcount,
                                atomic_read(&page->_refcount) == 1,
                                TASK_INTERRUPTIBLE, 0, 0,
-                               ext4_wait_dax_page(ei, &retry));
-       } while (error == 0 && retry);
+                               ext4_wait_dax_page(ei));
+       } while (error == 0);
 
        return error;
 }
@@ -4895,6 +4897,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                 * not initialized on a new filesystem. */
        }
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+       ext4_set_inode_flags(inode);
        inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
        if (ext4_has_feature_64bit(sb))
@@ -5041,7 +5044,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                goto bad_inode;
        }
        brelse(iloc.bh);
-       ext4_set_inode_flags(inode);
 
        unlock_new_inode(inode);
        return inode;
index 39b07c2..2305b43 100644 (file)
@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
         */
        sb_start_write(sb);
        ext4_mmp_csum_set(sb, mmp);
-       mark_buffer_dirty(bh);
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
index 116ff68..377d516 100644 (file)
@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
        int credits;
        u8 old_file_type;
 
+       if (new.inode && new.inode->i_nlink == 0) {
+               EXT4_ERROR_INODE(new.inode,
+                                "target of rename is already freed");
+               return -EFSCORRUPTED;
+       }
+
        if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
            (!projid_eq(EXT4_I(new_dir)->i_projid,
                        EXT4_I(old_dentry->d_inode)->i_projid)))
index e5fb384..ebbc663 100644 (file)
@@ -19,6 +19,7 @@
 
 int ext4_resize_begin(struct super_block *sb)
 {
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        int ret = 0;
 
        if (!capable(CAP_SYS_RESOURCE))
@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
          * because the user tools have no way of handling this.  Probably a
          * bad time to do it anyways.
          */
-       if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+       if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
                ext4_warning(sb, "won't resize using backup superblock at %llu",
                        (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1986,6 +1987,26 @@ retry:
                }
        }
 
+       /*
+        * Make sure the last group has enough space so that it's
+        * guaranteed to have enough space for all metadata blocks
+        * that it might need to hold.  (We might not need to store
+        * the inode table blocks in the last block group, but there
+        * will be cases where this might be needed.)
+        */
+       if ((ext4_group_first_block_no(sb, n_group) +
+            ext4_group_overhead_blocks(sb, n_group) + 2 +
+            sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+               n_blocks_count = ext4_group_first_block_no(sb, n_group);
+               n_group--;
+               n_blocks_count_retry = 0;
+               if (resize_inode) {
+                       iput(resize_inode);
+                       resize_inode = NULL;
+               }
+               goto retry;
+       }
+
        /* extend the last group */
        if (n_group == o_group)
                add = n_blocks_count - o_blocks_count;
index 5863fd2..1145109 100644 (file)
@@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
                SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
        if (test_opt(sb, DATA_ERR_ABORT))
                SEQ_OPTS_PUTS("data_err=abort");
+       if (DUMMY_ENCRYPTION_ENABLED(sbi))
+               SEQ_OPTS_PUTS("test_dummy_encryption");
 
        ext4_show_quota_options(seq, sb);
        return 0;
@@ -4378,11 +4380,13 @@ no_journal:
        block = ext4_count_free_clusters(sb);
        ext4_free_blocks_count_set(sbi->s_es, 
                                   EXT4_C2B(sbi, block));
+       ext4_superblock_csum_set(sb);
        err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
                                  GFP_KERNEL);
        if (!err) {
                unsigned long freei = ext4_count_free_inodes(sb);
                sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+               ext4_superblock_csum_set(sb);
                err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
                                          GFP_KERNEL);
        }
index 03128ed..3c159a7 100644 (file)
@@ -975,6 +975,10 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
 {
        struct gfs2_inode *ip = GFS2_I(inode);
 
+       if (!page_has_buffers(page)) {
+               create_empty_buffers(page, inode->i_sb->s_blocksize,
+                                    (1 << BH_Dirty)|(1 << BH_Uptodate));
+       }
        gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
 }
 
index 3212c29..2005529 100644 (file)
@@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
        ret = -EXDEV;
        if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
                goto fdput;
-       ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+       ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
 fdput:
        fdput(src_file);
        return ret;
index ec3fba7..488a9e7 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/mpage.h>
 #include <linux/user_namespace.h>
 #include <linux/seq_file.h>
+#include <linux/blkdev.h>
 
 #include "isofs.h"
 #include "zisofs.h"
@@ -653,6 +654,12 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
        /*
         * What if bugger tells us to go beyond page size?
         */
+       if (bdev_logical_block_size(s->s_bdev) > 2048) {
+               printk(KERN_WARNING
+                      "ISOFS: unsupported/invalid hardware sector size %d\n",
+                       bdev_logical_block_size(s->s_bdev));
+               goto out_freesbi;
+       }
        opt.blocksize = sb_min_blocksize(s, opt.blocksize);
 
        sbi->s_high_sierra = 0; /* default is iso9660 */
index 34830f6..8220a16 100644 (file)
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state,
        write_sequnlock(&state->seqlock);
 }
 
+static void nfs_state_clear_delegation(struct nfs4_state *state)
+{
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       write_sequnlock(&state->seqlock);
+}
+
 static int update_open_stateid(struct nfs4_state *state,
                const nfs4_stateid *open_stateid,
                const nfs4_stateid *delegation,
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
        if (IS_ERR(opendata))
                return PTR_ERR(opendata);
        nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
-       write_seqlock(&state->seqlock);
-       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-       write_sequnlock(&state->seqlock);
-       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       nfs_state_clear_delegation(state);
        switch (type & (FMODE_READ|FMODE_WRITE)) {
        case FMODE_READ|FMODE_WRITE:
        case FMODE_WRITE:
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
                const nfs4_stateid *stateid)
 {
        nfs_remove_bad_delegation(state->inode, stateid);
-       write_seqlock(&state->seqlock);
-       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
-       write_sequnlock(&state->seqlock);
-       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+       nfs_state_clear_delegation(state);
 }
 
 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
        delegation = rcu_dereference(NFS_I(state->inode)->delegation);
        if (delegation == NULL) {
                rcu_read_unlock();
+               nfs_state_clear_delegation(state);
                return;
        }
 
        nfs4_stateid_copy(&stateid, &delegation->stateid);
-       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
-               !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
-                       &delegation->flags)) {
+       if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+               rcu_read_unlock();
+               nfs_state_clear_delegation(state);
+               return;
+       }
+
+       if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+                               &delegation->flags)) {
                rcu_read_unlock();
-               nfs_finish_clear_delegation_stateid(state, &stateid);
                return;
        }
 
index 3df0eb5..40a08cd 100644 (file)
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
 
        if (!nfs4_state_mark_reclaim_nograce(clp, state))
                return -EBADF;
+       nfs_inode_find_delegation_state_and_recover(state->inode,
+                       &state->stateid);
        dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
                        clp->cl_hostname);
        nfs4_schedule_state_manager(clp);
index a275fba..b1483b3 100644 (file)
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
                TP_fast_assign(
                        __entry->error = error;
                        __entry->fhandle = nfs_fhandle_hash(fhandle);
-                       if (inode != NULL) {
+                       if (!IS_ERR_OR_NULL(inode)) {
                                __entry->fileid = NFS_FILEID(inode);
                                __entry->dev = inode->i_sb->s_dev;
                        } else {
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
                TP_fast_assign(
                        __entry->error = error;
                        __entry->fhandle = nfs_fhandle_hash(fhandle);
-                       if (inode != NULL) {
+                       if (!IS_ERR_OR_NULL(inode)) {
                                __entry->fileid = NFS_FILEID(inode);
                                __entry->dev = inode->i_sb->s_dev;
                        } else {
index e8f232d..7d9a51e 100644 (file)
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
        return ret;
 }
 
-static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
 {
        /*
         * send layoutcommit as it can hold up layoutreturn due to lseg
         * reference
         */
        pnfs_layoutcommit_inode(lo->plh_inode, false);
-       return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+       return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
                                   nfs_wait_bit_killable,
-                                  TASK_UNINTERRUPTIBLE);
+                                  TASK_KILLABLE);
 }
 
 static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino,
        }
 
 lookup_again:
-       nfs4_client_recover_expired_lease(clp);
+       lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+       if (IS_ERR(lseg))
+               goto out;
        first = false;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1863,9 +1865,9 @@ lookup_again:
        if (list_empty(&lo->plh_segs) &&
            atomic_read(&lo->plh_outstanding) != 0) {
                spin_unlock(&ino->i_lock);
-               if (wait_var_event_killable(&lo->plh_outstanding,
-                                       atomic_read(&lo->plh_outstanding) == 0
-                                       || !list_empty(&lo->plh_segs)))
+               lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+                                       atomic_read(&lo->plh_outstanding)));
+               if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
                        goto out_put_layout_hdr;
                pnfs_put_layout_hdr(lo);
                goto lookup_again;
@@ -1898,8 +1900,11 @@ lookup_again:
                if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
                                     &lo->plh_flags)) {
                        spin_unlock(&ino->i_lock);
-                       wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
-                                   TASK_UNINTERRUPTIBLE);
+                       lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
+                                               NFS_LAYOUT_FIRST_LAYOUTGET,
+                                               TASK_KILLABLE));
+                       if (IS_ERR(lseg))
+                               goto out_put_layout_hdr;
                        pnfs_put_layout_hdr(lo);
                        dprintk("%s retrying\n", __func__);
                        goto lookup_again;
@@ -1925,7 +1930,8 @@ lookup_again:
        if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                spin_unlock(&ino->i_lock);
                dprintk("%s wait for layoutreturn\n", __func__);
-               if (pnfs_prepare_to_retry_layoutget(lo)) {
+               lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+               if (!IS_ERR(lseg)) {
                        if (first)
                                pnfs_clear_first_layoutget(lo);
                        pnfs_put_layout_hdr(lo);
index 55a099e..b53e763 100644 (file)
@@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
 __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
                u64 dst_pos, u64 count)
 {
-       return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
+       return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
+                                            count));
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
index 03b8ba9..235b959 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * alloc.c - NILFS dat/inode allocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 05149e6..0303c39 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * alloc.h - persistent object (dat entry/disk inode) allocator/deallocator
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Koji Sato.
  * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji.
  */
index 01fb183..fb5a9a8 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * bmap.c - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2b6ffbe..2c63858 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * bmap.h - NILFS block mapping.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dec98ca..ebb24a3 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btnode.c - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Originally written by Seiji Kihara.
  * Fully revised by Ryusuke Konishi for stabilization and simplification.
  *
index 4e8aaa1..0f88dbc 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btnode.h - NILFS B-tree node cache
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara.
  * Revised by Ryusuke Konishi.
  */
index 16a7a67..23e043e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * btree.c - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 2184e47..d1421b6 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * btree.h - NILFS B-tree.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index a15a160..8d41311 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * cpfile.c - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 6eca972..6336222 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * cpfile.h - NILFS checkpoint file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index dffedb2..6f40666 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dat.c - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 57dc6cf..b17ee34 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * dat.h - NILFS disk address translation.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 582831a..81394e2 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * dir.c - NILFS directory entry operations
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji.
  */
 /*
index 96e3ed0..533e24e 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * direct.c - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index cfe85e8..ec9a23c 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * direct.h - NILFS direct block pointer.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 7da0fac..64bc813 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * file.c - NILFS regular file handling primitives including fsync().
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji and Ryusuke Konishi.
  */
 
index 853a831..aa3c328 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * gcinode.c - dummy inodes to buffer blocks for garbage collection
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
  * Revised by Ryusuke Konishi.
  *
index b8fa45c..4140d23 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ifile.c - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 188b94f..a1e1e57 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * ifile.h - NILFS inode file
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Amagai Yoshiji.
  * Revised by Ryusuke Konishi.
  *
index 6a612d8..6710855 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * inode.c - NILFS inode operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 1d2c3d7..9b96d79 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * ioctl.c - NILFS ioctl operations.
  *
  * Copyright (C) 2007, 2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index c6bc103..700870a 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * mdt.c - meta data file for NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 3f67f39..e77aea4 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * mdt.h - NILFS meta data file prototype and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index dd52d3f..9fe6d4a 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * namei.c - NILFS pathname lookup operations.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi.
  */
 /*
index 33f8c8f..a2f247b 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * nilfs.h - NILFS local header file.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato and Ryusuke Konishi.
  */
 
index 4cb850a..329a056 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * page.c - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index f3687c9..62b9bb4 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * page.h - buffer/page management specific to NILFS
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi and Seiji Kihara.
  */
 
index 5139efe..140b663 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * recovery.c - NILFS recovery logic
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 
index 68cb9e4..20c479b 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segbuf.c - NILFS segment buffer
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 10e1693..9bea1bd 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segbuf.h - NILFS Segment buffer prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 0953635..445eef4 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * segment.c - NILFS segment constructor.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 04634e3..f5cf530 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * segment.h - NILFS Segment constructor prototypes and definitions
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index c7fa139..bf3f8f0 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sufile.c - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  * Revised by Ryusuke Konishi.
  */
index 673a891..c4e2c7a 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sufile.h - NILFS segment usage file.
  *
  * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Koji Sato.
  */
 
index 1b9067c..26290aa 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * super.c - NILFS module and super block management.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  */
 /*
index 4b25837..e60be7b 100644 (file)
@@ -1,19 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * sysfs.c - sysfs support implementation.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 648cedf..d001eb8 100644 (file)
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * sysfs.h - sysfs support declarations.
  *
  * Copyright (C) 2005-2014 Nippon Telegraph and Telephone Corporation.
  * Copyright (C) 2014 HGST, Inc., a Western Digital Company.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Vyacheslav Dubeyko <Vyacheslav.Dubeyko@hgst.com>
  */
 
index 1a85317..484785c 100644 (file)
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * the_nilfs.c - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index 36da177..380a543 100644 (file)
@@ -1,18 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * the_nilfs.h - the_nilfs shared structure.
  *
  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * Written by Ryusuke Konishi.
  *
  */
index f174397..ababdbf 100644 (file)
@@ -351,16 +351,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
 
        iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
 
-       if ((mask & FS_MODIFY) ||
-           (test_mask & to_tell->i_fsnotify_mask)) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
-       }
-
-       if (mnt && ((mask & FS_MODIFY) ||
-                   (test_mask & mnt->mnt_fsnotify_mask))) {
-               iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
-                       fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
+               fsnotify_first_mark(&to_tell->i_fsnotify_marks);
+       if (mnt) {
                iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
                        fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
        }
index 05506d6..59cdb27 100644 (file)
@@ -132,13 +132,13 @@ static void __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn)
        struct fsnotify_mark *mark;
 
        assert_spin_locked(&conn->lock);
+       /* We can get detached connector here when inode is getting unlinked. */
+       if (!fsnotify_valid_obj_type(conn->type))
+               return;
        hlist_for_each_entry(mark, &conn->list, obj_list) {
                if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED)
                        new_mask |= mark->mask;
        }
-       if (WARN_ON(!fsnotify_valid_obj_type(conn->type)))
-               return;
-
        *fsnotify_conn_mask_p(conn) = new_mask;
 }
 
index d9ebe11..1d098c3 100644 (file)
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                                 * for this bh as it's not marked locally
                                 * uptodate. */
                                status = -EIO;
+                               clear_buffer_needs_validate(bh);
                                put_bh(bh);
                                bhs[i] = NULL;
                                continue;
index aaca094..826f056 100644 (file)
@@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
 
        res->last_used = 0;
 
-       spin_lock(&dlm->spinlock);
+       spin_lock(&dlm->track_lock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
-       spin_unlock(&dlm->spinlock);
+       spin_unlock(&dlm->track_lock);
 
        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
index 7869622..7a5ee14 100644 (file)
@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (map_end & (PAGE_SIZE - 1))
                        to = map_end & (PAGE_SIZE - 1);
 
+retry:
                page = find_or_create_page(mapping, page_index, GFP_NOFS);
                if (!page) {
                        ret = -ENOMEM;
@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                }
 
                /*
-                * In case PAGE_SIZE <= CLUSTER_SIZE, This page
-                * can't be dirtied before we CoW it out.
+                * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
+                * page, so write it back.
                 */
-               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
-                       BUG_ON(PageDirty(page));
+               if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
+                       if (PageDirty(page)) {
+                               /*
+                                * write_on_page will unlock the page on return
+                                */
+                               ret = write_one_page(page);
+                               goto retry;
+                       }
+               }
 
                if (!PageUptodate(page)) {
                        ret = block_read_full_page(page, ocfs2_get_block);
index 296037a..1cc797a 100644 (file)
@@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        }
 
        /* Try to use clone_file_range to clone up within the same fs */
-       error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
+       error = do_clone_file_range(old_file, 0, new_file, 0, len);
        if (!error)
                goto out;
        /* Couldn't clone, so now we try to copy the data */
index 32e9282..986313d 100644 (file)
@@ -131,9 +131,6 @@ static int ovl_open(struct inode *inode, struct file *file)
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
-       file->f_mapping = realfile->f_mapping;
-
        file->private_data = realfile;
 
        return 0;
@@ -243,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                goto out_unlock;
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
+       file_start_write(real.file);
        ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
                             ovl_iocb_to_rwf(iocb));
+       file_end_write(real.file);
        revert_creds(old_cred);
 
        /* Update size */
@@ -334,6 +333,25 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
        return ret;
 }
 
+static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+       struct fd real;
+       const struct cred *old_cred;
+       int ret;
+
+       ret = ovl_real_fdget(file, &real);
+       if (ret)
+               return ret;
+
+       old_cred = ovl_override_creds(file_inode(file)->i_sb);
+       ret = vfs_fadvise(real.file, offset, len, advice);
+       revert_creds(old_cred);
+
+       fdput(real);
+
+       return ret;
+}
+
 static long ovl_real_ioctl(struct file *file, unsigned int cmd,
                           unsigned long arg)
 {
@@ -502,6 +520,7 @@ const struct file_operations ovl_file_operations = {
        .fsync          = ovl_fsync,
        .mmap           = ovl_mmap,
        .fallocate      = ovl_fallocate,
+       .fadvise        = ovl_fadvise,
        .unlocked_ioctl = ovl_ioctl,
        .compat_ioctl   = ovl_compat_ioctl,
 
index e0bb217..3b7ed5d 100644 (file)
@@ -467,6 +467,10 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                return -EOPNOTSUPP;
 
        old_cred = ovl_override_creds(inode->i_sb);
+
+       if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
+               filemap_write_and_wait(realinode->i_mapping);
+
        err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
        revert_creds(old_cred);
 
@@ -500,6 +504,11 @@ static const struct inode_operations ovl_special_inode_operations = {
        .update_time    = ovl_update_time,
 };
 
+static const struct address_space_operations ovl_aops = {
+       /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
+       .direct_IO              = noop_direct_IO,
+};
+
 /*
  * It is possible to stack overlayfs instance on top of another
  * overlayfs instance as lower layer. We need to annonate the
@@ -571,6 +580,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
        case S_IFREG:
                inode->i_op = &ovl_file_inode_operations;
                inode->i_fop = &ovl_file_operations;
+               inode->i_mapping->a_ops = &ovl_aops;
                break;
 
        case S_IFDIR:
index f287118..9c0ca6a 100644 (file)
@@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                        index = NULL;
                        goto out;
                }
-               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+               pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
                                    "overlayfs: mount with '-o index=off' to disable inodes index.\n",
                                    d_inode(origin)->i_ino, name.len, name.name,
                                    err);
index f61839e..a3c0d95 100644 (file)
@@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
                                  const void *value, size_t size, int flags)
 {
        int err = vfs_setxattr(dentry, name, value, size, flags);
-       pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
-                dentry, name, (int) size, (char *) value, flags, err);
+       pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
+                dentry, name, min((int)size, 48), value, size, flags, err);
        return err;
 }
 
index 2e0fc93..30adc9d 100644 (file)
@@ -982,16 +982,6 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
        if (err)
                goto out;
 
-       err = -EBUSY;
-       if (ovl_inuse_trylock(upperpath->dentry)) {
-               ofs->upperdir_locked = true;
-       } else if (ofs->config.index) {
-               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
-               goto out;
-       } else {
-               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
-       }
-
        upper_mnt = clone_private_mount(upperpath);
        err = PTR_ERR(upper_mnt);
        if (IS_ERR(upper_mnt)) {
@@ -1002,6 +992,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
        /* Don't inherit atime flags */
        upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
        ofs->upper_mnt = upper_mnt;
+
+       err = -EBUSY;
+       if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+               ofs->upperdir_locked = true;
+       } else if (ofs->config.index) {
+               pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
+               goto out;
+       } else {
+               pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+       }
+
        err = 0;
 out:
        return err;
@@ -1101,8 +1102,10 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
                goto out;
        }
 
+       ofs->workbasedir = dget(workpath.dentry);
+
        err = -EBUSY;
-       if (ovl_inuse_trylock(workpath.dentry)) {
+       if (ovl_inuse_trylock(ofs->workbasedir)) {
                ofs->workdir_locked = true;
        } else if (ofs->config.index) {
                pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
@@ -1111,7 +1114,6 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
                pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
        }
 
-       ofs->workbasedir = dget(workpath.dentry);
        err = ovl_make_workdir(ofs, &workpath);
        if (err)
                goto out;
index 8cfb62c..ace4fe4 100644 (file)
@@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
        struct dentry *upperdentry = ovl_dentry_upper(dentry);
        struct dentry *index = NULL;
        struct inode *inode;
-       struct qstr name;
+       struct qstr name = { };
        int err;
 
        err = ovl_get_index_name(lowerdentry, &name);
@@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
                goto fail;
 
 out:
+       kfree(name.name);
        dput(index);
        return;
 
index ccf86f1..7e9f07b 100644 (file)
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
        unsigned long *entries;
        int err;
 
+       /*
+        * The ability to racily run the kernel stack unwinder on a running task
+        * and then observe the unwinder output is scary; while it is useful for
+        * debugging kernel issues, it can also allow an attacker to leak kernel
+        * stack contents.
+        * Doing this in a manner that is at least safe from races would require
+        * some work to ensure that the remote task can not be scheduled; and
+        * even then, this would still expose the unwinder as local attack
+        * surface.
+        * Therefore, this interface is restricted to root.
+        */
+       if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+               return -EACCES;
+
        entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
                                GFP_KERNEL);
        if (!entries)
index ad72261..d297fe4 100644 (file)
@@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                                ret = -EFAULT;
                                goto out;
                        }
+                       m = NULL;       /* skip the list anchor */
                } else if (m->type == KCORE_VMALLOC) {
                        vread(buf, (char *)start, tsz);
                        /* we have to zero-fill user buffer even if no read */
index bbd1e35..f4fd2e7 100644 (file)
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
        },
 };
 
-static void ramoops_register_dummy(void)
+static inline void ramoops_unregister_dummy(void)
 {
+       platform_device_unregister(dummy);
+       dummy = NULL;
+
+       kfree(dummy_data);
+       dummy_data = NULL;
+}
+
+static void __init ramoops_register_dummy(void)
+{
+       /*
+        * Prepare a dummy platform data structure to carry the module
+        * parameters. If mem_size isn't set, then there are no module
+        * parameters, and we can skip this.
+        */
        if (!mem_size)
                return;
 
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
        if (IS_ERR(dummy)) {
                pr_info("could not create platform device: %ld\n",
                        PTR_ERR(dummy));
+               dummy = NULL;
+               ramoops_unregister_dummy();
        }
 }
 
 static int __init ramoops_init(void)
 {
+       int ret;
+
        ramoops_register_dummy();
-       return platform_driver_register(&ramoops_driver);
+       ret = platform_driver_register(&ramoops_driver);
+       if (ret != 0)
+               ramoops_unregister_dummy();
+
+       return ret;
 }
 late_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {
        platform_driver_unregister(&ramoops_driver);
-       platform_device_unregister(dummy);
-       kfree(dummy_data);
+       ramoops_unregister_dummy();
 }
 module_exit(ramoops_exit);
 
index 951a14e..0792595 100644 (file)
@@ -429,7 +429,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
        vaddr = vmap(pages, page_count, VM_MAP, prot);
        kfree(pages);
 
-       return vaddr;
+       /*
+        * Since vmap() uses page granularity, we must add the offset
+        * into the page here, to get the byte granularity address
+        * into the mapping to represent the actual "start" location.
+        */
+       return vaddr + offset_in_page(start);
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -448,6 +453,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
        else
                va = ioremap_wc(start, size);
 
+       /*
+        * Since request_mem_region() and ioremap() are byte-granularity
+        * there is no need handle anything special like we do when the
+        * vmap() case in persistent_ram_vmap() above.
+        */
        return va;
 }
 
@@ -468,7 +478,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
                return -ENOMEM;
        }
 
-       prz->buffer = prz->vaddr + offset_in_page(start);
+       prz->buffer = prz->vaddr;
        prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
 
        return 0;
@@ -515,7 +525,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 
        if (prz->vaddr) {
                if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
-                       vunmap(prz->vaddr);
+                       /* We must vunmap() at page-granularity. */
+                       vunmap(prz->vaddr - offset_in_page(prz->paddr));
                } else {
                        iounmap(prz->vaddr);
                        release_mem_region(prz->paddr, prz->size);
index 860bfbe..f0cbf58 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
+#include <linux/nospec.h>
 
 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
                                     qid_t id)
@@ -120,8 +121,6 @@ static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
        struct if_dqinfo uinfo;
        int ret;
 
-       /* This checks whether qc_state has enough entries... */
-       BUILD_BUG_ON(MAXQUOTAS > XQM_MAXQUOTAS);
        if (!sb->s_qcop->get_state)
                return -ENOSYS;
        ret = sb->s_qcop->get_state(sb, &state);
@@ -354,10 +353,10 @@ static int quota_getstate(struct super_block *sb, struct fs_quota_stat *fqs)
         * GETXSTATE quotactl has space for just one set of time limits so
         * report them for the first enabled quota type
         */
-       for (type = 0; type < XQM_MAXQUOTAS; type++)
+       for (type = 0; type < MAXQUOTAS; type++)
                if (state.s_state[type].flags & QCI_ACCT_ENABLED)
                        break;
-       BUG_ON(type == XQM_MAXQUOTAS);
+       BUG_ON(type == MAXQUOTAS);
        fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
        fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
        fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -427,10 +426,10 @@ static int quota_getstatev(struct super_block *sb, struct fs_quota_statv *fqs)
         * GETXSTATV quotactl has space for just one set of time limits so
         * report them for the first enabled quota type
         */
-       for (type = 0; type < XQM_MAXQUOTAS; type++)
+       for (type = 0; type < MAXQUOTAS; type++)
                if (state.s_state[type].flags & QCI_ACCT_ENABLED)
                        break;
-       BUG_ON(type == XQM_MAXQUOTAS);
+       BUG_ON(type == MAXQUOTAS);
        fqs->qs_btimelimit = state.s_state[type].spc_timelimit;
        fqs->qs_itimelimit = state.s_state[type].ino_timelimit;
        fqs->qs_rtbtimelimit = state.s_state[type].rt_spc_timelimit;
@@ -701,8 +700,9 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 {
        int ret;
 
-       if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
+       if (type >= MAXQUOTAS)
                return -EINVAL;
+       type = array_index_nospec(type, MAXQUOTAS);
        /*
         * Quota not supported on this fs? Check this before s_quota_types
         * since they needn't be set if quota is not supported at all.
index 39b4a21..8a2737f 100644 (file)
@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
 }
 EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
 
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len)
+int do_clone_file_range(struct file *file_in, loff_t pos_in,
+                       struct file *file_out, loff_t pos_out, u64 len)
 {
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
 
        return ret;
 }
+EXPORT_SYMBOL(do_clone_file_range);
+
+int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+                        struct file *file_out, loff_t pos_out, u64 len)
+{
+       int ret;
+
+       file_start_write(file_out);
+       ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+       file_end_write(file_out);
+
+       return ret;
+}
 EXPORT_SYMBOL(vfs_clone_file_range);
 
 /*
index 23e7042..bf000c8 100644 (file)
@@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
                mutex_unlock(&c->bu_mutex);
        }
 
-       ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+       if (!c->need_recovery)
+               ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+
        return 0;
 }
 
@@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
        int dev, vol;
        char *endptr;
 
+       if (!name || !*name)
+               return ERR_PTR(-EINVAL);
+
        /* First, try to open using the device node path method */
        ubi = ubi_open_volume_path(name, mode);
        if (!IS_ERR(ubi))
index 61afdfe..f5ad1ed 100644 (file)
@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
        ui->data_len = size;
 
        mutex_lock(&host_ui->ui_mutex);
-
-       if (!host->i_nlink) {
-               err = -ENOENT;
-               goto out_noent;
-       }
-
        host->i_ctime = current_time(host);
        host_ui->xattr_cnt += 1;
        host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -190,7 +184,6 @@ out_cancel:
        host_ui->xattr_size -= CALC_XATTR_BYTES(size);
        host_ui->xattr_names -= fname_len(nm);
        host_ui->flags &= ~UBIFS_CRYPT_FL;
-out_noent:
        mutex_unlock(&host_ui->ui_mutex);
 out_free:
        make_bad_inode(inode);
@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
        mutex_unlock(&ui->ui_mutex);
 
        mutex_lock(&host_ui->ui_mutex);
-
-       if (!host->i_nlink) {
-               err = -ENOENT;
-               goto out_noent;
-       }
-
        host->i_ctime = current_time(host);
        host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
        host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
 out_cancel:
        host_ui->xattr_size -= CALC_XATTR_BYTES(size);
        host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
-out_noent:
        mutex_unlock(&host_ui->ui_mutex);
        make_bad_inode(inode);
 out_free:
@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
                return err;
 
        mutex_lock(&host_ui->ui_mutex);
-
-       if (!host->i_nlink) {
-               err = -ENOENT;
-               goto out_noent;
-       }
-
        host->i_ctime = current_time(host);
        host_ui->xattr_cnt -= 1;
        host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -521,7 +501,6 @@ out_cancel:
        host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
        host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
        host_ui->xattr_names += fname_len(nm);
-out_noent:
        mutex_unlock(&host_ui->ui_mutex);
        ubifs_release_budget(c, &req);
        make_bad_inode(inode);
@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
 
        ubifs_assert(c, inode_is_locked(host));
 
-       if (!host->i_nlink)
-               return -ENOENT;
-
        if (fname_len(&nm) > UBIFS_MAX_NLEN)
                return -ENAMETOOLONG;
 
index 3040dc2..6f51565 100644 (file)
@@ -764,9 +764,7 @@ static int udf_find_fileset(struct super_block *sb,
                            struct kernel_lb_addr *root)
 {
        struct buffer_head *bh = NULL;
-       long lastblock;
        uint16_t ident;
-       struct udf_sb_info *sbi;
 
        if (fileset->logicalBlockNum != 0xFFFFFFFF ||
            fileset->partitionReferenceNum != 0xFFFF) {
@@ -779,69 +777,11 @@ static int udf_find_fileset(struct super_block *sb,
                        return 1;
                }
 
-       }
-
-       sbi = UDF_SB(sb);
-       if (!bh) {
-               /* Search backwards through the partitions */
-               struct kernel_lb_addr newfileset;
-
-/* --> cvg: FIXME - is it reasonable? */
-               return 1;
-
-               for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
-                    (newfileset.partitionReferenceNum != 0xFFFF &&
-                     fileset->logicalBlockNum == 0xFFFFFFFF &&
-                     fileset->partitionReferenceNum == 0xFFFF);
-                    newfileset.partitionReferenceNum--) {
-                       lastblock = sbi->s_partmaps
-                                       [newfileset.partitionReferenceNum]
-                                               .s_partition_len;
-                       newfileset.logicalBlockNum = 0;
-
-                       do {
-                               bh = udf_read_ptagged(sb, &newfileset, 0,
-                                                     &ident);
-                               if (!bh) {
-                                       newfileset.logicalBlockNum++;
-                                       continue;
-                               }
-
-                               switch (ident) {
-                               case TAG_IDENT_SBD:
-                               {
-                                       struct spaceBitmapDesc *sp;
-                                       sp = (struct spaceBitmapDesc *)
-                                                               bh->b_data;
-                                       newfileset.logicalBlockNum += 1 +
-                                               ((le32_to_cpu(sp->numOfBytes) +
-                                                 sizeof(struct spaceBitmapDesc)
-                                                 - 1) >> sb->s_blocksize_bits);
-                                       brelse(bh);
-                                       break;
-                               }
-                               case TAG_IDENT_FSD:
-                                       *fileset = newfileset;
-                                       break;
-                               default:
-                                       newfileset.logicalBlockNum++;
-                                       brelse(bh);
-                                       bh = NULL;
-                                       break;
-                               }
-                       } while (newfileset.logicalBlockNum < lastblock &&
-                                fileset->logicalBlockNum == 0xFFFFFFFF &&
-                                fileset->partitionReferenceNum == 0xFFFF);
-               }
-       }
-
-       if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
-            fileset->partitionReferenceNum != 0xFFFF) && bh) {
                udf_debug("Fileset at block=%u, partition=%u\n",
                          fileset->logicalBlockNum,
                          fileset->partitionReferenceNum);
 
-               sbi->s_partition = fileset->partitionReferenceNum;
+               UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
                udf_load_fileset(sb, bh, root);
                brelse(bh);
                return 0;
@@ -1570,10 +1510,16 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
  */
 #define PART_DESC_ALLOC_STEP 32
 
+struct part_desc_seq_scan_data {
+       struct udf_vds_record rec;
+       u32 partnum;
+};
+
 struct desc_seq_scan_data {
        struct udf_vds_record vds[VDS_POS_LENGTH];
        unsigned int size_part_descs;
-       struct udf_vds_record *part_descs_loc;
+       unsigned int num_part_descs;
+       struct part_desc_seq_scan_data *part_descs_loc;
 };
 
 static struct udf_vds_record *handle_partition_descriptor(
@@ -1582,10 +1528,14 @@ static struct udf_vds_record *handle_partition_descriptor(
 {
        struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
        int partnum;
+       int i;
 
        partnum = le16_to_cpu(desc->partitionNumber);
-       if (partnum >= data->size_part_descs) {
-               struct udf_vds_record *new_loc;
+       for (i = 0; i < data->num_part_descs; i++)
+               if (partnum == data->part_descs_loc[i].partnum)
+                       return &(data->part_descs_loc[i].rec);
+       if (data->num_part_descs >= data->size_part_descs) {
+               struct part_desc_seq_scan_data *new_loc;
                unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
 
                new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
@@ -1597,7 +1547,7 @@ static struct udf_vds_record *handle_partition_descriptor(
                data->part_descs_loc = new_loc;
                data->size_part_descs = new_size;
        }
-       return &(data->part_descs_loc[partnum]);
+       return &(data->part_descs_loc[data->num_part_descs++].rec);
 }
 
 
@@ -1647,6 +1597,7 @@ static noinline int udf_process_sequence(
 
        memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
        data.size_part_descs = PART_DESC_ALLOC_STEP;
+       data.num_part_descs = 0;
        data.part_descs_loc = kcalloc(data.size_part_descs,
                                      sizeof(*data.part_descs_loc),
                                      GFP_KERNEL);
@@ -1658,7 +1609,6 @@ static noinline int udf_process_sequence(
         * are in it.
         */
        for (; (!done && block <= lastblock); block++) {
-
                bh = udf_read_tagged(sb, block, block, &ident);
                if (!bh)
                        break;
@@ -1730,13 +1680,10 @@ static noinline int udf_process_sequence(
        }
 
        /* Now handle prevailing Partition Descriptors */
-       for (i = 0; i < data.size_part_descs; i++) {
-               if (data.part_descs_loc[i].block) {
-                       ret = udf_load_partdesc(sb,
-                                               data.part_descs_loc[i].block);
-                       if (ret < 0)
-                               return ret;
-               }
+       for (i = 0; i < data.num_part_descs; i++) {
+               ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
+               if (ret < 0)
+                       return ret;
        }
 
        return 0;
index daa7325..0d6a6a4 100644 (file)
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
        int err = 0;
 
 #ifdef CONFIG_FS_POSIX_ACL
-       if (inode->i_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_ACCESS);
-               if (err)
-                       return err;
-       }
-       if (inode->i_default_acl) {
-               err = xattr_list_one(&buffer, &remaining_size,
-                                    XATTR_NAME_POSIX_ACL_DEFAULT);
-               if (err)
-                       return err;
+       if (IS_POSIXACL(inode)) {
+               if (inode->i_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_ACCESS);
+                       if (err)
+                               return err;
+               }
+               if (inode->i_default_acl) {
+                       err = xattr_list_one(&buffer, &remaining_size,
+                                            XATTR_NAME_POSIX_ACL_DEFAULT);
+                       if (err)
+                               return err;
+               }
        }
 #endif
 
index 66d1d45..d356f80 100644 (file)
@@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
 #define ioport_map ioport_map
 static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
 {
-       return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
+       port &= IO_SPACE_LIMIT;
+       return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
 }
 #endif
 
index 989f8e5..971bb78 100644 (file)
@@ -87,9 +87,10 @@ struct drm_client_dev {
        struct drm_file *file;
 };
 
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-                  const char *name, const struct drm_client_funcs *funcs);
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+                   const char *name, const struct drm_client_funcs *funcs);
 void drm_client_release(struct drm_client_dev *client);
+void drm_client_add(struct drm_client_dev *client);
 
 void drm_client_dev_unregister(struct drm_device *dev);
 void drm_client_dev_hotplug(struct drm_device *dev);
index 46a8009..152b305 100644 (file)
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
 static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
 {
        return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
-               dev->mode_config.funcs->atomic_commit != NULL;
+               (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
 }
 
 
index 582a0ec..7778147 100644 (file)
@@ -89,7 +89,6 @@ struct drm_panel {
        struct drm_device *drm;
        struct drm_connector *connector;
        struct device *dev;
-       struct device_link *link;
 
        const struct drm_panel_funcs *funcs;
 
index ca1d2cc..18863d5 100644 (file)
@@ -199,47 +199,57 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
 
 #define __declare_arg_0(a0, res)                                       \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
        register unsigned long r1 asm("r1");                            \
        register unsigned long r2 asm("r2");                            \
        register unsigned long r3 asm("r3")
 
 #define __declare_arg_1(a0, a1, res)                                   \
+       typeof(a1) __a1 = a1;                                           \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
-       register typeof(a1)    r1 asm("r1") = a1;                       \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1") = __a1;                     \
        register unsigned long r2 asm("r2");                            \
        register unsigned long r3 asm("r3")
 
 #define __declare_arg_2(a0, a1, a2, res)                               \
+       typeof(a1) __a1 = a1;                                           \
+       typeof(a2) __a2 = a2;                                           \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
-       register typeof(a1)    r1 asm("r1") = a1;                       \
-       register typeof(a2)    r2 asm("r2") = a2;                       \
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1") = __a1;                     \
+       register unsigned long r2 asm("r2") = __a2;                     \
        register unsigned long r3 asm("r3")
 
 #define __declare_arg_3(a0, a1, a2, a3, res)                           \
+       typeof(a1) __a1 = a1;                                           \
+       typeof(a2) __a2 = a2;                                           \
+       typeof(a3) __a3 = a3;                                           \
        struct arm_smccc_res   *___res = res;                           \
-       register u32           r0 asm("r0") = a0;                       \
-       register typeof(a1)    r1 asm("r1") = a1;                       \
-       register typeof(a2)    r2 asm("r2") = a2;                       \
-       register typeof(a3)    r3 asm("r3") = a3
+       register unsigned long r0 asm("r0") = (u32)a0;                  \
+       register unsigned long r1 asm("r1") = __a1;                     \
+       register unsigned long r2 asm("r2") = __a2;                     \
+       register unsigned long r3 asm("r3") = __a3
 
 #define __declare_arg_4(a0, a1, a2, a3, a4, res)                       \
+       typeof(a4) __a4 = a4;                                           \
        __declare_arg_3(a0, a1, a2, a3, res);                           \
-       register typeof(a4) r4 asm("r4") = a4
+       register unsigned long r4 asm("r4") = __a4
 
 #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res)                   \
+       typeof(a5) __a5 = a5;                                           \
        __declare_arg_4(a0, a1, a2, a3, a4, res);                       \
-       register typeof(a5) r5 asm("r5") = a5
+       register unsigned long r5 asm("r5") = __a5
 
 #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res)               \
+       typeof(a6) __a6 = a6;                                           \
        __declare_arg_5(a0, a1, a2, a3, a4, a5, res);                   \
-       register typeof(a6) r6 asm("r6") = a6
+       register unsigned long r6 asm("r6") = __a6
 
 #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res)           \
+       typeof(a7) __a7 = a7;                                           \
        __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res);               \
-       register typeof(a7) r7 asm("r7") = a7
+       register unsigned long r7 asm("r7") = __a7
 
 #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
 #define __declare_args(count, ...)  ___declare_args(count, __VA_ARGS__)
index 34aec30..6d766a1 100644 (file)
@@ -56,6 +56,7 @@ struct blkcg {
        struct list_head                all_blkcgs_node;
 #ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
+       refcount_t                      cgwb_refcnt;
 #endif
 };
 
@@ -89,7 +90,6 @@ struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
-       bool                            offline;
 };
 
 /*
@@ -387,6 +387,49 @@ static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
        return cpd ? cpd->blkcg : NULL;
 }
 
+extern void blkcg_destroy_blkgs(struct blkcg *blkcg);
+
+#ifdef CONFIG_CGROUP_WRITEBACK
+
+/**
+ * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ */
+static inline void blkcg_cgwb_get(struct blkcg *blkcg)
+{
+       refcount_inc(&blkcg->cgwb_refcnt);
+}
+
+/**
+ * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
+ * @blkcg: blkcg of interest
+ *
+ * This is used to track the number of active wb's related to a blkcg.
+ * When this count goes to zero, all active wb has finished so the
+ * blkcg can continue destruction by calling blkcg_destroy_blkgs().
+ * This work may occur in cgwb_release_workfn() on the cgwb_release
+ * workqueue.
+ */
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+       if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
+               blkcg_destroy_blkgs(blkcg);
+}
+
+#else
+
+static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }
+
+static inline void blkcg_cgwb_put(struct blkcg *blkcg)
+{
+       /* wb isn't being accounted, so trigger destruction right away */
+       blkcg_destroy_blkgs(blkcg);
+}
+
+#endif
+
 /**
  * blkg_path - format cgroup path of blkg
  * @blkg: blkg of interest
index d6869e0..6980014 100644 (file)
@@ -54,7 +54,7 @@ struct blk_stat_callback;
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
  */
-#define BLKCG_MAX_POLS         3
+#define BLKCG_MAX_POLS         5
 
 typedef void (rq_end_io_fn)(struct request *, blk_status_t);
 
index 763bbad..4d36b27 100644 (file)
 #define __noretpoline __attribute__((indirect_branch("keep")))
 #endif
 
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked                __attribute__((naked)) noinline __noclone notrace
-
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
 #define __optimize(level)      __attribute__((__optimize__(level)))
index 3525c17..db192be 100644 (file)
@@ -226,6 +226,14 @@ struct ftrace_likely_data {
 #define notrace                        __attribute__((no_instrument_function))
 #endif
 
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked                        __attribute__((naked)) notrace
+
 #define __compiler_offsetof(a, b)      __builtin_offsetof(a, b)
 
 /*
index 8942e61..8ab5df7 100644 (file)
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
        FPGA_MGR_STATE_OPERATING,
 };
 
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
  */
 #define FPGA_MGR_PARTIAL_RECONFIG      BIT(0)
 #define FPGA_MGR_EXTERNAL_CONFIG       BIT(1)
index 3332270..897eae8 100644 (file)
@@ -1763,6 +1763,7 @@ struct file_operations {
                        u64);
        int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
                        u64);
+       int (*fadvise)(struct file *, loff_t, loff_t, int);
 } __randomize_layout;
 
 struct inode_operations {
@@ -1827,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
 extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                                      struct inode *inode_out, loff_t pos_out,
                                      u64 *len, bool is_dedupe);
+extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
+                              struct file *file_out, loff_t pos_out, u64 len);
 extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len);
+                               struct file *file_out, loff_t pos_out, u64 len);
 extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
                                         struct inode *dest, loff_t destoff,
                                         loff_t len, bool *is_same);
@@ -2772,19 +2775,6 @@ static inline void file_end_write(struct file *file)
        __sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
 }
 
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
-                                     struct file *file_out, loff_t pos_out,
-                                     u64 len)
-{
-       int ret;
-
-       file_start_write(file_out);
-       ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
-       file_end_write(file_out);
-
-       return ret;
-}
-
 /*
  * get_write_access() gets write permission for a file.
  * put_write_access() releases this write permission.
@@ -3459,4 +3449,8 @@ static inline bool dir_relax_shared(struct inode *inode)
 extern bool path_noexec(const struct path *path);
 extern void inode_nohighmem(struct inode *inode);
 
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+                      int advice);
+
 #endif /* _LINUX_FS_H */
index 5786442..25c08c6 100644 (file)
@@ -83,10 +83,10 @@ struct partition {
 } __attribute__((packed));
 
 struct disk_stats {
+       u64 nsecs[NR_STAT_GROUPS];
        unsigned long sectors[NR_STAT_GROUPS];
        unsigned long ios[NR_STAT_GROUPS];
        unsigned long merges[NR_STAT_GROUPS];
-       unsigned long ticks[NR_STAT_GROUPS];
        unsigned long io_ticks;
        unsigned long time_in_queue;
 };
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part)
 
 #endif /* CONFIG_SMP */
 
+#define part_stat_read_msecs(part, which)                              \
+       div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
+
 #define part_stat_read_accum(part, field)                              \
        (part_stat_read(part, field[STAT_READ]) +                       \
         part_stat_read(part, field[STAT_WRITE]) +                      \
index 834e646..d44a783 100644 (file)
@@ -526,6 +526,7 @@ struct hid_input {
        const char *name;
        bool registered;
        struct list_head reports;       /* the list of reports */
+       unsigned int application;       /* application usage for this input */
 };
 
 enum hid_type {
index 6b68e34..087fd5f 100644 (file)
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
 struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
        return 0;
 }
 
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+                                       pte_t *ptep)
+{
+       return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+                               struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
+
 #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)        ({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)      ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
index b79387f..65b4eae 100644 (file)
@@ -855,7 +855,7 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg)
 }
 
 u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold);
-void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf);
+void i2c_put_dma_safe_msg_buf(u8 *buf, struct i2c_msg *msg, bool xferred);
 
 int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
 /**
index 0205aee..c926698 100644 (file)
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
index 8a12570..50bed4f 100644 (file)
@@ -21,7 +21,7 @@
 /*
  * Regulator configuration
  */
-/* DA9063 regulator IDs */
+/* DA9063 and DA9063L regulator IDs */
 enum {
        /* BUCKs */
        DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
        DA9063_ID_BMEM_BIO_MERGED,
        /* When two BUCKs are merged, they cannot be reused separately */
 
-       /* LDOs */
+       /* LDOs on both DA9063 and DA9063L */
+       DA9063_ID_LDO3,
+       DA9063_ID_LDO7,
+       DA9063_ID_LDO8,
+       DA9063_ID_LDO9,
+       DA9063_ID_LDO11,
+
+       /* DA9063-only LDOs */
        DA9063_ID_LDO1,
        DA9063_ID_LDO2,
-       DA9063_ID_LDO3,
        DA9063_ID_LDO4,
        DA9063_ID_LDO5,
        DA9063_ID_LDO6,
-       DA9063_ID_LDO7,
-       DA9063_ID_LDO8,
-       DA9063_ID_LDO9,
        DA9063_ID_LDO10,
-       DA9063_ID_LDO11,
 };
 
 /* Regulators platform data */
index a528747..e8338e5 100644 (file)
@@ -78,9 +78,9 @@ enum {
        BD71837_REG_TRANS_COND0        = 0x1F,
        BD71837_REG_TRANS_COND1        = 0x20,
        BD71837_REG_VRFAULTEN          = 0x21,
-       BD71837_REG_MVRFLTMASK0        = 0x22,
-       BD71837_REG_MVRFLTMASK1        = 0x23,
-       BD71837_REG_MVRFLTMASK2        = 0x24,
+       BD718XX_REG_MVRFLTMASK0        = 0x22,
+       BD718XX_REG_MVRFLTMASK1        = 0x23,
+       BD718XX_REG_MVRFLTMASK2        = 0x24,
        BD71837_REG_RCVCFG             = 0x25,
        BD71837_REG_RCVNUM             = 0x26,
        BD71837_REG_PWRONCONFIG0       = 0x27,
@@ -159,6 +159,33 @@ enum {
 #define BUCK8_MASK             0x3F
 #define BUCK8_DEFAULT          0x1E
 
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80           0x1
+#define BD718XX_BUCK1_VRMON130          0x2
+#define BD718XX_BUCK2_VRMON80           0x4
+#define BD718XX_BUCK2_VRMON130          0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80  0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80  0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80  0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80  0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80            0x1
+#define BD718XX_LDO2_VRMON80            0x2
+#define BD718XX_LDO3_VRMON80            0x4
+#define BD718XX_LDO4_VRMON80            0x8
+#define BD718XX_LDO5_VRMON80            0x10
+#define BD718XX_LDO6_VRMON80            0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80           0x10
+#define BD71837_BUCK3_VRMON130          0x20
+#define BD71837_BUCK4_VRMON80           0x40
+#define BD71837_BUCK4_VRMON130          0x80
+#define BD71837_LDO7_VRMON80            0x40
+
 /* BD71837_REG_IRQ bits */
 #define IRQ_SWRST              0x40
 #define IRQ_PWRON_S            0x20
index 7a45271..66d94b4 100644 (file)
@@ -362,8 +362,8 @@ struct mlx5_frag_buf {
 struct mlx5_frag_buf_ctrl {
        struct mlx5_frag_buf    frag_buf;
        u32                     sz_m1;
-       u32                     frag_sz_m1;
-       u32                     strides_offset;
+       u16                     frag_sz_m1;
+       u16                     strides_offset;
        u8                      log_sz;
        u8                      log_stride;
        u8                      log_frag_strides;
@@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
 }
 
 static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
-                                       u32 strides_offset,
+                                       u16 strides_offset,
                                        struct mlx5_frag_buf_ctrl *fbc)
 {
        fbc->log_stride = log_stride;
@@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
index 83a33a1..7f5ca2c 100644 (file)
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
 
        u32 *rqn;
        u32 *sqn;
+
+       bool peer_gone;
 };
 
 struct mlx5_hairpin *
index a61ebe8..0416a72 100644 (file)
@@ -2455,6 +2455,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
        return vma;
 }
 
+static inline bool range_in_vma(struct vm_area_struct *vma,
+                               unsigned long start, unsigned long end)
+{
+       return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
 #ifdef CONFIG_MMU
 pgprot_t vm_get_page_prot(unsigned long vm_flags);
 void vma_set_page_prot(struct vm_area_struct *vma);
index cd2bc93..5ed8f62 100644 (file)
@@ -341,7 +341,7 @@ struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct rb_root mm_rb;
-               u32 vmacache_seqnum;                   /* per-thread vmacache */
+               u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
index 5fe8768..d7016dc 100644 (file)
@@ -32,7 +32,7 @@
 #define VMACACHE_MASK (VMACACHE_SIZE - 1)
 
 struct vmacache {
-       u32 seqnum;
+       u64 seqnum;
        struct vm_area_struct *vmas[VMACACHE_SIZE];
 };
 
index 1e22d96..3f4c0b1 100644 (file)
@@ -671,12 +671,6 @@ typedef struct pglist_data {
 #ifdef CONFIG_NUMA_BALANCING
        /* Lock serializing the migrate rate limiting window */
        spinlock_t numabalancing_migrate_lock;
-
-       /* Rate limiting time interval */
-       unsigned long numabalancing_migrate_next_window;
-
-       /* Number of pages migrated during the rate limiting time interval */
-       unsigned long numabalancing_migrate_nr_pages;
 #endif
        /*
         * This is a per-node reserve of pages that are not available
index 1298a7d..01797cb 100644 (file)
@@ -754,6 +754,7 @@ struct tb_service_id {
  * struct typec_device_id - USB Type-C alternate mode identifiers
  * @svid: Standard or Vendor ID
  * @mode: Mode index
+ * @driver_data: Driver specific data
  */
 struct typec_device_id {
        __u16 svid;
index ca5ab98..c7861e4 100644 (file)
@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
  *                     switch driver and used to set the phys state of the
  *                     switch port.
  *
+ *     @wol_enabled:   Wake-on-LAN is enabled
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -2014,6 +2016,7 @@ struct net_device {
        struct lock_class_key   *qdisc_tx_busylock;
        struct lock_class_key   *qdisc_running_key;
        bool                    proto_down;
+       unsigned                wol_enabled:1;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
index 07efffd..bbe99d2 100644 (file)
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                break;
        case NFPROTO_ARP:
 #ifdef CONFIG_NETFILTER_FAMILY_ARP
+               if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+                       break;
                hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
 #endif
                break;
index 67662d0..3ef82d3 100644 (file)
@@ -49,8 +49,9 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
 #else
 static inline void netpoll_poll_disable(struct net_device *dev) { return; }
 static inline void netpoll_poll_enable(struct net_device *dev) { return; }
index 4d25e4f..99b0ebf 100644 (file)
@@ -256,6 +256,9 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
 #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
 #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
 
+extern bool of_node_name_eq(const struct device_node *np, const char *name);
+extern bool of_node_name_prefix(const struct device_node *np, const char *prefix);
+
 static inline const char *of_node_full_name(const struct device_node *np)
 {
        return np ? np->full_name : "<no-node>";
@@ -290,6 +293,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
 extern struct device_node *of_get_next_available_child(
        const struct device_node *node, struct device_node *prev);
 
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+                                       const char *compatible);
 extern struct device_node *of_get_child_by_name(const struct device_node *node,
                                        const char *name);
 
@@ -561,6 +566,16 @@ static inline struct device_node *to_of_node(const struct fwnode_handle *fwnode)
        return NULL;
 }
 
+static inline bool of_node_name_eq(const struct device_node *np, const char *name)
+{
+       return false;
+}
+
+static inline bool of_node_name_prefix(const struct device_node *np, const char *prefix)
+{
+       return false;
+}
+
 static inline const char* of_node_full_name(const struct device_node *np)
 {
        return "<no-node>";
@@ -632,6 +647,12 @@ static inline bool of_have_populated_dt(void)
        return false;
 }
 
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+                                       const char *compatible)
+{
+       return NULL;
+}
+
 static inline struct device_node *of_get_child_by_name(
                                        const struct device_node *node,
                                        const char *name)
@@ -967,6 +988,18 @@ static inline struct device_node *of_find_matching_node(
        return of_find_matching_node_and_match(from, matches, NULL);
 }
 
+static inline const char *of_node_get_device_type(const struct device_node *np)
+{
+       return of_get_property(np, "type", NULL);
+}
+
+static inline bool of_node_is_type(const struct device_node *np, const char *type)
+{
+       const char *match = of_node_get_device_type(np);
+
+       return np && match && type && !strcmp(match, type);
+}
+
 /**
  * of_property_count_u8_elems - Count the number of u8 elements in a property
  *
index e72ca8d..6925828 100644 (file)
@@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
 int devm_request_pci_bus_resources(struct device *dev,
                                   struct list_head *resources);
 
+/* Temporary until new and working PCI SBR API in place */
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+
 #define pci_bus_for_each_resource(bus, res, i)                         \
        for (i = 0;                                                     \
            (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
index 99d366c..d157983 100644 (file)
 
 #define PCI_VENDOR_ID_OCZ              0x1b85
 
+#define PCI_VENDOR_ID_NCUBE            0x10ff
+
 #endif /* _LINUX_PCI_IDS_H */
index 9abc0ca..9f0aa1b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Driver for Texas Instruments INA219, INA226 power monitor chips
  *
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index ca9772c..f32dd27 100644 (file)
@@ -408,13 +408,7 @@ struct qc_type_state {
 
 struct qc_state {
        unsigned int s_incoredqs;       /* Number of dquots in core */
-       /*
-        * Per quota type information. The array should really have
-        * max(MAXQUOTAS, XQM_MAXQUOTAS) entries. BUILD_BUG_ON in
-        * quota_getinfo() makes sure XQM_MAXQUOTAS is large enough.  Once VFS
-        * supports project quotas, this can be changed to MAXQUOTAS
-        */
-       struct qc_type_state s_state[XQM_MAXQUOTAS];
+       struct qc_type_state s_state[MAXQUOTAS];  /* Per quota type information */
 };
 
 /* Structure for communicating via ->set_info */
index 3468703..a459a5e 100644 (file)
@@ -48,9 +48,9 @@ struct regulator;
  * DISABLE_IN_SUSPEND  - turn off regulator in suspend states
  * ENABLE_IN_SUSPEND   - keep regulator on in suspend states
  */
-#define DO_NOTHING_IN_SUSPEND  (-1)
-#define DISABLE_IN_SUSPEND     0
-#define ENABLE_IN_SUSPEND      1
+#define DO_NOTHING_IN_SUSPEND  0
+#define DISABLE_IN_SUSPEND     1
+#define ENABLE_IN_SUSPEND      2
 
 /* Regulator active discharge flags */
 enum regulator_active_discharge {
index c0e795d..1c89611 100644 (file)
@@ -36,6 +36,7 @@ enum {
        SCIx_SH4_SCIF_FIFODATA_REGTYPE,
        SCIx_SH7705_SCIF_REGTYPE,
        SCIx_HSCIF_REGTYPE,
+       SCIx_RZ_SCIFA_REGTYPE,
 
        SCIx_NR_REGTYPES,
 };
index b2bd4b4..69ee304 100644 (file)
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
  * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
  * @data.buswidth: number of IO lanes used to send/receive the data
  * @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ *              operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
  */
 struct spi_mem_op {
        struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
                u8 buswidth;
                enum spi_mem_data_dir dir;
                unsigned int nbytes;
-               /* buf.{in,out} must be DMA-able. */
                union {
                        void *in;
                        const void *out;
index c43e9a0..7ddfc65 100644 (file)
@@ -30,6 +30,7 @@
 
 #define MTL_MAX_RX_QUEUES      8
 #define MTL_MAX_TX_QUEUES      8
+#define STMMAC_CH_MAX          8
 
 #define STMMAC_RX_COE_NONE     0
 #define STMMAC_RX_COE_TYPE1    1
index 5d73880..a5a3cfc 100644 (file)
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
 extern int persistent_clock_is_local;
 
 extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
-                                          struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+                                         struct timespec64 *boot_offset);
 extern int update_persistent_clock64(struct timespec64 now);
 
 /*
index 7f2e16e..041f7e5 100644 (file)
@@ -158,8 +158,10 @@ extern void syscall_unregfunc(void);
                 * For rcuidle callers, use srcu since sched-rcu        \
                 * doesn't work from the idle path.                     \
                 */                                                     \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
                        idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+                       rcu_irq_enter_irqson();                         \
+               }                                                       \
                                                                        \
                it_func_ptr = rcu_dereference_raw((tp)->funcs);         \
                                                                        \
@@ -171,8 +173,10 @@ extern void syscall_unregfunc(void);
                        } while ((++it_func_ptr)->func);                \
                }                                                       \
                                                                        \
-               if (rcuidle)                                            \
+               if (rcuidle) {                                          \
+                       rcu_irq_exit_irqson();                          \
                        srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+               }                                                       \
                                                                        \
                preempt_enable_notrace();                               \
        } while (0)
index 409c845..422b1c0 100644 (file)
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 static __always_inline __must_check
 size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
 {
-       if (unlikely(!check_copy_size(addr, bytes, false)))
+       if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        else
                return _copy_to_iter_mcsafe(addr, bytes, i);
index a34539b..7e6ac01 100644 (file)
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
  * @can_switch: check if the device is in a position to switch now.
  *     Mandatory. The client should return false if a user space process
  *     has one of its device files open
+ * @gpu_bound: notify the client id to audio client when the GPU is bound.
  *
  * Client callbacks. A client can be either a GPU or an audio device on a GPU.
  * The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
  * set to NULL. For audio clients, the @reprobe member is bogus.
+ * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients.
  */
 struct vga_switcheroo_client_ops {
        void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
        void (*reprobe)(struct pci_dev *dev);
        bool (*can_switch)(struct pci_dev *dev);
+       void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
 };
 
 #if defined(CONFIG_VGA_SWITCHEROO)
index 9397628..cb462f9 100644 (file)
@@ -5,6 +5,24 @@
 #include <linux/if_vlan.h>
 #include <uapi/linux/virtio_net.h>
 
+static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+                                          const struct virtio_net_hdr *hdr)
+{
+       switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+       case VIRTIO_NET_HDR_GSO_TCPV4:
+       case VIRTIO_NET_HDR_GSO_UDP:
+               skb->protocol = cpu_to_be16(ETH_P_IP);
+               break;
+       case VIRTIO_NET_HDR_GSO_TCPV6:
+               skb->protocol = cpu_to_be16(ETH_P_IPV6);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
                                        const struct virtio_net_hdr *hdr,
                                        bool little_endian)
index 5c7f010..47a3441 100644 (file)
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 #ifdef CONFIG_DEBUG_VM_VMACACHE
                VMACACHE_FIND_CALLS,
                VMACACHE_FIND_HITS,
-               VMACACHE_FULL_FLUSHES,
 #endif
 #ifdef CONFIG_SWAP
                SWAP_RA,
index 3e9a963..6fce268 100644 (file)
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
        memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                                    unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
        mm->vmacache_seqnum++;
-
-       /* deal with overflows */
-       if (unlikely(mm->vmacache_seqnum == 0))
-               vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
index ea73fef..8586cfb 100644 (file)
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
  * @prio: priority of the file handler, as defined by &enum v4l2_priority
  *
  * @wait: event' s wait queue
+ * @subscribe_lock: serialise changes to the subscribed list; guarantee that
+ *                 the add and del event callbacks are orderly called
  * @subscribed: list of subscribed events
  * @available: list of events waiting to be dequeued
  * @navailable: number of available events at @available list
  * @sequence: event sequence number
+ *
  * @m2m_ctx: pointer to &struct v4l2_m2m_ctx
  */
 struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
 
        /* Events */
        wait_queue_head_t       wait;
+       struct mutex            subscribe_lock;
        struct list_head        subscribed;
        struct list_head        available;
        unsigned int            navailable;
index 1ad5b19..9703034 100644 (file)
@@ -23,13 +23,11 @@ struct tc_action {
        const struct tc_action_ops      *ops;
        __u32                           type; /* for backward compat(TCA_OLD_COMPAT) */
        __u32                           order;
-       struct list_head                list;
        struct tcf_idrinfo              *idrinfo;
 
        u32                             tcfa_index;
        refcount_t                      tcfa_refcnt;
        atomic_t                        tcfa_bindcnt;
-       u32                             tcfa_capab;
        int                             tcfa_action;
        struct tcf_t                    tcfa_tm;
        struct gnet_stats_basic_packed  tcfa_bstats;
@@ -44,7 +42,6 @@ struct tc_action {
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
 #define tcf_bindcnt    common.tcfa_bindcnt
-#define tcf_capab      common.tcfa_capab
 #define tcf_action     common.tcfa_action
 #define tcf_tm         common.tcfa_tm
 #define tcf_bstats     common.tcfa_bstats
@@ -102,7 +99,6 @@ struct tc_action_ops {
        size_t  (*get_fill_size)(const struct tc_action *act);
        struct net_device *(*get_dev)(const struct tc_action *a);
        void    (*put_dev)(struct net_device *dev);
-       int     (*delete)(struct net *net, u32 index);
 };
 
 struct tc_action_net {
@@ -148,8 +144,6 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
                       const struct tc_action_ops *ops,
                       struct netlink_ext_ack *extack);
 int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-                   int bind);
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
                   int bind, bool cpustats);
@@ -158,7 +152,6 @@ void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
 void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
 int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
                        struct tc_action **a, int bind);
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
 int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
 
 static inline int tcf_idr_release(struct tc_action *a, bool bind)
index a2d0581..b46d68a 100644 (file)
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
        int mode;
 };
 
-struct netdev_notify_work {
-       struct delayed_work     work;
-       struct net_device       *dev;
-       struct netdev_bonding_info bonding_info;
-};
-
 struct slave {
        struct net_device *dev; /* first - useful for panic debug */
        struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll *np;
 #endif
+       struct delayed_work notify_work;
        struct kobject kobj;
        struct rtnl_link_stats64 slave_stats;
 };
index 9a85097..4de121e 100644 (file)
@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
  * @freq: the freqency(in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- *     irrelevant). This can be used later for deduplication.
  * @rule: pointer to store the wmm rule from the regulatory db.
  *
  * Self-managed wireless drivers can use this function to  query
@@ -4865,8 +4863,8 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
  *
  * Return: 0 on success. -ENODATA.
  */
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *ptr,
-                       struct ieee80211_wmm_rule *rule);
+int reg_query_regdb_wmm(char *alpha2, int freq,
+                       struct ieee80211_reg_rule *rule);
 
 /*
  * callbacks for asynchronous cfg80211 methods, notification
index e03b933..a80fd0a 100644 (file)
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
        return sk->sk_bound_dev_if;
 }
 
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
-       return rcu_dereference_check(ireq->ireq_opt,
-                                    refcount_read(&ireq->req.rsk_refcnt) > 0);
-}
-
 struct inet_cork {
        unsigned int            flags;
        __be32                  addr;
index d5f62cc..3394d75 100644 (file)
@@ -30,7 +30,7 @@ struct nf_conn_timeout {
 };
 
 static inline unsigned int *
-nf_ct_timeout_data(struct nf_conn_timeout *t)
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
 {
        struct nf_ct_timeout *timeout;
 
index 0c154f9..39e1d87 100644 (file)
  *   nla_find()                                find attribute in stream of attributes
  *   nla_find_nested()                 find attribute in nested attributes
  *   nla_parse()                       parse and validate stream of attrs
- *   nla_parse_nested()                        parse nested attribuets
+ *   nla_parse_nested()                        parse nested attributes
  *   nla_for_each_attr()               loop over all attributes
  *   nla_for_each_nested()             loop over the nested attributes
  *=========================================================================
index 316694d..008f466 100644 (file)
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
  * According to specification 102 622 chapter 4.4 Pipes,
  * the pipe identifier is 7 bits long.
  */
-#define NFC_HCI_MAX_PIPES              127
+#define NFC_HCI_MAX_PIPES              128
 struct nfc_hci_init_data {
        u8 gate_count;
        struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
index ef727f7..75a3f3f 100644 (file)
@@ -298,19 +298,13 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
 #endif
 }
 
-static inline void tcf_exts_to_list(const struct tcf_exts *exts,
-                                   struct list_head *actions)
-{
 #ifdef CONFIG_NET_CLS_ACT
-       int i;
-
-       for (i = 0; i < exts->nr_actions; i++) {
-               struct tc_action *a = exts->actions[i];
-
-               list_add_tail(&a->list, actions);
-       }
+#define tcf_exts_for_each_action(i, a, exts) \
+       for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
+#else
+#define tcf_exts_for_each_action(i, a, exts) \
+       for (; 0; (void)(i), (void)(a), (void)(exts))
 #endif
-}
 
 static inline void
 tcf_exts_stats_update(const struct tcf_exts *exts,
@@ -361,6 +355,15 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
 #endif
 }
 
+static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       return exts->actions[0];
+#else
+       return NULL;
+#endif
+}
+
 /**
  * tcf_exts_exec - execute tc filter extensions
  * @skb: socket buffer
index 60f8cc8..3469750 100644 (file)
@@ -217,15 +217,15 @@ struct ieee80211_wmm_rule {
 struct ieee80211_reg_rule {
        struct ieee80211_freq_range freq_range;
        struct ieee80211_power_rule power_rule;
-       struct ieee80211_wmm_rule *wmm_rule;
+       struct ieee80211_wmm_rule wmm_rule;
        u32 flags;
        u32 dfs_cac_ms;
+       bool has_wmm;
 };
 
 struct ieee80211_regdomain {
        struct rcu_head rcu_head;
        u32 n_reg_rules;
-       u32 n_wmm_rules;
        char alpha2[3];
        enum nl80211_dfs_regions dfs_region;
        struct ieee80211_reg_rule reg_rules[];
index d5c683e..0a769cf 100644 (file)
@@ -171,15 +171,14 @@ struct cipher_context {
        char *rec_seq;
 };
 
+union tls_crypto_context {
+       struct tls_crypto_info info;
+       struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+};
+
 struct tls_context {
-       union {
-               struct tls_crypto_info crypto_send;
-               struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
-       };
-       union {
-               struct tls_crypto_info crypto_recv;
-               struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
-       };
+       union tls_crypto_context crypto_send;
+       union tls_crypto_context crypto_recv;
 
        struct list_head list;
        struct net_device *netdev;
@@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
         * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
         */
        buf[0] = record_type;
-       buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
-       buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
+       buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
+       buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
        /* we can use IV for nonce explicit according to spec */
        buf[3] = pkt_len >> 8;
        buf[4] = pkt_len & 0xFF;
index 6f1e1f3..cd1773d 100644 (file)
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
 void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
 void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
 void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
 
 void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
 int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
index af9ef16..fdaaafd 100644 (file)
@@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
 int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
 void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
 int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+                        struct snd_soc_pcm_runtime *rtd,
                         const struct snd_soc_pcm_stream *params,
                         unsigned int num_params,
                         struct snd_soc_dapm_widget *source,
index 7113728..705b33d 100644 (file)
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
                __print_symbolic(__entry->mode, MIGRATE_MODE),
                __print_symbolic(__entry->reason, MIGRATE_REASON))
 );
-
-TRACE_EVENT(mm_numa_migrate_ratelimit,
-
-       TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
-
-       TP_ARGS(p, dst_nid, nr_pages),
-
-       TP_STRUCT__entry(
-               __array(        char,           comm,   TASK_COMM_LEN)
-               __field(        pid_t,          pid)
-               __field(        int,            dst_nid)
-               __field(        unsigned long,  nr_pages)
-       ),
-
-       TP_fast_assign(
-               memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
-               __entry->pid            = p->pid;
-               __entry->dst_nid        = dst_nid;
-               __entry->nr_pages       = nr_pages;
-       ),
-
-       TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
-               __entry->comm,
-               __entry->pid,
-               __entry->dst_nid,
-               __entry->nr_pages)
-);
 #endif /* _TRACE_MIGRATE_H */
 
 /* This part must be outside protection */
index 196587b..837393f 100644 (file)
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
        rxrpc_peer_new,
        rxrpc_peer_processing,
        rxrpc_peer_put,
-       rxrpc_peer_queued_error,
 };
 
 enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_peer_got,                      "GOT") \
        EM(rxrpc_peer_new,                      "NEW") \
        EM(rxrpc_peer_processing,               "PRO") \
-       EM(rxrpc_peer_put,                      "PUT") \
-       E_(rxrpc_peer_queued_error,             "QER")
+       E_(rxrpc_peer_put,                      "PUT")
 
 #define rxrpc_conn_traces \
        EM(rxrpc_conn_got,                      "GOT") \
index e4732d3..b0f8e87 100644 (file)
@@ -26,7 +26,9 @@
 #define HUGETLB_FLAG_ENCODE_2MB                (21 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_8MB                (23 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_16MB       (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB       (25 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_256MB      (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB      (29 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_1GB                (30 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_2GB                (31 << HUGETLB_FLAG_ENCODE_SHIFT)
 #define HUGETLB_FLAG_ENCODE_16GB       (34 << HUGETLB_FLAG_ENCODE_SHIFT)
index 07548de..251be35 100644 (file)
@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_HPAGE_1M 156
 #define KVM_CAP_NESTED_STATE 157
 #define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index 015a4c0..7a8a267 100644 (file)
@@ -25,7 +25,9 @@
 #define MFD_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MFD_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MFD_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MFD_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MFD_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MFD_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index bfd5938..d0f515d 100644 (file)
@@ -28,7 +28,9 @@
 #define MAP_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MAP_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MAP_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MAP_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MAP_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MAP_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index eeb787b..f35eb72 100644 (file)
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
 
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 /*
index dc520e1..8b73cb6 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <linux/types.h>
 #include <linux/socket.h>              /* For __kernel_sockaddr_storage. */
+#include <linux/in6.h>                 /* For struct in6_addr. */
 
 #define RDS_IB_ABI_VERSION             0x301
 
index dde1344..6507ad0 100644 (file)
@@ -65,7 +65,9 @@ struct shmid_ds {
 #define SHM_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define SHM_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define SHM_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define SHM_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define SHM_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define SHM_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index b1e22c4..84c3de8 100644 (file)
@@ -176,7 +176,7 @@ struct vhost_memory {
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
 
 #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
-#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
 
 /* VHOST_NET specific defines */
 
index f58cafa..f39352c 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef __HDA_TPLG_INTERFACE_H__
 #define __HDA_TPLG_INTERFACE_H__
 
+#include <linux/types.h>
+
 /*
  * Default types range from 0~12. type can range from 0 to 0xff
  * SST types start at higher to avoid any overlapping in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
 };
 
 struct skl_dfw_algo_data {
-       u32 set_params:2;
-       u32 rsvd:30;
-       u32 param_id;
-       u32 max;
+       __u32 set_params:2;
+       __u32 rsvd:30;
+       __u32 param_id;
+       __u32 max;
        char params[0];
 } __packed;
 
@@ -163,68 +165,68 @@ enum skl_tuple_type {
 /* v4 configuration data */
 
 struct skl_dfw_v4_module_pin {
-       u16 module_id;
-       u16 instance_id;
+       __u16 module_id;
+       __u16 instance_id;
 } __packed;
 
 struct skl_dfw_v4_module_fmt {
-       u32 channels;
-       u32 freq;
-       u32 bit_depth;
-       u32 valid_bit_depth;
-       u32 ch_cfg;
-       u32 interleaving_style;
-       u32 sample_type;
-       u32 ch_map;
+       __u32 channels;
+       __u32 freq;
+       __u32 bit_depth;
+       __u32 valid_bit_depth;
+       __u32 ch_cfg;
+       __u32 interleaving_style;
+       __u32 sample_type;
+       __u32 ch_map;
 } __packed;
 
 struct skl_dfw_v4_module_caps {
-       u32 set_params:2;
-       u32 rsvd:30;
-       u32 param_id;
-       u32 caps_size;
-       u32 caps[HDA_SST_CFG_MAX];
+       __u32 set_params:2;
+       __u32 rsvd:30;
+       __u32 param_id;
+       __u32 caps_size;
+       __u32 caps[HDA_SST_CFG_MAX];
 } __packed;
 
 struct skl_dfw_v4_pipe {
-       u8 pipe_id;
-       u8 pipe_priority;
-       u16 conn_type:4;
-       u16 rsvd:4;
-       u16 memory_pages:8;
+       __u8 pipe_id;
+       __u8 pipe_priority;
+       __u16 conn_type:4;
+       __u16 rsvd:4;
+       __u16 memory_pages:8;
 } __packed;
 
 struct skl_dfw_v4_module {
        char uuid[SKL_UUID_STR_SZ];
 
-       u16 module_id;
-       u16 instance_id;
-       u32 max_mcps;
-       u32 mem_pages;
-       u32 obs;
-       u32 ibs;
-       u32 vbus_id;
-
-       u32 max_in_queue:8;
-       u32 max_out_queue:8;
-       u32 time_slot:8;
-       u32 core_id:4;
-       u32 rsvd1:4;
-
-       u32 module_type:8;
-       u32 conn_type:4;
-       u32 dev_type:4;
-       u32 hw_conn_type:4;
-       u32 rsvd2:12;
-
-       u32 params_fixup:8;
-       u32 converter:8;
-       u32 input_pin_type:1;
-       u32 output_pin_type:1;
-       u32 is_dynamic_in_pin:1;
-       u32 is_dynamic_out_pin:1;
-       u32 is_loadable:1;
-       u32 rsvd3:11;
+       __u16 module_id;
+       __u16 instance_id;
+       __u32 max_mcps;
+       __u32 mem_pages;
+       __u32 obs;
+       __u32 ibs;
+       __u32 vbus_id;
+
+       __u32 max_in_queue:8;
+       __u32 max_out_queue:8;
+       __u32 time_slot:8;
+       __u32 core_id:4;
+       __u32 rsvd1:4;
+
+       __u32 module_type:8;
+       __u32 conn_type:4;
+       __u32 dev_type:4;
+       __u32 hw_conn_type:4;
+       __u32 rsvd2:12;
+
+       __u32 params_fixup:8;
+       __u32 converter:8;
+       __u32 input_pin_type:1;
+       __u32 output_pin_type:1;
+       __u32 is_dynamic_in_pin:1;
+       __u32 is_dynamic_out_pin:1;
+       __u32 is_loadable:1;
+       __u32 rsvd3:11;
 
        struct skl_dfw_v4_pipe pipe;
        struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
index 80b52b4..a2ab516 100644 (file)
 
 #include <xen/page.h>
 
+extern bool xen_scrub_pages;
+
 static inline void xenmem_reservation_scrub_page(struct page *page)
 {
-#ifdef CONFIG_XEN_SCRUB_PAGES
-       clear_highpage(page);
-#endif
+       if (xen_scrub_pages)
+               clear_highpage(page);
 }
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
index b0eb375..1c65fb3 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -199,13 +199,14 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
        }
 
        ipc_unlock_object(ipcp);
+       ipcp = ERR_PTR(-EIDRM);
 err:
        rcu_read_unlock();
        /*
         * Callers of shm_lock() must validate the status of the returned ipc
         * object pointer and error out as appropriate.
         */
-       return (void *)ipcp;
+       return ERR_CAST(ipcp);
 }
 
 static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
index 2590700..138f030 100644 (file)
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
 
        hdr = &btf->hdr;
        cur = btf->nohdr_data + hdr->type_off;
-       end = btf->nohdr_data + hdr->type_len;
+       end = cur + hdr->type_len;
 
        env->log_type_id = 1;
        while (cur < end) {
index 04b8eda..03cc59e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jhash.h>
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/random.h>
 #include <uapi/linux/btf.h>
 #include "percpu_freelist.h"
 #include "bpf_lru_list.h"
@@ -41,6 +42,7 @@ struct bpf_htab {
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
+       u32 hashrnd;
 };
 
 /* each htab element is struct htab_elem + key + value */
@@ -371,6 +373,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
        if (!htab->buckets)
                goto free_htab;
 
+       htab->hashrnd = get_random_int();
        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                raw_spin_lock_init(&htab->buckets[i].lock);
@@ -402,9 +405,9 @@ free_htab:
        return ERR_PTR(err);
 }
 
-static inline u32 htab_map_hash(const void *key, u32 key_len)
+static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
 {
-       return jhash(key, key_len, 0);
+       return jhash(key, key_len, hashrnd);
 }
 
 static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
@@ -470,7 +473,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        head = select_bucket(htab, hash);
 
@@ -597,7 +600,7 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        if (!key)
                goto find_first_elem;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        head = select_bucket(htab, hash);
 
@@ -824,7 +827,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -880,7 +883,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -945,7 +948,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -998,7 +1001,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
 
        b = __select_bucket(htab, hash);
        head = &b->head;
@@ -1071,7 +1074,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
        b = __select_bucket(htab, hash);
        head = &b->head;
 
@@ -1103,7 +1106,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
 
        key_size = map->key_size;
 
-       hash = htab_map_hash(key, key_size);
+       hash = htab_map_hash(key, key_size, htab->hashrnd);
        b = __select_bucket(htab, hash);
        head = &b->head;
 
index 22ad967..830d7f0 100644 (file)
@@ -129,7 +129,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
        struct bpf_cgroup_storage *storage;
        struct bpf_storage_buffer *new;
 
-       if (flags & BPF_NOEXIST)
+       if (flags != BPF_ANY && flags != BPF_EXIST)
                return -EINVAL;
 
        storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -195,6 +195,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
        if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
                return ERR_PTR(-EINVAL);
 
+       if (attr->value_size == 0)
+               return ERR_PTR(-EINVAL);
+
        if (attr->value_size > PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
index 98e621a..0a0f2ec 100644 (file)
@@ -132,6 +132,7 @@ struct smap_psock {
        struct work_struct gc_work;
 
        struct proto *sk_proto;
+       void (*save_unhash)(struct sock *sk);
        void (*save_close)(struct sock *sk, long timeout);
        void (*save_data_ready)(struct sock *sk);
        void (*save_write_space)(struct sock *sk);
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_unhash(struct sock *sk);
 static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
                         struct proto *base)
 {
        prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].unhash               = bpf_tcp_unhash;
        prot[SOCKMAP_BASE].close                = bpf_tcp_close;
        prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
        prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk)
                return -EBUSY;
        }
 
+       psock->save_unhash = sk->sk_prot->unhash;
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
@@ -236,7 +240,7 @@ static int bpf_tcp_init(struct sock *sk)
 }
 
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock);
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md);
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge);
 
 static void bpf_tcp_release(struct sock *sk)
 {
@@ -248,7 +252,7 @@ static void bpf_tcp_release(struct sock *sk)
                goto out;
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
@@ -305,39 +309,21 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
        return e;
 }
 
-static void bpf_tcp_close(struct sock *sk, long timeout)
+static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock)
 {
-       void (*close_fun)(struct sock *sk, long timeout);
        struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
-       struct smap_psock *psock;
        struct sock *osk;
 
-       lock_sock(sk);
-       rcu_read_lock();
-       psock = smap_psock_sk(sk);
-       if (unlikely(!psock)) {
-               rcu_read_unlock();
-               release_sock(sk);
-               return sk->sk_prot->close(sk, timeout);
-       }
-
-       /* The psock may be destroyed anytime after exiting the RCU critial
-        * section so by the time we use close_fun the psock may no longer
-        * be valid. However, bpf_tcp_close is called with the sock lock
-        * held so the close hook and sk are still valid.
-        */
-       close_fun = psock->save_close;
-
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
                psock->cork = NULL;
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -369,7 +355,7 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                        /* If another thread deleted this object skip deletion.
                         * The refcnt on psock may or may not be zero.
                         */
-                       if (l) {
+                       if (l && l == link) {
                                hlist_del_rcu(&link->hash_node);
                                smap_release_sock(psock, link->sk);
                                free_htab_elem(htab, link);
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(e);
                e = psock_map_pop(sk, psock);
        }
+}
+
+static void bpf_tcp_unhash(struct sock *sk)
+{
+       void (*unhash_fun)(struct sock *sk);
+       struct smap_psock *psock;
+
+       rcu_read_lock();
+       psock = smap_psock_sk(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+               if (sk->sk_prot->unhash)
+                       sk->sk_prot->unhash(sk);
+               return;
+       }
+       unhash_fun = psock->save_unhash;
+       bpf_tcp_remove(sk, psock);
+       rcu_read_unlock();
+       unhash_fun(sk);
+}
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+       void (*close_fun)(struct sock *sk, long timeout);
+       struct smap_psock *psock;
+
+       lock_sock(sk);
+       rcu_read_lock();
+       psock = smap_psock_sk(sk);
+       if (unlikely(!psock)) {
+               rcu_read_unlock();
+               release_sock(sk);
+               return sk->sk_prot->close(sk, timeout);
+       }
+       close_fun = psock->save_close;
+       bpf_tcp_remove(sk, psock);
        rcu_read_unlock();
        release_sock(sk);
        close_fun(sk, timeout);
@@ -570,14 +592,16 @@ static void free_bytes_sg(struct sock *sk, int bytes,
        md->sg_start = i;
 }
 
-static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
+static int free_sg(struct sock *sk, int start,
+                  struct sk_msg_buff *md, bool charge)
 {
        struct scatterlist *sg = md->sg_data;
        int i = start, free = 0;
 
        while (sg[i].length) {
                free += sg[i].length;
-               sk_mem_uncharge(sk, sg[i].length);
+               if (charge)
+                       sk_mem_uncharge(sk, sg[i].length);
                if (!md->skb)
                        put_page(sg_page(&sg[i]));
                sg[i].length = 0;
@@ -594,9 +618,9 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
        return free;
 }
 
-static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
+static int free_start_sg(struct sock *sk, struct sk_msg_buff *md, bool charge)
 {
-       int free = free_sg(sk, md->sg_start, md);
+       int free = free_sg(sk, md->sg_start, md, charge);
 
        md->sg_start = md->sg_end;
        return free;
@@ -604,7 +628,7 @@ static int free_start_sg(struct sock *sk, struct sk_msg_buff *md)
 
 static int free_curr_sg(struct sock *sk, struct sk_msg_buff *md)
 {
-       return free_sg(sk, md->sg_curr, md);
+       return free_sg(sk, md->sg_curr, md, true);
 }
 
 static int bpf_map_msg_verdict(int _rc, struct sk_msg_buff *md)
@@ -718,7 +742,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                list_add_tail(&r->list, &psock->ingress);
                sk->sk_data_ready(sk);
        } else {
-               free_start_sg(sk, r);
+               free_start_sg(sk, r, true);
                kfree(r);
        }
 
@@ -752,14 +776,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                release_sock(sk);
        }
        smap_release_sock(psock, sk);
-       if (unlikely(err))
-               goto out;
-       return 0;
+       return err;
 out_rcu:
        rcu_read_unlock();
-out:
-       free_bytes_sg(NULL, send, md, false);
-       return err;
+       return 0;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -822,7 +842,7 @@ more_data:
        case __SK_PASS:
                err = bpf_tcp_push(sk, send, m, flags, true);
                if (unlikely(err)) {
-                       *copied -= free_start_sg(sk, m);
+                       *copied -= free_start_sg(sk, m, true);
                        break;
                }
 
@@ -845,16 +865,17 @@ more_data:
                lock_sock(sk);
 
                if (unlikely(err < 0)) {
-                       free_start_sg(sk, m);
+                       int free = free_start_sg(sk, m, false);
+
                        psock->sg_size = 0;
                        if (!cork)
-                               *copied -= send;
+                               *copied -= free;
                } else {
                        psock->sg_size -= send;
                }
 
                if (cork) {
-                       free_start_sg(sk, m);
+                       free_start_sg(sk, m, true);
                        psock->sg_size = 0;
                        kfree(m);
                        m = NULL;
@@ -912,6 +933,8 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
+       if (!skb_queue_empty(&sk->sk_receive_queue))
+               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
        rcu_read_lock();
        psock = smap_psock_sk(sk);
@@ -922,9 +945,6 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                goto out;
        rcu_read_unlock();
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
-               return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
-
        lock_sock(sk);
 bytes_ready:
        while (copied != len) {
@@ -1122,7 +1142,7 @@ wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (m && m != psock->cork)
-                               free_start_sg(sk, m);
+                               free_start_sg(sk, m, true);
                        goto out_err;
                }
        }
@@ -1427,12 +1447,15 @@ out:
 static void smap_write_space(struct sock *sk)
 {
        struct smap_psock *psock;
+       void (*write_space)(struct sock *sk);
 
        rcu_read_lock();
        psock = smap_psock_sk(sk);
        if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state)))
                schedule_work(&psock->tx_work);
+       write_space = psock->save_write_space;
        rcu_read_unlock();
+       write_space(sk);
 }
 
 static void smap_stop_sock(struct smap_psock *psock, struct sock *sk)
@@ -1461,10 +1484,16 @@ static void smap_destroy_psock(struct rcu_head *rcu)
        schedule_work(&psock->gc_work);
 }
 
+static bool psock_is_smap_sk(struct sock *sk)
+{
+       return inet_csk(sk)->icsk_ulp_ops == &bpf_tcp_ulp_ops;
+}
+
 static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
-               tcp_cleanup_ulp(sock);
+               if (psock_is_smap_sk(sock))
+                       tcp_cleanup_ulp(sock);
                write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
                write_unlock_bh(&sock->sk_callback_lock);
@@ -1578,13 +1607,13 @@ static void smap_gc_work(struct work_struct *w)
                bpf_prog_put(psock->bpf_tx_msg);
 
        if (psock->cork) {
-               free_start_sg(psock->sock, psock->cork);
+               free_start_sg(psock->sock, psock->cork, true);
                kfree(psock->cork);
        }
 
        list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
                list_del(&md->list);
-               free_start_sg(psock->sock, md);
+               free_start_sg(psock->sock, md, true);
                kfree(md);
        }
 
@@ -1891,6 +1920,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         * doesn't update user data.
         */
        if (psock) {
+               if (!psock_is_smap_sk(sock)) {
+                       err = -EBUSY;
+                       goto out_progs;
+               }
                if (READ_ONCE(psock->bpf_parse) && parse) {
                        err = -EBUSY;
                        goto out_progs;
@@ -2086,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+        * state.
+        */
        if (skops.sk->sk_type != SOCK_STREAM ||
-           skops.sk->sk_protocol != IPPROTO_TCP) {
+           skops.sk->sk_protocol != IPPROTO_TCP ||
+           skops.sk->sk_state != TCP_ESTABLISHED) {
                fput(socket->file);
                return -EOPNOTSUPP;
        }
@@ -2140,7 +2177,9 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
                return ERR_PTR(-EPERM);
 
        /* check sanity of attributes */
-       if (attr->max_entries == 0 || attr->value_size != 4 ||
+       if (attr->max_entries == 0 ||
+           attr->key_size == 0 ||
+           attr->value_size != 4 ||
            attr->map_flags & ~SOCK_CREATE_FLAG_MASK)
                return ERR_PTR(-EINVAL);
 
@@ -2267,8 +2306,10 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        }
        l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
                             htab->map.numa_node);
-       if (!l_new)
+       if (!l_new) {
+               atomic_dec(&htab->count);
                return ERR_PTR(-ENOMEM);
+       }
 
        memcpy(l_new->key, key, key_size);
        l_new->sk = sk;
@@ -2438,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map,
                return -EINVAL;
        }
 
+       /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+        * state.
+        */
+       if (skops.sk->sk_type != SOCK_STREAM ||
+           skops.sk->sk_protocol != IPPROTO_TCP ||
+           skops.sk->sk_state != TCP_ESTABLISHED) {
+               fput(socket->file);
+               return -EOPNOTSUPP;
+       }
+
        lock_sock(skops.sk);
        preempt_disable();
        rcu_read_lock();
@@ -2528,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_check_btf = map_check_no_btf,
 };
 
+static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops)
+{
+       return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
+              ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+}
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
           struct bpf_map *, map, void *, key, u64, flags)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
+
+       /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+        * state. This checks that the sock ops triggering the update is
+        * one indicating we are (or will be soon) in an ESTABLISHED state.
+        */
+       if (!bpf_is_valid_sock_op(bpf_sock))
+               return -EOPNOTSUPP;
        return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
 }
 
@@ -2550,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
           struct bpf_map *, map, void *, key, u64, flags)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
+
+       if (!bpf_is_valid_sock_op(bpf_sock))
+               return -EOPNOTSUPP;
        return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
 }
 
index 9224611..465952a 100644 (file)
@@ -2896,6 +2896,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        u64 umin_val, umax_val;
        u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
+       if (insn_bitness == 32) {
+               /* Relevant for 32-bit RSH: Information can propagate towards
+                * LSB, so it isn't sufficient to only truncate the output to
+                * 32 bits.
+                */
+               coerce_reg_to_size(dst_reg, 4);
+               coerce_reg_to_size(&src_reg, 4);
+       }
+
        smin_val = src_reg.smin_value;
        smax_val = src_reg.smax_value;
        umin_val = src_reg.umin_value;
@@ -3131,7 +3140,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops are (32,32)->32 */
                coerce_reg_to_size(dst_reg, 4);
-               coerce_reg_to_size(&src_reg, 4);
        }
 
        __reg_deduce_bounds(dst_reg);
@@ -3163,7 +3171,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
                                 * an arbitrary scalar. Disallow all math except
                                 * pointer subtraction
                                 */
-                               if (opcode == BPF_SUB){
+                               if (opcode == BPF_SUB && env->allow_ptr_leaks) {
                                        mark_reg_unknown(env, regs, insn->dst_reg);
                                        return 0;
                                }
index ed44d7d..0097ace 100644 (file)
@@ -102,8 +102,6 @@ static inline void cpuhp_lock_release(bool bringup) { }
  * @name:      Name of the step
  * @startup:   Startup function of the step
  * @teardown:  Teardown function of the step
- * @skip_onerr:        Do not invoke the functions on error rollback
- *             Will go away once the notifiers are gone
  * @cant_stop: Bringup/teardown can't be stopped at this step
  */
 struct cpuhp_step {
@@ -119,7 +117,6 @@ struct cpuhp_step {
                                         struct hlist_node *node);
        } teardown;
        struct hlist_head       list;
-       bool                    skip_onerr;
        bool                    cant_stop;
        bool                    multi_instance;
 };
@@ -550,12 +547,8 @@ static int bringup_cpu(unsigned int cpu)
 
 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-       for (st->state--; st->state > st->target; st->state--) {
-               struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-               if (!step->skip_onerr)
-                       cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
-       }
+       for (st->state--; st->state > st->target; st->state--)
+               cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 }
 
 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -614,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
        bool bringup = st->bringup;
        enum cpuhp_state state;
 
+       if (WARN_ON_ONCE(!st->should_run))
+               return;
+
        /*
         * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
         * that if we see ->should_run we also see the rest of the state.
         */
        smp_mb();
 
-       if (WARN_ON_ONCE(!st->should_run))
-               return;
-
        cpuhp_lock_acquire(bringup);
 
        if (st->single) {
@@ -644,12 +637,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
 
        WARN_ON_ONCE(!cpuhp_is_ap_state(state));
 
-       if (st->rollback) {
-               struct cpuhp_step *step = cpuhp_get_step(state);
-               if (step->skip_onerr)
-                       goto next;
-       }
-
        if (cpuhp_is_atomic_state(state)) {
                local_irq_disable();
                st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
@@ -673,7 +660,6 @@ static void cpuhp_thread_fun(unsigned int cpu)
                st->should_run = false;
        }
 
-next:
        cpuhp_lock_release(bringup);
 
        if (!st->should_run)
@@ -916,12 +902,8 @@ void cpuhp_report_idle_dead(void)
 
 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 {
-       for (st->state++; st->state < st->target; st->state++) {
-               struct cpuhp_step *step = cpuhp_get_step(st->state);
-
-               if (!step->skip_onerr)
-                       cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
-       }
+       for (st->state++; st->state < st->target; st->state++)
+               cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 }
 
 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
@@ -934,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
                if (ret) {
                        st->target = prev_state;
-                       undo_cpu_down(cpu, st);
+                       if (st->state < prev_state)
+                               undo_cpu_down(cpu, st);
                        break;
                }
        }
@@ -987,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
         * to do the further cleanups.
         */
        ret = cpuhp_down_callbacks(cpu, st, target);
-       if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+       if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
                cpuhp_reset_state(st, prev_state);
                __cpuhp_kick_ap(st);
        }
index 9bd5430..1b1d63b 100644 (file)
@@ -23,6 +23,9 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU
        bool
        select NEED_DMA_MAP_STATE
 
+config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+       bool
+
 config DMA_DIRECT_OPS
        bool
        depends on HAS_DMA
index 1c35b7b..de87b02 100644 (file)
@@ -168,7 +168,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 int dma_direct_supported(struct device *dev, u64 mask)
 {
 #ifdef CONFIG_ZONE_DMA
-       if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
                return 0;
 #else
        /*
@@ -177,7 +177,7 @@ int dma_direct_supported(struct device *dev, u64 mask)
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
-       if (mask < DMA_BIT_MASK(32))
+       if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
                return 0;
 #endif
        /*
index 2a62b96..5a97f34 100644 (file)
@@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
        _perf_event_disable(bp);
 
        err = modify_user_hw_breakpoint_check(bp, attr, true);
-       if (err) {
-               if (!bp->attr.disabled)
-                       _perf_event_enable(bp);
 
-               return err;
-       }
-
-       if (!attr->disabled)
+       if (!bp->attr.disabled)
                _perf_event_enable(bp);
-       return 0;
+
+       return err;
 }
 
 static int perf_event_modify_attr(struct perf_event *event,
@@ -3940,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
                goto out;
        }
 
+       /* If this is a pinned event it must be running on this CPU */
+       if (event->attr.pinned && event->oncpu != smp_processor_id()) {
+               ret = -EBUSY;
+               goto out;
+       }
+
        /*
         * If the event is currently on this CPU, its either a per-task event,
         * or local to this CPU. Furthermore it means its ACTIVE (otherwise
@@ -5948,6 +5949,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
                unsigned long sp;
                unsigned int rem;
                u64 dyn_size;
+               mm_segment_t fs;
 
                /*
                 * We dump:
@@ -5965,7 +5967,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
 
                /* Data. */
                sp = perf_user_stack_pointer(regs);
+               fs = get_fs();
+               set_fs(USER_DS);
                rem = __output_copy_user(handle, (void *) sp, dump_size);
+               set_fs(fs);
                dyn_size = dump_size - rem;
 
                perf_output_skip(handle, rem);
@@ -8309,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
                        goto unlock;
 
                list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+                       if (event->cpu != smp_processor_id())
+                               continue;
                        if (event->attr.type != PERF_TYPE_TRACEPOINT)
                                continue;
                        if (event->attr.config != entry->type)
@@ -9426,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu)
        if (pmu->task_ctx_nr > perf_invalid_context)
                return;
 
-       mutex_lock(&pmus_lock);
        free_percpu(pmu->pmu_cpu_context);
-       mutex_unlock(&pmus_lock);
 }
 
 /*
@@ -9684,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
 
 void perf_pmu_unregister(struct pmu *pmu)
 {
-       int remove_device;
-
        mutex_lock(&pmus_lock);
-       remove_device = pmu_bus_running;
        list_del_rcu(&pmu->entry);
-       mutex_unlock(&pmus_lock);
 
        /*
         * We dereference the pmu list under both SRCU and regular RCU, so
@@ -9701,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu)
        free_percpu(pmu->pmu_disable_count);
        if (pmu->type >= PERF_TYPE_MAX)
                idr_remove(&pmu_idr, pmu->type);
-       if (remove_device) {
+       if (pmu_bus_running) {
                if (pmu->nr_addr_filters)
                        device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
                device_del(pmu->dev);
                put_device(pmu->dev);
        }
        free_pmu_context(pmu);
+       mutex_unlock(&pmus_lock);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
 
index b3814fc..d6b5618 100644 (file)
@@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a
  */
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
+       int err;
+
        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
         * will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -520,15 +522,12 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
        else
                perf_event_disable(bp);
 
-       if (!attr->disabled) {
-               int err = modify_user_hw_breakpoint_check(bp, attr, false);
+       err = modify_user_hw_breakpoint_check(bp, attr, false);
 
-               if (err)
-                       return err;
+       if (!bp->attr.disabled)
                perf_event_enable(bp);
-               bp->attr.disabled = 0;
-       }
-       return 0;
+
+       return err;
 }
 EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
 
index d896e9c..f0b5847 100644 (file)
@@ -550,8 +550,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                        goto out;
        }
        /* a new mm has just been created */
-       arch_dup_mmap(oldmm, mm);
-       retval = 0;
+       retval = arch_dup_mmap(oldmm, mm);
 out:
        up_write(&mm->mmap_sem);
        flush_tlb_mm(oldmm);
index 01ebdf1..2e62503 100644 (file)
@@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
-                       WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+                       WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
index e406c5f..dd13f86 100644 (file)
@@ -55,7 +55,6 @@
 
 #include "lockdep_internals.h"
 
-#include <trace/events/preemptirq.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/lock.h>
 
index 1a81a12..3f8a351 100644 (file)
@@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
-                * it's wounded in __ww_mutex_lock_check_stamp() or has a
+                * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
@@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, current);
 
        lock_contended(&lock->dep_map, ip);
 
index 5b915b3..65a3b7e 100644 (file)
@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
 {
        struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
        struct ww_acquire_ctx ctx;
-       int err;
+       int err, erra = 0;
 
        ww_acquire_init(&ctx, &ww_class);
        ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
 
        err = ww_mutex_lock(cycle->b_mutex, &ctx);
        if (err == -EDEADLK) {
+               err = 0;
                ww_mutex_unlock(&cycle->a_mutex);
                ww_mutex_lock_slow(cycle->b_mutex, &ctx);
-               err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+               erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
        }
 
        if (!err)
                ww_mutex_unlock(cycle->b_mutex);
-       ww_mutex_unlock(&cycle->a_mutex);
+       if (!erra)
+               ww_mutex_unlock(&cycle->a_mutex);
        ww_acquire_fini(&ctx);
 
-       cycle->result = err;
+       cycle->result = err ?: erra;
 }
 
 static int __test_cycle(unsigned int nthreads)
@@ -324,7 +326,7 @@ static int __test_cycle(unsigned int nthreads)
                if (!cycle->result)
                        continue;
 
-               pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n",
+               pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
                       n, nthreads, cycle->result);
                ret = -EINVAL;
                break;
index de1cfc4..cdf63e5 100644 (file)
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
                idr_preload_end();
 
                if (nr < 0) {
-                       retval = nr;
+                       retval = (nr == -ENOSPC) ? -EAGAIN : nr;
                        goto out_free;
                }
 
index 924e37f..9bf5404 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/kmsg_dump.h>
 #include <linux/syslog.h>
 #include <linux/cpu.h>
-#include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
 #include <linux/irq_work.h>
@@ -352,7 +351,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
  */
 
 enum log_flags {
-       LOG_NOCONS      = 1,    /* suppress print, do not print to console */
        LOG_NEWLINE     = 2,    /* text ended with a newline */
        LOG_PREFIX      = 4,    /* text started with a prefix */
        LOG_CONT        = 8,    /* text is a fragment of a continuation line */
@@ -1882,9 +1880,6 @@ int vprintk_store(int facility, int level,
        if (dict)
                lflags |= LOG_PREFIX|LOG_NEWLINE;
 
-       if (suppress_message_printing(level))
-               lflags |= LOG_NOCONS;
-
        return log_output(facility, level, lflags,
                          dict, dictlen, text, text_len);
 }
@@ -2033,6 +2028,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
                                 const char *text, size_t len) {}
 static size_t msg_print_text(const struct printk_log *msg,
                             bool syslog, char *buf, size_t size) { return 0; }
+static bool suppress_message_printing(int level) { return false; }
 
 #endif /* CONFIG_PRINTK */
 
@@ -2369,10 +2365,11 @@ skip:
                        break;
 
                msg = log_from_idx(console_idx);
-               if (msg->flags & LOG_NOCONS) {
+               if (suppress_message_printing(msg->level)) {
                        /*
-                        * Skip record if !ignore_loglevel, and
-                        * record has level above the console loglevel.
+                        * Skip record we have buffered and already printed
+                        * directly to the console when we received it, and
+                        * record that has level above the console loglevel.
                         */
                        console_idx = log_next(console_idx);
                        console_seq++;
index a0a74c5..0913b4d 100644 (file)
@@ -306,12 +306,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
        return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
        this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
        this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }
index 625bc98..ad97f3b 100644 (file)
@@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
        if (task_cpu(p) != new_cpu) {
                if (p->sched_class->migrate_task_rq)
-                       p->sched_class->migrate_task_rq(p);
+                       p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
                rseq_migrate(p);
                perf_event_task_migrate(p);
index 997ea7b..91e4202 100644 (file)
@@ -1607,7 +1607,7 @@ out:
        return cpu;
 }
 
-static void migrate_task_rq_dl(struct task_struct *p)
+static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
 {
        struct rq *rq;
 
index 60caf1f..6383aa6 100644 (file)
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-       static_key_disable(&sched_feat_keys[i]);
+       static_key_disable_cpuslocked(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-       static_key_enable(&sched_feat_keys[i]);
+       static_key_enable_cpuslocked(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 
        /* Ensure the static_key remains in a consistent state */
        inode = file_inode(filp);
+       cpus_read_lock();
        inode_lock(inode);
        ret = sched_feat_set(cmp);
        inode_unlock(inode);
+       cpus_read_unlock();
        if (ret < 0)
                return ret;
 
index b39fb59..7fc4a37 100644 (file)
@@ -1392,6 +1392,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
        int last_cpupid, this_cpupid;
 
        this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+
+       /*
+        * Allow first faults or private faults to migrate immediately early in
+        * the lifetime of a task. The magic number 4 is based on waiting for
+        * two full passes of the "multi-stage node selection" test that is
+        * executed below.
+        */
+       if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+           (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
+               return true;
 
        /*
         * Multi-stage node selection is used in conjunction with a periodic
@@ -1410,7 +1421,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
         * This quadric squishes small probabilities, making it less likely we
         * act on an unlikely task<->page relation.
         */
-       last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
        if (!cpupid_pid_unset(last_cpupid) &&
                                cpupid_to_nid(last_cpupid) != dst_nid)
                return false;
@@ -1514,6 +1524,21 @@ struct task_numa_env {
 static void task_numa_assign(struct task_numa_env *env,
                             struct task_struct *p, long imp)
 {
+       struct rq *rq = cpu_rq(env->dst_cpu);
+
+       /* Bail out if run-queue part of active NUMA balance. */
+       if (xchg(&rq->numa_migrate_on, 1))
+               return;
+
+       /*
+        * Clear previous best_cpu/rq numa-migrate flag, since task now
+        * found a better CPU to move/swap.
+        */
+       if (env->best_cpu != -1) {
+               rq = cpu_rq(env->best_cpu);
+               WRITE_ONCE(rq->numa_migrate_on, 0);
+       }
+
        if (env->best_task)
                put_task_struct(env->best_task);
        if (p)
@@ -1553,6 +1578,13 @@ static bool load_too_imbalanced(long src_load, long dst_load,
 }
 
 /*
+ * Maximum NUMA importance can be 1998 (2*999);
+ * SMALLIMP @ 30 would be close to 1998/64.
+ * Used to deter task migration.
+ */
+#define SMALLIMP       30
+
+/*
  * This checks if the overall compute and NUMA accesses of the system would
  * be improved if the source tasks was migrated to the target dst_cpu taking
  * into account that it might be best if task running on the dst_cpu should
@@ -1569,6 +1601,9 @@ static void task_numa_compare(struct task_numa_env *env,
        long moveimp = imp;
        int dist = env->dist;
 
+       if (READ_ONCE(dst_rq->numa_migrate_on))
+               return;
+
        rcu_read_lock();
        cur = task_rcu_dereference(&dst_rq->curr);
        if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
@@ -1582,7 +1617,7 @@ static void task_numa_compare(struct task_numa_env *env,
                goto unlock;
 
        if (!cur) {
-               if (maymove || imp > env->best_imp)
+               if (maymove && moveimp >= env->best_imp)
                        goto assign;
                else
                        goto unlock;
@@ -1625,16 +1660,22 @@ static void task_numa_compare(struct task_numa_env *env,
                               task_weight(cur, env->dst_nid, dist);
        }
 
-       if (imp <= env->best_imp)
-               goto unlock;
-
        if (maymove && moveimp > imp && moveimp > env->best_imp) {
-               imp = moveimp - 1;
+               imp = moveimp;
                cur = NULL;
                goto assign;
        }
 
        /*
+        * If the NUMA importance is less than SMALLIMP,
+        * task migration might only result in ping pong
+        * of tasks and also hurt performance due to cache
+        * misses.
+        */
+       if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
+               goto unlock;
+
+       /*
         * In the overloaded case, try and keep the load balanced.
         */
        load = task_h_load(env->p) - task_h_load(cur);
@@ -1710,6 +1751,7 @@ static int task_numa_migrate(struct task_struct *p)
                .best_cpu = -1,
        };
        struct sched_domain *sd;
+       struct rq *best_rq;
        unsigned long taskweight, groupweight;
        int nid, ret, dist;
        long taskimp, groupimp;
@@ -1805,20 +1847,17 @@ static int task_numa_migrate(struct task_struct *p)
        if (env.best_cpu == -1)
                return -EAGAIN;
 
-       /*
-        * Reset the scan period if the task is being rescheduled on an
-        * alternative node to recheck if the tasks is now properly placed.
-        */
-       p->numa_scan_period = task_scan_start(p);
-
+       best_rq = cpu_rq(env.best_cpu);
        if (env.best_task == NULL) {
                ret = migrate_task_to(p, env.best_cpu);
+               WRITE_ONCE(best_rq->numa_migrate_on, 0);
                if (ret != 0)
                        trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
                return ret;
        }
 
        ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+       WRITE_ONCE(best_rq->numa_migrate_on, 0);
 
        if (ret != 0)
                trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
@@ -2596,6 +2635,39 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
        }
 }
 
+static void update_scan_period(struct task_struct *p, int new_cpu)
+{
+       int src_nid = cpu_to_node(task_cpu(p));
+       int dst_nid = cpu_to_node(new_cpu);
+
+       if (!static_branch_likely(&sched_numa_balancing))
+               return;
+
+       if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
+               return;
+
+       if (src_nid == dst_nid)
+               return;
+
+       /*
+        * Allow resets if faults have been trapped before one scan
+        * has completed. This is most likely due to a new task that
+        * is pulled cross-node due to wakeups or load balancing.
+        */
+       if (p->numa_scan_seq) {
+               /*
+                * Avoid scan adjustments if moving to the preferred
+                * node or if the task was not previously running on
+                * the preferred node.
+                */
+               if (dst_nid == p->numa_preferred_nid ||
+                   (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+                       return;
+       }
+
+       p->numa_scan_period = task_scan_start(p);
+}
+
 #else
 static void task_tick_numa(struct rq *rq, struct task_struct *curr)
 {
@@ -2609,6 +2681,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
 {
 }
 
+static inline void update_scan_period(struct task_struct *p, int new_cpu)
+{
+}
+
 #endif /* CONFIG_NUMA_BALANCING */
 
 static void
@@ -3362,6 +3438,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
  * attach_entity_load_avg - attach this entity to its cfs_rq load avg
  * @cfs_rq: cfs_rq to attach to
  * @se: sched_entity to attach
+ * @flags: migration hints
  *
  * Must call update_cfs_rq_load_avg() before this, since we rely on
  * cfs_rq->avg.last_update_time being current.
@@ -6274,7 +6351,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
  * cfs_rq_of(p) references at time of call are still valid and identify the
  * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
  */
-static void migrate_task_rq_fair(struct task_struct *p)
+static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
 {
        /*
         * As blocked tasks retain absolute vruntime the migration needs to
@@ -6327,6 +6404,8 @@ static void migrate_task_rq_fair(struct task_struct *p)
 
        /* We have migrated, no longer consider this task hot */
        p->se.exec_start = 0;
+
+       update_scan_period(p, new_cpu);
 }
 
 static void task_dead_fair(struct task_struct *p)
@@ -7263,6 +7342,7 @@ static void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq, *pos;
+       const struct sched_class *curr_class;
        struct rq_flags rf;
        bool done = true;
 
@@ -7299,8 +7379,10 @@ static void update_blocked_averages(int cpu)
                if (cfs_rq_has_blocked(cfs_rq))
                        done = false;
        }
-       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
        /* Don't need periodic decay once load/util_avg are null */
        if (others_have_blocked(rq))
@@ -7365,13 +7447,16 @@ static inline void update_blocked_averages(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq = &rq->cfs;
+       const struct sched_class *curr_class;
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
-       update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
-       update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+       curr_class = rq->curr->sched_class;
+       update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+       update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
        update_irq_load_avg(rq, 0);
 #ifdef CONFIG_NO_HZ_COMMON
        rq->last_blocked_load_update_tick = jiffies;
@@ -7482,10 +7567,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
        return load_idx;
 }
 
-static unsigned long scale_rt_capacity(int cpu)
+static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+       unsigned long max = arch_scale_cpu_capacity(sd, cpu);
        unsigned long used, free;
        unsigned long irq;
 
@@ -7507,7 +7592,7 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-       unsigned long capacity = scale_rt_capacity(cpu);
+       unsigned long capacity = scale_rt_capacity(sd, cpu);
        struct sched_group *sdg = sd->groups;
 
        cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
@@ -8269,7 +8354,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
 force_balance:
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(env, &sds);
-       return sds.busiest;
+       return env->imbalance ? sds.busiest : NULL;
 
 out_balanced:
        env->imbalance = 0;
@@ -9638,7 +9723,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
         * - A task which has been woken up by try_to_wake_up() and
         *   waiting for actually being woken up by sched_ttwu_pending().
         */
-       if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+       if (!se->sum_exec_runtime ||
+           (p->state == TASK_WAKING && p->sched_remote_wakeup))
                return true;
 
        return false;
index 4a2e8ca..455fa33 100644 (file)
@@ -783,6 +783,7 @@ struct rq {
 #ifdef CONFIG_NUMA_BALANCING
        unsigned int            nr_numa_running;
        unsigned int            nr_preferred_running;
+       unsigned int            numa_migrate_on;
 #endif
        #define CPU_LOAD_IDX_MAX 5
        unsigned long           cpu_load[CPU_LOAD_IDX_MAX];
@@ -1523,7 +1524,7 @@ struct sched_class {
 
 #ifdef CONFIG_SMP
        int  (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
-       void (*migrate_task_rq)(struct task_struct *p);
+       void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
        void (*task_woken)(struct rq *this_rq, struct task_struct *task);
 
index 56a0fed..505a41c 100644 (file)
@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
 
        n = sched_max_numa_distance;
 
-       if (sched_domains_numa_levels <= 1) {
+       if (sched_domains_numa_levels <= 2) {
                sched_numa_topology_type = NUMA_DIRECT;
                return;
        }
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
                        break;
        }
 
-       if (!level)
-               return;
-
        /*
         * 'level' contains the number of unique distances
         *
index cf5c675..123bd73 100644 (file)
@@ -71,9 +71,6 @@
 #include <asm/io.h>
 #include <asm/unistd.h>
 
-/* Hardening for Spectre-v1 */
-#include <linux/nospec.h>
-
 #include "uid16.h"
 
 #ifndef SET_UNALIGN_CTL
index f74fb00..0e6e97a 100644 (file)
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
        spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
 
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
 /*
  * Interval: 0.5sec Threshold: 0.0625s
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
 #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
 
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+       /*
+        * We cannot directly run clocksource_watchdog_kthread() here, because
+        * clocksource_select() calls timekeeping_notify() which uses
+        * stop_machine(). One cannot use stop_machine() from a workqueue() due
+        * lock inversions wrt CPU hotplug.
+        *
+        * Also, we only ever run this work once or twice during the lifetime
+        * of the kernel, so there is no point in creating a more permanent
+        * kthread for this.
+        *
+        * If kthread_run fails the next watchdog scan over the
+        * watchdog_list will find the unstable clock again.
+        */
+       kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
 static void __clocksource_unstable(struct clocksource *cs)
 {
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
        /*
-        * If the clocksource is registered clocksource_watchdog_work() will
+        * If the clocksource is registered clocksource_watchdog_kthread() will
         * re-rate and re-select.
         */
        if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
        if (cs->mark_unstable)
                cs->mark_unstable(cs);
 
-       /* kick clocksource_watchdog_work() */
+       /* kick clocksource_watchdog_kthread() */
        if (finished_booting)
                schedule_work(&watchdog_work);
 }
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
  * @cs:                clocksource to be marked unstable
  *
  * This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
        }
 }
 
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
 {
        struct clocksource *cs, *tmp;
        unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
        return select;
 }
 
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
 {
        mutex_lock(&clocksource_mutex);
-       if (__clocksource_watchdog_work())
+       if (__clocksource_watchdog_kthread())
                clocksource_select();
        mutex_unlock(&clocksource_mutex);
+       return 0;
 }
 
 static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
 static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
        /*
         * Run the watchdog first to eliminate unstable clock sources
         */
-       __clocksource_watchdog_work();
+       __clocksource_watchdog_kthread();
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
index 1d92d4a..65bd461 100644 (file)
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
        tmp_iter_page = first_page;
 
        do {
+               cond_resched();
+
                to_remove_page = tmp_iter_page;
                rb_inc_page(cpu_buffer, &tmp_iter_page);
 
index 5470dce..977918d 100644 (file)
@@ -261,7 +261,7 @@ static void __touch_watchdog(void)
  * entering idle state.  This should only be used for scheduler events.
  * Use touch_softlockup_watchdog() for everything else.
  */
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
 {
        /*
         * Preemption can be enabled.  It doesn't matter which CPU's timestamp
@@ -270,7 +270,7 @@ void touch_softlockup_watchdog_sched(void)
        raw_cpu_write(watchdog_touch_ts, 0);
 }
 
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
 {
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
index 1f7020d..7138116 100644 (file)
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
 static unsigned long hardlockup_allcpu_dumped;
 static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
 {
        /*
         * Using __raw here because some code paths have
index 60e8019..0280dea 100644 (file)
@@ -5574,7 +5574,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
        mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
 
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
 {
        if (cpu >= 0)
                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
index 6133167..4966c4f 100644 (file)
@@ -1277,13 +1277,13 @@ config WARN_ALL_UNSEEDED_RANDOM
          time.  This is really bad from a security perspective, and
          so architecture maintainers really need to do what they can
          to get the CRNG seeded sooner after the system is booted.
-         However, since users can not do anything actionble to
+         However, since users cannot do anything actionable to
          address this, by default the kernel will issue only a single
          warning for the first use of unseeded randomness.
 
          Say Y here if you want to receive warnings for all uses of
          unseeded randomness.  This will be of use primarily for
-         those developers interersted in improving the security of
+         those developers interested in improving the security of
          Linux kernels running on their architecture (or
          subarchitecture).
 
index c72577e..a66595b 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/percpu_counter.h>
-#include <linux/notifier.h>
 #include <linux/mutex.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
index 310e29b..30526af 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/rhashtable.h>
 #include <linux/err.h>
 #include <linux/export.h>
-#include <linux/rhashtable.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
index d5b3a3f..812e59e 100644 (file)
@@ -2794,7 +2794,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
                                                copy = end - str;
                                        memcpy(str, args, copy);
                                        str += len;
-                                       args += len;
+                                       args += len + 1;
                                }
                        }
                        if (process)
index 25a5d87..912aae5 100644 (file)
@@ -15,7 +15,6 @@
  * but they are bigger and use more memory for the lookup table.
  */
 
-#include <linux/crc32poly.h>
 #include "xz_private.h"
 
 /*
index 482b90f..09360eb 100644 (file)
 #      endif
 #endif
 
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
 /*
  * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
  * before calling xz_dec_lzma2_run().
index a550635..de64ea6 100644 (file)
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
        depends on NO_BOOTMEM
        depends on SPARSEMEM
        depends on !NEED_PER_CPU_KM
+       depends on 64BIT
        help
          Ordinarily all struct pages are initialised during early boot in a
          single thread. On very large machines this can take a considerable
index 8716bda..26ef77a 100644 (file)
@@ -32,7 +32,7 @@ ifdef CONFIG_CROSS_MEMORY_ATTACH
 mmu-$(CONFIG_MMU)      += process_vm_access.o
 endif
 
-obj-y                  := filemap.o mempool.o oom_kill.o \
+obj-y                  := filemap.o mempool.o oom_kill.o fadvise.o \
                           maccess.o page_alloc.o page-writeback.o \
                           readahead.o swap.o truncate.o vmscan.o shmem.o \
                           util.o mmzone.o vmstat.o backing-dev.o \
@@ -49,7 +49,6 @@ else
        obj-y           += bootmem.o
 endif
 
-obj-$(CONFIG_ADVISE_SYSCALLS)  += fadvise.o
 ifdef CONFIG_MMU
        obj-$(CONFIG_ADVISE_SYSCALLS)   += madvise.o
 endif
index f5981e9..8a8bb87 100644 (file)
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work)
 {
        struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
                                                release_work);
+       struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
 
        mutex_lock(&wb->bdi->cgwb_release_mutex);
        wb_shutdown(wb);
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work)
        css_put(wb->blkcg_css);
        mutex_unlock(&wb->bdi->cgwb_release_mutex);
 
+       /* triggers blkg destruction if cgwb_refcnt becomes zero */
+       blkcg_cgwb_put(blkcg);
+
        fprop_local_destroy_percpu(&wb->memcg_completions);
        percpu_ref_exit(&wb->refcnt);
        wb_exit(wb);
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
                        list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
                        list_add(&wb->memcg_node, memcg_cgwb_list);
                        list_add(&wb->blkcg_node, blkcg_cgwb_list);
+                       blkcg_cgwb_get(blkcg);
                        css_get(memcg_css);
                        css_get(blkcg_css);
                }
index 38c9265..bd10aad 100644 (file)
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
 
 void dump_mm(const struct mm_struct *mm)
 {
-       pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+       pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
 #ifdef CONFIG_MMU
                "get_unmapped_area %px\n"
 #endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
                "tlb_flush_pending %d\n"
                "def_flags: %#lx(%pGv)\n",
 
-               mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+               mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
 #ifdef CONFIG_MMU
                mm->get_unmapped_area,
 #endif
index 2d8376e..467bcd0 100644 (file)
@@ -27,9 +27,9 @@
  * deactivate the pages and clear PG_Referenced.
  */
 
-int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+                          int advice)
 {
-       struct fd f = fdget(fd);
        struct inode *inode;
        struct address_space *mapping;
        struct backing_dev_info *bdi;
@@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
        pgoff_t start_index;
        pgoff_t end_index;
        unsigned long nrpages;
-       int ret = 0;
-
-       if (!f.file)
-               return -EBADF;
 
-       inode = file_inode(f.file);
-       if (S_ISFIFO(inode->i_mode)) {
-               ret = -ESPIPE;
-               goto out;
-       }
+       inode = file_inode(file);
+       if (S_ISFIFO(inode->i_mode))
+               return -ESPIPE;
 
-       mapping = f.file->f_mapping;
-       if (!mapping || len < 0) {
-               ret = -EINVAL;
-               goto out;
-       }
+       mapping = file->f_mapping;
+       if (!mapping || len < 0)
+               return -EINVAL;
 
        bdi = inode_to_bdi(mapping->host);
 
@@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                        /* no bad return value, but ignore advice */
                        break;
                default:
-                       ret = -EINVAL;
+                       return -EINVAL;
                }
-               goto out;
+               return 0;
        }
 
        /*
@@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
 
        switch (advice) {
        case POSIX_FADV_NORMAL:
-               f.file->f_ra.ra_pages = bdi->ra_pages;
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               file->f_ra.ra_pages = bdi->ra_pages;
+               spin_lock(&file->f_lock);
+               file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_RANDOM:
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode |= FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               spin_lock(&file->f_lock);
+               file->f_mode |= FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_SEQUENTIAL:
-               f.file->f_ra.ra_pages = bdi->ra_pages * 2;
-               spin_lock(&f.file->f_lock);
-               f.file->f_mode &= ~FMODE_RANDOM;
-               spin_unlock(&f.file->f_lock);
+               file->f_ra.ra_pages = bdi->ra_pages * 2;
+               spin_lock(&file->f_lock);
+               file->f_mode &= ~FMODE_RANDOM;
+               spin_unlock(&file->f_lock);
                break;
        case POSIX_FADV_WILLNEED:
                /* First and last PARTIAL page! */
@@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                 * Ignore return value because fadvise() shall return
                 * success even if filesystem can't retrieve a hint,
                 */
-               force_page_cache_readahead(mapping, f.file, start_index,
-                                          nrpages);
+               force_page_cache_readahead(mapping, file, start_index, nrpages);
                break;
        case POSIX_FADV_NOREUSE:
                break;
@@ -183,9 +174,32 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                }
                break;
        default:
-               ret = -EINVAL;
+               return -EINVAL;
        }
-out:
+       return 0;
+}
+
+int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+       if (file->f_op->fadvise)
+               return file->f_op->fadvise(file, offset, len, advice);
+
+       return generic_fadvise(file, offset, len, advice);
+}
+EXPORT_SYMBOL(vfs_fadvise);
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+{
+       struct fd f = fdget(fd);
+       int ret;
+
+       if (!f.file)
+               return -EBADF;
+
+       ret = vfs_fadvise(f.file, offset, len, advice);
+
        fdput(f);
        return ret;
 }
@@ -203,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
 }
 
 #endif
+#endif
index 6a47370..7405c9d 100644 (file)
@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
                struct gup_benchmark *gup)
 {
        ktime_t start_time, end_time;
-       unsigned long i, nr, nr_pages, addr, next;
+       unsigned long i, nr_pages, addr, next;
+       int nr;
        struct page **pages;
 
        nr_pages = gup->size / PAGE_SIZE;
index c3bc7e9..0070406 100644 (file)
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
         * but we need to be consistent with PTEs and architectures that
         * can't support a 'special' bit.
         */
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+                       !pfn_t_devmap(pfn));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-       BUG_ON(!pfn_t_devmap(pfn));
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return VM_FAULT_SIGBUS;
@@ -2931,7 +2931,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        else
                page_add_file_rmap(new, true);
        set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
-       if (vma->vm_flags & VM_LOCKED)
+       if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
                mlock_vma_page(new);
        update_mmu_cache_pmd(vma, address, pvmw->pmd);
 }
index 3c21775..5c390f5 100644 (file)
@@ -3326,8 +3326,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        struct page *page;
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
-       const unsigned long mmun_start = start; /* For mmu_notifiers */
-       const unsigned long mmun_end   = end;   /* For mmu_notifiers */
+       unsigned long mmun_start = start;       /* For mmu_notifiers */
+       unsigned long mmun_end   = end;         /* For mmu_notifiers */
 
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
@@ -3339,6 +3339,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
         */
        tlb_remove_check_page_size_change(tlb, sz);
        tlb_start_vma(tlb, vma);
+
+       /*
+        * If sharing possible, alert mmu notifiers of worst case.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        address = start;
        for (; address < end; address += sz) {
@@ -3349,6 +3354,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        spin_unlock(ptl);
+                       /*
+                        * We just unmapped a page of PMDs by clearing a PUD.
+                        * The caller's TLB flush range should cover this area.
+                        */
                        continue;
                }
 
@@ -3431,12 +3440,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 {
        struct mm_struct *mm;
        struct mmu_gather tlb;
+       unsigned long tlb_start = start;
+       unsigned long tlb_end = end;
+
+       /*
+        * If shared PMDs were possibly used within this vma range, adjust
+        * start/end for worst case tlb flushing.
+        * Note that we can not be sure if PMDs are shared until we try to
+        * unmap pages.  However, we want to make sure TLB flushing covers
+        * the largest possible range.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
 
        mm = vma->vm_mm;
 
-       tlb_gather_mmu(&tlb, mm, start, end);
+       tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
        __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
-       tlb_finish_mmu(&tlb, start, end);
+       tlb_finish_mmu(&tlb, tlb_start, tlb_end);
 }
 
 /*
@@ -4298,11 +4318,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
        pte_t pte;
        struct hstate *h = hstate_vma(vma);
        unsigned long pages = 0;
+       unsigned long f_start = start;
+       unsigned long f_end = end;
+       bool shared_pmd = false;
+
+       /*
+        * In the case of shared PMDs, the area to flush could be beyond
+        * start/end.  Set f_start/f_end to cover the maximum possible
+        * range if PMD sharing is possible.
+        */
+       adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
 
        BUG_ON(address >= end);
-       flush_cache_range(vma, address, end);
+       flush_cache_range(vma, f_start, f_end);
 
-       mmu_notifier_invalidate_range_start(mm, start, end);
+       mmu_notifier_invalidate_range_start(mm, f_start, f_end);
        i_mmap_lock_write(vma->vm_file->f_mapping);
        for (; address < end; address += huge_page_size(h)) {
                spinlock_t *ptl;
@@ -4313,6 +4343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        pages++;
                        spin_unlock(ptl);
+                       shared_pmd = true;
                        continue;
                }
                pte = huge_ptep_get(ptep);
@@ -4348,9 +4379,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
         * may have cleared our pud entry and done put_page on the page table:
         * once we release i_mmap_rwsem, another task can do the final put_page
-        * and that page table be reused and filled with junk.
+        * and that page table be reused and filled with junk.  If we actually
+        * did unshare a page of pmds, flush the range corresponding to the pud.
         */
-       flush_hugetlb_tlb_range(vma, start, end);
+       if (shared_pmd)
+               flush_hugetlb_tlb_range(vma, f_start, f_end);
+       else
+               flush_hugetlb_tlb_range(vma, start, end);
        /*
         * No need to call mmu_notifier_invalidate_range() we are downgrading
         * page table protection not changing it to point to a new page.
@@ -4358,7 +4393,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * See Documentation/vm/mmu_notifier.rst
         */
        i_mmap_unlock_write(vma->vm_file->f_mapping);
-       mmu_notifier_invalidate_range_end(mm, start, end);
+       mmu_notifier_invalidate_range_end(mm, f_start, f_end);
 
        return pages << h->order;
 }
@@ -4545,13 +4580,41 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
        /*
         * check on proper vm_flags and page table alignment
         */
-       if (vma->vm_flags & VM_MAYSHARE &&
-           vma->vm_start <= base && end <= vma->vm_end)
+       if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
                return true;
        return false;
 }
 
 /*
+ * Determine if start,end range within vma could be mapped by shared pmd.
+ * If yes, adjust start and end to cover range associated with possible
+ * shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+       unsigned long check_addr = *start;
+
+       if (!(vma->vm_flags & VM_MAYSHARE))
+               return;
+
+       for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+               unsigned long a_start = check_addr & PUD_MASK;
+               unsigned long a_end = a_start + PUD_SIZE;
+
+               /*
+                * If sharing is possible, adjust start/end if necessary.
+                */
+               if (range_in_vma(vma, a_start, a_end)) {
+                       if (a_start < *start)
+                               *start = a_start;
+                       if (a_end > *end)
+                               *end = a_end;
+               }
+       }
+}
+
+/*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
  * !shared pmd case because we can allocate the pmd later as well, it makes the
@@ -4648,6 +4711,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+                               unsigned long *start, unsigned long *end)
+{
+}
 #define want_pmd_share()       (0)
 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
index 9a085d5..17dd883 100644 (file)
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
 
        kmemleak_initialized = 1;
 
+       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
+                                    &kmemleak_fops);
+       if (!dentry)
+               pr_warn("Failed to create the debugfs kmemleak file\n");
+
        if (kmemleak_error) {
                /*
                 * Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
                return -ENOMEM;
        }
 
-       dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
-                                    &kmemleak_fops);
-       if (!dentry)
-               pr_warn("Failed to create the debugfs kmemleak file\n");
        mutex_lock(&scan_mutex);
        start_scan_thread();
        mutex_unlock(&scan_mutex);
index 972a9ea..71d21df 100644 (file)
@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
                new_flags |= VM_DONTDUMP;
                break;
        case MADV_DODUMP:
-               if (new_flags & VM_SPECIAL) {
+               if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
                        error = -EINVAL;
                        goto out;
                }
index 4ead5a4..e79cb59 100644 (file)
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
        if (mem_cgroup_out_of_memory(memcg, mask, order))
                return OOM_SUCCESS;
 
-       WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
-               "This looks like a misconfiguration or a kernel bug.");
        return OOM_FAILED;
 }
 
index 9eea6e8..38d94b7 100644 (file)
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
                        if (__PageMovable(page))
                                return pfn;
                        if (PageHuge(page)) {
-                               if (page_huge_active(page))
+                               if (hugepage_migration_supported(page_hstate(page)) &&
+                                   page_huge_active(page))
                                        return pfn;
                                else
                                        pfn = round_up(pfn + 1,
index d6a2e89..84381b5 100644 (file)
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                        mlock_vma_page(new);
 
+               if (PageTransHuge(page) && PageMlocked(page))
+                       clear_page_mlock(page);
+
                /* No need to invalidate - it was non-present before */
                update_mmu_cache(vma, pvmw.address, pvmw.pte);
        }
@@ -1411,7 +1414,7 @@ retry:
                                 * we encounter them after the rest of the list
                                 * is processed.
                                 */
-                               if (PageTransHuge(page)) {
+                               if (PageTransHuge(page) && !PageHuge(page)) {
                                        lock_page(page);
                                        rc = split_huge_page_to_list(page, from);
                                        unlock_page(page);
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
        return newpage;
 }
 
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
-                                       unsigned long nr_pages)
-{
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
-               spin_lock(&pgdat->numabalancing_migrate_lock);
-               pgdat->numabalancing_migrate_nr_pages = 0;
-               pgdat->numabalancing_migrate_next_window = jiffies +
-                       msecs_to_jiffies(migrate_interval_millisecs);
-               spin_unlock(&pgdat->numabalancing_migrate_lock);
-       }
-       if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
-               trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
-                                                               nr_pages);
-               return true;
-       }
-
-       /*
-        * This is an unlocked non-atomic update so errors are possible.
-        * The consequences are failing to migrate when we potentiall should
-        * have which is not severe enough to warrant locking. If it is ever
-        * a problem, it can be converted to a per-cpu counter.
-        */
-       pgdat->numabalancing_migrate_nr_pages += nr_pages;
-       return false;
-}
-
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
        int page_lru;
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        if (page_is_file_cache(page) && PageDirty(page))
                goto out;
 
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (numamigrate_update_ratelimit(pgdat, 1))
-               goto out;
-
        isolated = numamigrate_isolate_page(pgdat, page);
        if (!isolated)
                goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
        unsigned long mmun_start = address & HPAGE_PMD_MASK;
        unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
 
-       /*
-        * Rate-limit the amount of data that is being migrated to a node.
-        * Optimal placement is no good if the memory bus is saturated and
-        * all the time is being spent migrating!
-        */
-       if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
-               goto out_dropref;
-
        new_page = alloc_pages_node(node,
                (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
                HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 out_fail:
        count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
        ptl = pmd_lock(mm, pmd);
        if (pmd_same(*pmd, entry)) {
                entry = pmd_modify(entry, vma->vm_page_prot);
index b5b25e4..f10aa53 100644 (file)
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
 
                        tlb_gather_mmu(&tlb, mm, start, end);
                        if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
+                               tlb_finish_mmu(&tlb, start, end);
                                ret = false;
                                continue;
                        }
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
        }
 
        select_bad_process(oc);
-       /* Found nothing?!?! Either we hang forever, or we panic. */
-       if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
+       /* Found nothing?!?! */
+       if (!oc->chosen) {
                dump_header(oc, NULL);
-               panic("Out of memory and no killable processes...\n");
+               pr_warn("Out of memory and no killable processes...\n");
+               /*
+                * If we got here due to an actual allocation at the
+                * system level, we cannot survive this and will enter
+                * an endless loop in the allocator. Bail out now.
+                */
+               if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+                       panic("System is deadlocked on memory\n");
        }
        if (oc->chosen && oc->chosen != (void *)-1UL)
                oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
index 6551d3b..84ae9bf 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/mpage.h>
 #include <linux/rmap.h>
 #include <linux/percpu.h>
-#include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
index e75865d..706a738 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
 #include <linux/oom.h>
-#include <linux/notifier.h>
 #include <linux/topology.h>
 #include <linux/sysctl.h>
 #include <linux/cpu.h>
@@ -6198,8 +6197,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
 static void pgdat_init_numabalancing(struct pglist_data *pgdat)
 {
        spin_lock_init(&pgdat->numabalancing_migrate_lock);
-       pgdat->numabalancing_migrate_nr_pages = 0;
-       pgdat->numabalancing_migrate_next_window = jiffies;
 }
 #else
 static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
@@ -7709,6 +7706,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
+
+                       if (!hugepage_migration_supported(page_hstate(page)))
+                               goto unmovable;
+
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }
index a749d4d..4b90682 100644 (file)
@@ -1212,6 +1212,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 {
        if (!chunk)
                return;
+       pcpu_mem_free(chunk->md_blocks);
        pcpu_mem_free(chunk->bound_map);
        pcpu_mem_free(chunk->alloc_map);
        pcpu_mem_free(chunk);
index a59ea70..4e63014 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/file.h>
 #include <linux/mm_inline.h>
 #include <linux/blk-cgroup.h>
+#include <linux/fadvise.h>
 
 #include "internal.h"
 
@@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping,
 }
 EXPORT_SYMBOL_GPL(page_cache_async_readahead);
 
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
-            pgoff_t index, unsigned long nr)
-{
-       if (!mapping || !mapping->a_ops)
-               return -EINVAL;
-
-       /*
-        * Readahead doesn't make sense for DAX inodes, but we don't want it
-        * to report a failure either.  Instead, we just return success and
-        * don't do any work.
-        */
-       if (dax_mapping(mapping))
-               return 0;
-
-       return force_page_cache_readahead(mapping, filp, index, nr);
-}
-
 ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
@@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
 
        ret = -EBADF;
        f = fdget(fd);
-       if (f.file) {
-               if (f.file->f_mode & FMODE_READ) {
-                       struct address_space *mapping = f.file->f_mapping;
-                       pgoff_t start = offset >> PAGE_SHIFT;
-                       pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
-                       unsigned long len = end - start + 1;
-                       ret = do_readahead(mapping, f.file, start, len);
-               }
-               fdput(f);
-       }
+       if (!f.file || !(f.file->f_mode & FMODE_READ))
+               goto out;
+
+       /*
+        * The readahead() syscall is intended to run only on files
+        * that can execute readahead. If readahead is not possible
+        * on this file, then we must return -EINVAL.
+        */
+       ret = -EINVAL;
+       if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+           !S_ISREG(file_inode(f.file)->i_mode))
+               goto out;
+
+       ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+out:
+       fdput(f);
        return ret;
 }
 
index eb47780..1e79fac 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
        }
 
        /*
-        * We have to assume the worse case ie pmd for invalidation. Note that
-        * the page can not be free in this function as call of try_to_unmap()
-        * must hold a reference on the page.
+        * For THP, we have to assume the worse case ie pmd for invalidation.
+        * For hugetlb, it could be much worse if we need to do pud
+        * invalidation in the case of pmd sharing.
+        *
+        * Note that the page can not be free in this function as call of
+        * try_to_unmap() must hold a reference on the page.
         */
        end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+       if (PageHuge(page)) {
+               /*
+                * If sharing is possible, start and end will be adjusted
+                * accordingly.
+                */
+               adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+       }
        mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
 
        while (page_vma_mapped_walk(&pvmw)) {
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
                address = pvmw.address;
 
+               if (PageHuge(page)) {
+                       if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+                               /*
+                                * huge_pmd_unshare unmapped an entire PMD
+                                * page.  There is no way of knowing exactly
+                                * which PMDs may be cached for this mm, so
+                                * we must flush them all.  start/end were
+                                * already adjusted above to cover this range.
+                                */
+                               flush_cache_range(vma, start, end);
+                               flush_tlb_range(vma, start, end);
+                               mmu_notifier_invalidate_range(mm, start, end);
+
+                               /*
+                                * The ref count of the PMD page was dropped
+                                * which is part of the way map counting
+                                * is done for shared PMDs.  Return 'true'
+                                * here.  When there is no other sharing,
+                                * huge_pmd_unshare returns false and we will
+                                * unmap the actual page and drop map count
+                                * to zero.
+                                */
+                               page_vma_mapped_walk_done(&pvmw);
+                               break;
+                       }
+               }
 
                if (IS_ENABLED(CONFIG_MIGRATION) &&
                    (flags & TTU_MIGRATION) &&
index 0376c12..4469426 100644 (file)
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
                        mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
+
+               lockdep_annotate_inode_mutex_key(inode);
        } else
                shmem_free_inode(sb);
        return inode;
index ce2b9e5..8da34a8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -19,7 +19,6 @@
 #include <linux/slab.h>
 #include "slab.h"
 #include <linux/proc_fs.h>
-#include <linux/notifier.h>
 #include <linux/seq_file.h>
 #include <linux/kasan.h>
 #include <linux/cpu.h>
index d2890a4..9e3ebd2 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(kvmalloc_node);
 
 /**
- * kvfree - free memory allocated with kvmalloc
- * @addr: pointer returned by kvmalloc
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
  *
- * If the memory is allocated from vmalloc area it is freed with vfree().
- * Otherwise kfree() is used.
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are certain
+ * that you know which one to use.
+ *
+ * Context: Any context except NMI.
  */
 void kvfree(const void *addr)
 {
index ea517be..cdc32a3 100644 (file)
 #define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
 
 /*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-       struct task_struct *g, *p;
-
-       count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
-       /*
-        * Single threaded tasks need not iterate the entire
-        * list of process. We can avoid the flushing as well
-        * since the mm's seqnum was increased and don't have
-        * to worry about other threads' seqnum. Current's
-        * flush will occur upon the next lookup.
-        */
-       if (atomic_read(&mm->mm_users) == 1)
-               return;
-
-       rcu_read_lock();
-       for_each_process_thread(g, p) {
-               /*
-                * Only flush the vmacache pointers as the
-                * mm seqnum is already set and curr's will
-                * be set upon invalidation when the next
-                * lookup is done.
-                */
-               if (mm == p->mm)
-                       vmacache_flush(p);
-       }
-       rcu_read_unlock();
-}
-
-/*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this
  * task's vmacache pertains to a different mm (ie, its own).  There is
index 7e7d255..c5ef724 100644 (file)
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
        delta = freeable >> priority;
        delta *= 4;
        do_div(delta, shrinker->seeks);
+
+       /*
+        * Make sure we apply some minimal pressure on default priority
+        * even on small cgroups. Stale objects are not only consuming memory
+        * by themselves, but can also hold a reference to a dying cgroup,
+        * preventing it from being reclaimed. A dying cgroup with all
+        * corresponding structures like per-cpu stats and kmem caches
+        * can be really big, so it may lead to a significant waste of memory.
+        */
+       delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
        total_scan += delta;
        if (total_scan < 0) {
                pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -569,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
                        struct mem_cgroup *memcg, int priority)
 {
        struct memcg_shrinker_map *map;
-       unsigned long freed = 0;
-       int ret, i;
+       unsigned long ret, freed = 0;
+       int i;
 
        if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
                return 0;
@@ -666,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
                                 struct mem_cgroup *memcg,
                                 int priority)
 {
+       unsigned long ret, freed = 0;
        struct shrinker *shrinker;
-       unsigned long freed = 0;
-       int ret;
 
        if (!mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
index 8ba0870..7878da7 100644 (file)
@@ -1275,6 +1275,9 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
+#else
+       "", /* nr_tlb_remote_flush */
+       "", /* nr_tlb_remote_flush_received */
 #endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
@@ -1283,7 +1286,6 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_DEBUG_VM_VMACACHE
        "vmacache_find_calls",
        "vmacache_find_hits",
-       "vmacache_full_flushes",
 #endif
 #ifdef CONFIG_SWAP
        "swap_ra",
index 71c20c1..9f481cf 100644 (file)
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
                 * the packet to be exactly of that size to make the link
                 * throughput estimation effective.
                 */
-               skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+               skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
 
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
        struct batadv_priv *bat_priv;
        struct sk_buff *skb;
        u32 elp_interval;
+       bool ret;
 
        bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
        hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
                 * may sleep and that is not allowed in an rcu protected
                 * context. Therefore schedule a task for that.
                 */
-               queue_work(batadv_event_workqueue,
-                          &hardif_neigh->bat_v.metric_work);
+               ret = queue_work(batadv_event_workqueue,
+                                &hardif_neigh->bat_v.metric_work);
+
+               if (!ret)
+                       batadv_hardif_neigh_put(hardif_neigh);
        }
        rcu_read_unlock();
 
index ff9659a..5f1aeed 100644 (file)
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
        struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
+       bool ret;
 
        ethhdr = eth_hdr(skb);
 
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
        if (unlikely(!backbone_gw))
                return true;
 
-       queue_work(batadv_event_workqueue, &backbone_gw->report_work);
-       /* backbone_gw is unreferenced in the report work function function */
+       ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+       /* backbone_gw is unreferenced in the report work function function
+        * if queue_work() call was successful
+        */
+       if (!ret)
+               batadv_backbone_gw_put(backbone_gw);
 
        return true;
 }
index 8b198ee..140c61a 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
@@ -348,6 +349,9 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  * @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (gw.list_lock).
  */
 static void batadv_gw_node_add(struct batadv_priv *bat_priv,
                               struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *gw_node;
 
+       lockdep_assert_held(&bat_priv->gw.list_lock);
+
        if (gateway->bandwidth_down == 0)
                return;
 
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
        gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
        gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
 
-       spin_lock_bh(&bat_priv->gw.list_lock);
        kref_get(&gw_node->refcount);
        hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
-       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                   "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
 {
        struct batadv_gw_node *gw_node, *curr_gw = NULL;
 
+       spin_lock_bh(&bat_priv->gw.list_lock);
        gw_node = batadv_gw_node_get(bat_priv, orig_node);
        if (!gw_node) {
                batadv_gw_node_add(bat_priv, orig_node, gateway);
+               spin_unlock_bh(&bat_priv->gw.list_lock);
                goto out;
        }
+       spin_unlock_bh(&bat_priv->gw.list_lock);
 
        if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
            gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
index 8da3c93..3ccc75e 100644 (file)
@@ -25,7 +25,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.2"
+#define BATADV_SOURCE_VERSION "2018.3"
 #endif
 
 /* B.A.T.M.A.N. parameters */
index c357844..34caf12 100644 (file)
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
        spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
        struct list_head *list;
 
+       /* Select ingoing or outgoing coding node */
+       if (in_coding) {
+               lock = &orig_neigh_node->in_coding_list_lock;
+               list = &orig_neigh_node->in_coding_list;
+       } else {
+               lock = &orig_neigh_node->out_coding_list_lock;
+               list = &orig_neigh_node->out_coding_list;
+       }
+
+       spin_lock_bh(lock);
+
        /* Check if nc_node is already added */
        nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
 
        /* Node found */
        if (nc_node)
-               return nc_node;
+               goto unlock;
 
        nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
        if (!nc_node)
-               return NULL;
+               goto unlock;
 
        /* Initialize nc_node */
        INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
        kref_get(&orig_neigh_node->refcount);
        nc_node->orig_node = orig_neigh_node;
 
-       /* Select ingoing or outgoing coding node */
-       if (in_coding) {
-               lock = &orig_neigh_node->in_coding_list_lock;
-               list = &orig_neigh_node->in_coding_list;
-       } else {
-               lock = &orig_neigh_node->out_coding_list_lock;
-               list = &orig_neigh_node->out_coding_list;
-       }
-
        batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
                   nc_node->addr, nc_node->orig_node->orig);
 
        /* Add nc_node to orig_node */
-       spin_lock_bh(lock);
        kref_get(&nc_node->refcount);
        list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
        spin_unlock_bh(lock);
 
        return nc_node;
index 1485263..626ddca 100644 (file)
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
        struct batadv_softif_vlan *vlan;
        int err;
 
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (vlan) {
                batadv_softif_vlan_put(vlan);
+               spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
                return -EEXIST;
        }
 
        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
-       if (!vlan)
+       if (!vlan) {
+               spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
                return -ENOMEM;
+       }
 
        vlan->bat_priv = bat_priv;
        vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 
        atomic_set(&vlan->ap_isolation, 0);
 
+       kref_get(&vlan->refcount);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+       /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+        * sleeping behavior of the sysfs functions and the fs_reclaim lock
+        */
        err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
        if (err) {
-               kfree(vlan);
+               /* ref for the function */
+               batadv_softif_vlan_put(vlan);
+
+               /* ref for the list */
+               batadv_softif_vlan_put(vlan);
                return err;
        }
 
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       kref_get(&vlan->refcount);
-       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
        /* add a new TT local entry. This one will be marked with the NOPURGE
         * flag
         */
index f2eef43..09427fc 100644 (file)
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
                                                                        \
        return __batadv_store_uint_attr(buff, count, _min, _max,        \
                                        _post_func, attr,               \
-                                       &bat_priv->_var, net_dev);      \
+                                       &bat_priv->_var, net_dev,       \
+                                       NULL);  \
 }
 
 #define BATADV_ATTR_SIF_SHOW_UINT(_name, _var)                         \
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj,                  \
                                                                        \
        length = __batadv_store_uint_attr(buff, count, _min, _max,      \
                                          _post_func, attr,             \
-                                         &hard_iface->_var, net_dev);  \
+                                         &hard_iface->_var,            \
+                                         hard_iface->soft_iface,       \
+                                         net_dev);                     \
                                                                        \
        batadv_hardif_put(hard_iface);                          \
        return length;                                                  \
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
 
 static int batadv_store_uint_attr(const char *buff, size_t count,
                                  struct net_device *net_dev,
+                                 struct net_device *slave_dev,
                                  const char *attr_name,
                                  unsigned int min, unsigned int max,
                                  atomic_t *attr)
 {
+       char ifname[IFNAMSIZ + 3] = "";
        unsigned long uint_val;
        int ret;
 
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
        if (atomic_read(attr) == uint_val)
                return count;
 
-       batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
-                   attr_name, atomic_read(attr), uint_val);
+       if (slave_dev)
+               snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+       batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+                   attr_name, ifname, atomic_read(attr), uint_val);
 
        atomic_set(attr, uint_val);
        return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
                                        void (*post_func)(struct net_device *),
                                        const struct attribute *attr,
                                        atomic_t *attr_store,
-                                       struct net_device *net_dev)
+                                       struct net_device *net_dev,
+                                       struct net_device *slave_dev)
 {
        int ret;
 
-       ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
-                                    attr_store);
+       ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+                                    attr->name, min, max, attr_store);
        if (post_func && ret)
                post_func(net_dev);
 
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
        return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
                                        batadv_post_gw_reselect, attr,
                                        &bat_priv->gw.sel_class,
-                                       bat_priv->soft_iface);
+                                       bat_priv->soft_iface, NULL);
 }
 
 static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
        if (old_tp_override == tp_override)
                goto out;
 
-       batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
-                   "throughput_override",
+       batadv_info(hard_iface->soft_iface,
+                   "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+                   "throughput_override", net_dev->name,
                    old_tp_override / 10, old_tp_override % 10,
                    tp_override / 10, tp_override % 10);
 
index 12a2b7d..d21624c 100644 (file)
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
 {
        struct batadv_tt_orig_list_entry *orig_entry;
 
+       spin_lock_bh(&tt_global->list_lock);
+
        orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
        if (orig_entry) {
                /* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
        orig_entry->flags = flags;
        kref_init(&orig_entry->refcount);
 
-       spin_lock_bh(&tt_global->list_lock);
        kref_get(&orig_entry->refcount);
        hlist_add_head_rcu(&orig_entry->list,
                           &tt_global->orig_list);
-       spin_unlock_bh(&tt_global->list_lock);
        atomic_inc(&tt_global->orig_list_count);
 
 sync_flags:
@@ -1647,6 +1647,8 @@ sync_flags:
 out:
        if (orig_entry)
                batadv_tt_orig_list_entry_put(orig_entry);
+
+       spin_unlock_bh(&tt_global->list_lock);
 }
 
 /**
index a637458..40e69c9 100644 (file)
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
 {
        struct batadv_tvlv_handler *tvlv_handler;
 
+       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (tvlv_handler) {
+               spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
                batadv_tvlv_handler_put(tvlv_handler);
                return;
        }
 
        tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
-       if (!tvlv_handler)
+       if (!tvlv_handler) {
+               spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
                return;
+       }
 
        tvlv_handler->ogm_handler = optr;
        tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
        kref_init(&tvlv_handler->refcount);
        INIT_HLIST_NODE(&tvlv_handler->list);
 
-       spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        kref_get(&tvlv_handler->refcount);
        hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
index 3bdc8f3..ccce954 100644 (file)
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
        /* LE address type */
        addr_type = le_addr_type(cp->addr.type);
 
-       hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
-       err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+       /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
+       err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
        if (err < 0) {
                err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
                                        MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
                goto done;
        }
 
-       /* Abort any ongoing SMP pairing */
-       smp_cancel_pairing(conn);
 
        /* Defer clearing up the connection parameters until closing to
         * give a chance of keeping them if a repairing happens.
index ae91e2d..73f7211 100644 (file)
@@ -83,6 +83,7 @@ enum {
 
 struct smp_dev {
        /* Secure Connections OOB data */
+       bool                    local_oob;
        u8                      local_pk[64];
        u8                      local_rand[16];
        bool                    debug_key;
@@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
 
        memcpy(rand, smp->local_rand, 16);
 
+       smp->local_oob = true;
+
        return 0;
 }
 
@@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
         * successfully received our local OOB data - therefore set the
         * flag to indicate that local OOB is in use.
         */
-       if (req->oob_flag == SMP_OOB_PRESENT)
+       if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
                set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
 
        /* SMP over BR/EDR requires special treatment */
@@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
         * successfully received our local OOB data - therefore set the
         * flag to indicate that local OOB is in use.
         */
-       if (rsp->oob_flag == SMP_OOB_PRESENT)
+       if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
                set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
 
        smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -2419,30 +2422,51 @@ unlock:
        return ret;
 }
 
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type)
 {
-       struct l2cap_conn *conn = hcon->l2cap_data;
+       struct hci_conn *hcon;
+       struct l2cap_conn *conn;
        struct l2cap_chan *chan;
        struct smp_chan *smp;
+       int err;
+
+       err = hci_remove_ltk(hdev, bdaddr, addr_type);
+       hci_remove_irk(hdev, bdaddr, addr_type);
+
+       hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+       if (!hcon)
+               goto done;
 
+       conn = hcon->l2cap_data;
        if (!conn)
-               return;
+               goto done;
 
        chan = conn->smp;
        if (!chan)
-               return;
+               goto done;
 
        l2cap_chan_lock(chan);
 
        smp = chan->data;
        if (smp) {
+               /* Set keys to NULL to make sure smp_failure() does not try to
+                * remove and free already invalidated rcu list entries. */
+               smp->ltk = NULL;
+               smp->slave_ltk = NULL;
+               smp->remote_irk = NULL;
+
                if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
                        smp_failure(conn, 0);
                else
                        smp_failure(conn, SMP_UNSPECIFIED);
+               err = 0;
        }
 
        l2cap_chan_unlock(chan);
+
+done:
+       return err;
 }
 
 static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -2697,7 +2721,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
         * key was set/generated.
         */
        if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
-               struct smp_dev *smp_dev = chan->data;
+               struct l2cap_chan *hchan = hdev->smp_data;
+               struct smp_dev *smp_dev;
+
+               if (!hchan || !hchan->data)
+                       return SMP_UNSPECIFIED;
+
+               smp_dev = hchan->data;
 
                tfm_ecdh = smp_dev->tfm_ecdh;
        } else {
@@ -3230,6 +3260,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
                return ERR_CAST(tfm_ecdh);
        }
 
+       smp->local_oob = false;
        smp->tfm_aes = tfm_aes;
        smp->tfm_cmac = tfm_cmac;
        smp->tfm_ecdh = tfm_ecdh;
index 0ff6247..121edad 100644 (file)
@@ -181,7 +181,8 @@ enum smp_key_pref {
 };
 
 /* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+                                 u8 addr_type);
 bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
                             enum smp_key_pref key_pref);
 int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
index f0fc182..b64e164 100644 (file)
@@ -59,7 +59,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
        req.is_set = is_set;
        req.pid = current->pid;
        req.cmd = optname;
-       req.addr = (long)optval;
+       req.addr = (long __force __user)optval;
        req.len = optlen;
        mutex_lock(&bpfilter_lock);
        if (!info.pid)
@@ -98,7 +98,7 @@ static int __init load_umh(void)
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
 
        /* health check that usermode process started correctly */
-       if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) {
+       if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
                stop_umh();
                return -EFAULT;
        }
index 6e0dc6b..37278dc 100644 (file)
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
                                   struct sk_buff *skb,
                                   const struct nf_hook_state *state)
 {
-       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+       if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+           !netif_is_l3_master(skb->dev)) {
                state->okfn(state->net, state->sk, skb);
                return NF_STOLEN;
        }
index 325fc50..82114e1 100644 (file)
@@ -93,7 +93,6 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
-#include <linux/notifier.h>
 #include <linux/skbuff.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
index 65fc366..8c0ed22 100644 (file)
@@ -2592,7 +2592,7 @@ send_done:
        if (!nlh) {
                err = devlink_dpipe_send_and_alloc_skb(&skb, info);
                if (err)
-                       goto err_skb_send_alloc;
+                       return err;
                goto send_done;
        }
        return genlmsg_reply(skb, info);
@@ -2600,7 +2600,6 @@ send_done:
 nla_put_failure:
        err = -EMSGSIZE;
 err_resource_put:
-err_skb_send_alloc:
        nlmsg_free(skb);
        return err;
 }
index c9993c6..0762aaf 100644 (file)
@@ -1483,6 +1483,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
 {
        struct ethtool_wolinfo wol;
+       int ret;
 
        if (!dev->ethtool_ops->set_wol)
                return -EOPNOTSUPP;
@@ -1490,7 +1491,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
        if (copy_from_user(&wol, useraddr, sizeof(wol)))
                return -EFAULT;
 
-       return dev->ethtool_ops->set_wol(dev, &wol);
+       ret = dev->ethtool_ops->set_wol(dev, &wol);
+       if (ret)
+               return ret;
+
+       dev->wol_enabled = !!wol.wolopts;
+
+       return 0;
 }
 
 static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
@@ -2624,6 +2631,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GPHYSTATS:
        case ETHTOOL_GTSO:
        case ETHTOOL_GPERMADDR:
+       case ETHTOOL_GUFO:
        case ETHTOOL_GGSO:
        case ETHTOOL_GGRO:
        case ETHTOOL_GFLAGS:
index c25eb36..5e00f2b 100644 (file)
@@ -2282,14 +2282,21 @@ static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .arg2_type      = ARG_ANYTHING,
 };
 
+#define sk_msg_iter_var(var)                   \
+       do {                                    \
+               var++;                          \
+               if (var == MAX_SKB_FRAGS)       \
+                       var = 0;                \
+       } while (0)
+
 BPF_CALL_4(bpf_msg_pull_data,
           struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
 {
-       unsigned int len = 0, offset = 0, copy = 0;
+       unsigned int len = 0, offset = 0, copy = 0, poffset = 0;
+       int bytes = end - start, bytes_sg_total;
        struct scatterlist *sg = msg->sg_data;
        int first_sg, last_sg, i, shift;
        unsigned char *p, *to, *from;
-       int bytes = end - start;
        struct page *page;
 
        if (unlikely(flags || end <= start))
@@ -2299,21 +2306,22 @@ BPF_CALL_4(bpf_msg_pull_data,
        i = msg->sg_start;
        do {
                len = sg[i].length;
-               offset += len;
                if (start < offset + len)
                        break;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               offset += len;
+               sk_msg_iter_var(i);
        } while (i != msg->sg_end);
 
        if (unlikely(start >= offset + len))
                return -EINVAL;
 
-       if (!msg->sg_copy[i] && bytes <= len)
-               goto out;
-
        first_sg = i;
+       /* The start may point into the sg element so we need to also
+        * account for the headroom.
+        */
+       bytes_sg_total = start - offset + bytes;
+       if (!msg->sg_copy[i] && bytes_sg_total <= len)
+               goto out;
 
        /* At this point we need to linearize multiple scatterlist
         * elements or a single shared page. Either way we need to
@@ -2327,37 +2335,33 @@ BPF_CALL_4(bpf_msg_pull_data,
         */
        do {
                copy += sg[i].length;
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
-               if (bytes < copy)
+               sk_msg_iter_var(i);
+               if (bytes_sg_total <= copy)
                        break;
        } while (i != msg->sg_end);
        last_sg = i;
 
-       if (unlikely(copy < end - start))
+       if (unlikely(bytes_sg_total > copy))
                return -EINVAL;
 
-       page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
+       page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
+                          get_order(copy));
        if (unlikely(!page))
                return -ENOMEM;
        p = page_address(page);
-       offset = 0;
 
        i = first_sg;
        do {
                from = sg_virt(&sg[i]);
                len = sg[i].length;
-               to = p + offset;
+               to = p + poffset;
 
                memcpy(to, from, len);
-               offset += len;
+               poffset += len;
                sg[i].length = 0;
                put_page(sg_page(&sg[i]));
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (i != last_sg);
 
        sg[first_sg].length = copy;
@@ -2367,11 +2371,15 @@ BPF_CALL_4(bpf_msg_pull_data,
         * had a single entry though we can just replace it and
         * be done. Otherwise walk the ring and shift the entries.
         */
-       shift = last_sg - first_sg - 1;
+       WARN_ON_ONCE(last_sg == first_sg);
+       shift = last_sg > first_sg ?
+               last_sg - first_sg - 1 :
+               MAX_SKB_FRAGS - first_sg + last_sg - 1;
        if (!shift)
                goto out;
 
-       i = first_sg + 1;
+       i = first_sg;
+       sk_msg_iter_var(i);
        do {
                int move_from;
 
@@ -2388,15 +2396,13 @@ BPF_CALL_4(bpf_msg_pull_data,
                sg[move_from].page_link = 0;
                sg[move_from].offset = 0;
 
-               i++;
-               if (i == MAX_SKB_FRAGS)
-                       i = 0;
+               sk_msg_iter_var(i);
        } while (1);
        msg->sg_end -= shift;
        if (msg->sg_end < 0)
                msg->sg_end += MAX_SKB_FRAGS;
 out:
-       msg->data = sg_virt(&sg[i]) + start - offset;
+       msg->data = sg_virt(&sg[first_sg]) + start - offset;
        msg->data_end = msg->data + bytes;
 
        return 0;
@@ -7281,7 +7287,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct sk_reuseport_md, ip_protocol):
-               BUILD_BUG_ON(hweight_long(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
+               BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
                SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
                                                    BPF_W, 0);
                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
index aa19d86..91592fc 100644 (file)
@@ -1180,6 +1180,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                lladdr = neigh->ha;
        }
 
+       /* Update confirmed timestamp for neighbour entry after we
+        * received ARP packet even if it doesn't change IP to MAC binding.
+        */
+       if (new & NUD_CONNECTED)
+               neigh->confirmed = jiffies;
+
        /* If entry was valid and address is not changed,
           do not change entry state, if new one is STALE.
         */
@@ -1201,15 +1207,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
                }
        }
 
-       /* Update timestamps only once we know we will make a change to the
+       /* Update timestamp only once we know we will make a change to the
         * neighbour entry. Otherwise we risk to move the locktime window with
         * noop updates and ignore relevant ARP updates.
         */
-       if (new != old || lladdr != neigh->ha) {
-               if (new & NUD_CONNECTED)
-                       neigh->confirmed = jiffies;
+       if (new != old || lladdr != neigh->ha)
                neigh->updated = jiffies;
-       }
 
        if (new != old) {
                neigh_del_timer(neigh);
index 57557a6..de1d1ba 100644 (file)
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
        }
 }
 
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
 static void poll_one_napi(struct napi_struct *napi)
 {
-       int work = 0;
-
-       /* net_rx_action's ->poll() invocations and our's are
-        * synchronized by this test which is only made while
-        * holding the napi->poll_lock.
-        */
-       if (!test_bit(NAPI_STATE_SCHED, &napi->state))
-               return;
+       int work;
 
        /* If we set this bit but see that it has already been set,
         * that indicates that napi has been disabled and we need
@@ -187,16 +169,16 @@ static void poll_napi(struct net_device *dev)
        }
 }
 
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
 {
-       const struct net_device_ops *ops;
        struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+       const struct net_device_ops *ops;
 
        /* Don't do any rx activity if the dev_lock mutex is held
         * the dev_open/close paths use this to block netpoll activity
         * while changing device state
         */
-       if (down_trylock(&ni->dev_lock))
+       if (!ni || down_trylock(&ni->dev_lock))
                return;
 
        if (!netif_running(dev)) {
@@ -205,13 +187,8 @@ static void netpoll_poll_dev(struct net_device *dev)
        }
 
        ops = dev->netdev_ops;
-       if (!ops->ndo_poll_controller) {
-               up(&ni->dev_lock);
-               return;
-       }
-
-       /* Process pending work on NIC */
-       ops->ndo_poll_controller(dev);
+       if (ops->ndo_poll_controller)
+               ops->ndo_poll_controller(dev);
 
        poll_napi(dev);
 
@@ -219,6 +196,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
        zap_completion_queue();
 }
+EXPORT_SYMBOL(netpoll_poll_dev);
 
 void netpoll_poll_disable(struct net_device *dev)
 {
@@ -334,6 +312,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
        /* It is up to the caller to keep npinfo alive. */
        struct netpoll_info *npinfo;
 
+       rcu_read_lock_bh();
        lockdep_assert_irqs_disabled();
 
        npinfo = rcu_dereference_bh(np->dev->npinfo);
@@ -378,6 +357,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work,0);
        }
+       rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(netpoll_send_skb_on_dev);
 
@@ -613,8 +593,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
        strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
        INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
-       if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-           !ndev->netdev_ops->ndo_poll_controller) {
+       if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
                np_err(np, "%s doesn't support polling, aborting\n",
                       np->dev_name);
                err = -ENOTSUPP;
index 24431e5..37c7936 100644 (file)
@@ -324,6 +324,10 @@ void rtnl_unregister_all(int protocol)
 
        rtnl_lock();
        tab = rtnl_msg_handlers[protocol];
+       if (!tab) {
+               rtnl_unlock();
+               return;
+       }
        RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL);
        for (msgindex = 0; msgindex < RTM_NR_MSGTYPES; msgindex++) {
                link = tab[msgindex];
@@ -1894,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
                if (tb[IFLA_IF_NETNSID]) {
                        netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
                        tgt_net = get_target_net(skb->sk, netnsid);
-                       if (IS_ERR(tgt_net)) {
-                               tgt_net = net;
-                               netnsid = -1;
-                       }
+                       if (IS_ERR(tgt_net))
+                               return PTR_ERR(tgt_net);
                }
 
                if (tb[IFLA_EXT_MASK])
@@ -2806,7 +2808,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
        }
 
        if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
-               __dev_notify_flags(dev, old_flags, 0U);
+               __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
        } else {
                dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
                __dev_notify_flags(dev, old_flags, ~0U);
@@ -2833,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
        else if (ops->get_num_rx_queues)
                num_rx_queues = ops->get_num_rx_queues();
 
+       if (num_tx_queues < 1 || num_tx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
+       if (num_rx_queues < 1 || num_rx_queues > 4096)
+               return ERR_PTR(-EINVAL);
+
        dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
                               ops->setup, num_tx_queues, num_rx_queues);
        if (!dev)
@@ -3740,16 +3748,27 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int err = 0;
        int fidx = 0;
 
-       err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
-                         IFLA_MAX, ifla_policy, NULL);
-       if (err < 0) {
-               return -EINVAL;
-       } else if (err == 0) {
-               if (tb[IFLA_MASTER])
-                       br_idx = nla_get_u32(tb[IFLA_MASTER]);
-       }
+       /* A hack to preserve kernel<->userspace interface.
+        * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
+        * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
+        * So, check for ndmsg with an optional u32 attribute (not used here).
+        * Fortunately these sizes don't conflict with the size of ifinfomsg
+        * with an optional attribute.
+        */
+       if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
+           (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
+            nla_attr_size(sizeof(u32)))) {
+               err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+                                 IFLA_MAX, ifla_policy, NULL);
+               if (err < 0) {
+                       return -EINVAL;
+               } else if (err == 0) {
+                       if (tb[IFLA_MASTER])
+                               br_idx = nla_get_u32(tb[IFLA_MASTER]);
+               }
 
-       brport_idx = ifm->ifi_index;
+               brport_idx = ifm->ifi_index;
+       }
 
        if (br_idx) {
                br_dev = __dev_get_by_index(net, br_idx);
index c996c09..b2c807f 100644 (file)
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 
        WARN_ON_ONCE(!in_task());
 
-       if (!sock_flag(sk, SOCK_ZEROCOPY))
-               return NULL;
-
        skb = sock_omalloc(sk, 0, GFP_KERNEL);
        if (!skb)
                return NULL;
index d28d46b..85d6c87 100644 (file)
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (sk->sk_state == DCCP_LISTEN) {
                if (dh->dccph_type == DCCP_PKT_REQUEST) {
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
                        if (!acceptable)
                                return 1;
                        consume_skb(skb);
index b08feb2..8e08cea 100644 (file)
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
 
                dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
                                                              ireq->ir_rmt_addr);
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
index e63c554..9f3209f 100644 (file)
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
-#include <linux/of_gpio.h>
 #include <linux/netdevice.h>
 #include <linux/sysfs.h>
 #include <linux/phy_fixed.h>
 #include <linux/ptp_classify.h>
-#include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
 #include "dsa_priv.h"
index 962c4fd..1c45c1d 100644 (file)
@@ -767,7 +767,6 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
        const struct tc_action *a;
        struct dsa_port *to_dp;
        int err = -EOPNOTSUPP;
-       LIST_HEAD(actions);
 
        if (!ds->ops->port_mirror_add)
                return err;
@@ -775,8 +774,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev,
        if (!tcf_exts_has_one_action(cls->exts))
                return err;
 
-       tcf_exts_to_list(cls->exts, &actions);
-       a = list_first_entry(&actions, struct tc_action, list);
+       a = tcf_exts_first_action(cls->exts);
 
        if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
                struct dsa_mall_mirror_tc_entry *mirror;
index 20fda8f..1fbe2f8 100644 (file)
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                if (encap)
                        skb_reset_inner_headers(skb);
                skb->network_header = (u8 *)iph - skb->head;
+               skb_reset_mac_len(skb);
        } while ((skb = skb->next));
 
 out:
index cf75f89..4da3944 100644 (file)
@@ -820,10 +820,9 @@ static void igmp_timer_expire(struct timer_list *t)
        spin_lock(&im->lock);
        im->tm_running = 0;
 
-       if (im->unsolicit_count) {
-               im->unsolicit_count--;
+       if (im->unsolicit_count && --im->unsolicit_count)
                igmp_start_timer(im, unsolicited_report_interval(in_dev));
-       }
+
        im->reporter = 1;
        spin_unlock(&im->lock);
 
@@ -1308,6 +1307,8 @@ static void igmp_group_added(struct ip_mc_list *im)
 
        if (in_dev->dead)
                return;
+
+       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
        if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
                spin_lock_bh(&im->lock);
                igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1391,9 +1392,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
                              unsigned int mode)
 {
        struct ip_mc_list *im;
-#ifdef CONFIG_IP_MULTICAST
-       struct net *net = dev_net(in_dev->dev);
-#endif
 
        ASSERT_RTNL();
 
@@ -1420,7 +1418,6 @@ static void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
        timer_setup(&im->timer, igmp_timer_expire, 0);
-       im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
 #endif
 
        im->next_rcu = in_dev->mc_list;
index dfd5009..15e7f79 100644 (file)
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
        struct ip_options_rcu *opt;
        struct rtable *rt;
 
-       opt = ireq_opt_deref(ireq);
+       rcu_read_lock();
+       opt = rcu_dereference(ireq->ireq_opt);
 
        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
+       rcu_read_unlock();
        return &rt->dst;
 
 route_err:
        ip_rt_put(rt);
 no_route:
+       rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
 }
index 88281fb..e722712 100644 (file)
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
                        nextp = &fp->next;
                        fp->prev = NULL;
                        memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+                       fp->sk = NULL;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        if (head->ip_summed != fp->ip_summed)
index 51a5d06..8cce0e9 100644 (file)
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
 
        if (tpi->proto == htons(ETH_P_TEB))
                itn = net_generic(net, gre_tap_net_id);
+       else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+                tpi->proto == htons(ETH_P_ERSPAN2))
+               itn = net_generic(net, erspan_net_id);
        else
                itn = net_generic(net, ipgre_net_id);
 
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
                return PACKET_RCVD;
        }
+       return PACKET_REJECT;
+
 drop:
        kfree_skb(skb);
        return PACKET_RCVD;
@@ -1508,11 +1513,14 @@ nla_put_failure:
 
 static void erspan_setup(struct net_device *dev)
 {
+       struct ip_tunnel *t = netdev_priv(dev);
+
        ether_setup(dev);
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ip_tunnel_setup(dev, erspan_net_id);
+       t->erspan_ver = 1;
 }
 
 static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
index c0fe5ad..26c36cc 100644 (file)
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
        struct sockaddr_in sin;
-       const struct iphdr *iph = ip_hdr(skb);
        __be16 *ports;
        int end;
 
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
        ports = (__be16 *)skb_transport_header(skb);
 
        sin.sin_family = AF_INET;
-       sin.sin_addr.s_addr = iph->daddr;
+       sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
        sin.sin_port = ports[1];
        memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
 
index c4f5602..284a221 100644 (file)
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                    const struct iphdr *tnl_params, u8 protocol)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
+       unsigned int inner_nhdr_len = 0;
        const struct iphdr *inner_iph;
        struct flowi4 fl4;
        u8     tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
        __be32 dst;
        bool connected;
 
+       /* ensure we can access the inner net header, for several users below */
+       if (skb->protocol == htons(ETH_P_IP))
+               inner_nhdr_len = sizeof(struct iphdr);
+       else if (skb->protocol == htons(ETH_P_IPV6))
+               inner_nhdr_len = sizeof(struct ipv6hdr);
+       if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+               goto tx_error;
+
        inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
        connected = (tunnel->parms.iph.daddr != 0);
 
index d9504ad..184bf2e 100644 (file)
@@ -106,6 +106,10 @@ config NF_NAT_IPV4
 
 if NF_NAT_IPV4
 
+config NF_NAT_MASQUERADE_IPV4
+       bool
+
+if NF_TABLES
 config NFT_CHAIN_NAT_IPV4
        depends on NF_TABLES_IPV4
        tristate "IPv4 nf_tables nat chain support"
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
          packet transformations such as the source, destination address and
          source and destination ports.
 
-config NF_NAT_MASQUERADE_IPV4
-       bool
-
 config NFT_MASQ_IPV4
        tristate "IPv4 masquerading support for nf_tables"
        depends on NF_TABLES_IPV4
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
        help
          This is the expression that provides IPv4 redirect support for
          nf_tables.
+endif # NF_TABLES
 
 config NF_NAT_SNMP_BASIC
        tristate "Basic SNMP-ALG support"
index b92f422..891ed2f 100644 (file)
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
 static int ip_ping_group_range_min[] = { 0, 0 };
 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static int comp_sack_nr_max = 255;
+static u32 u32_max_div_HZ = UINT_MAX / HZ;
 
 /* obsolete */
 static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
        {
                .procname       = "tcp_probe_interval",
                .data           = &init_net.ipv4.sysctl_tcp_probe_interval,
-               .maxlen         = sizeof(int),
+               .maxlen         = sizeof(u32),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_douintvec_minmax,
+               .extra2         = &u32_max_div_HZ,
        },
        {
                .procname       = "igmp_link_local_mcast_reports",
index b8af2fe..10c6246 100644 (file)
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
 
        flags = msg->msg_flags;
 
-       if (flags & MSG_ZEROCOPY && size) {
+       if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
                if (sk->sk_state != TCP_ESTABLISHED) {
                        err = -EINVAL;
                        goto out_err;
index 13d3442..02ff2dd 100644 (file)
@@ -95,11 +95,10 @@ struct bbr {
        u32     mode:3,              /* current bbr_mode in state machine */
                prev_ca_state:3,     /* CA state on previous ACK */
                packet_conservation:1,  /* use packet conservation? */
-               restore_cwnd:1,      /* decided to revert cwnd to old value */
                round_start:1,       /* start of packet-timed tx->ack round? */
                idle_restart:1,      /* restarting after idle? */
                probe_rtt_round_done:1,  /* a BBR_PROBE_RTT round at 4 pkts? */
-               unused:12,
+               unused:13,
                lt_is_sampling:1,    /* taking long-term ("LT") samples now? */
                lt_rtt_cnt:7,        /* round trips in long-term interval */
                lt_use_bw:1;         /* use lt_bw as our bw estimate? */
@@ -175,6 +174,8 @@ static const u32 bbr_lt_bw_diff = 4000 / 8;
 /* If we estimate we're policed, use lt_bw for this many round trips: */
 static const u32 bbr_lt_bw_max_rtts = 48;
 
+static void bbr_check_probe_rtt_done(struct sock *sk);
+
 /* Do we estimate that STARTUP filled the pipe? */
 static bool bbr_full_bw_reached(const struct sock *sk)
 {
@@ -309,6 +310,8 @@ static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
                 */
                if (bbr->mode == BBR_PROBE_BW)
                        bbr_set_pacing_rate(sk, bbr_bw(sk), BBR_UNIT);
+               else if (bbr->mode == BBR_PROBE_RTT)
+                       bbr_check_probe_rtt_done(sk);
        }
 }
 
@@ -396,17 +399,11 @@ static bool bbr_set_cwnd_to_recover_or_restore(
                cwnd = tcp_packets_in_flight(tp) + acked;
        } else if (prev_state >= TCP_CA_Recovery && state < TCP_CA_Recovery) {
                /* Exiting loss recovery; restore cwnd saved before recovery. */
-               bbr->restore_cwnd = 1;
+               cwnd = max(cwnd, bbr->prior_cwnd);
                bbr->packet_conservation = 0;
        }
        bbr->prev_ca_state = state;
 
-       if (bbr->restore_cwnd) {
-               /* Restore cwnd after exiting loss recovery or PROBE_RTT. */
-               cwnd = max(cwnd, bbr->prior_cwnd);
-               bbr->restore_cwnd = 0;
-       }
-
        if (bbr->packet_conservation) {
                *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);
                return true;    /* yes, using packet conservation */
@@ -423,10 +420,10 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct bbr *bbr = inet_csk_ca(sk);
-       u32 cwnd = 0, target_cwnd = 0;
+       u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
 
        if (!acked)
-               return;
+               goto done;  /* no packet fully ACKed; just apply caps */
 
        if (bbr_set_cwnd_to_recover_or_restore(sk, rs, acked, &cwnd))
                goto done;
@@ -748,6 +745,20 @@ static void bbr_check_drain(struct sock *sk, const struct rate_sample *rs)
                bbr_reset_probe_bw_mode(sk);  /* we estimate queue is drained */
 }
 
+static void bbr_check_probe_rtt_done(struct sock *sk)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct bbr *bbr = inet_csk_ca(sk);
+
+       if (!(bbr->probe_rtt_done_stamp &&
+             after(tcp_jiffies32, bbr->probe_rtt_done_stamp)))
+               return;
+
+       bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
+       tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+       bbr_reset_mode(sk);
+}
+
 /* The goal of PROBE_RTT mode is to have BBR flows cooperatively and
  * periodically drain the bottleneck queue, to converge to measure the true
  * min_rtt (unloaded propagation delay). This allows the flows to keep queues
@@ -806,12 +817,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
                } else if (bbr->probe_rtt_done_stamp) {
                        if (bbr->round_start)
                                bbr->probe_rtt_round_done = 1;
-                       if (bbr->probe_rtt_round_done &&
-                           after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
-                               bbr->min_rtt_stamp = tcp_jiffies32;
-                               bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
-                               bbr_reset_mode(sk);
-                       }
+                       if (bbr->probe_rtt_round_done)
+                               bbr_check_probe_rtt_done(sk);
                }
        }
        /* Restart after idle ends only once we process a new S/ACK for data */
@@ -862,7 +869,6 @@ static void bbr_init(struct sock *sk)
        bbr->has_seen_rtt = 0;
        bbr_init_pacing_rate_from_rtt(sk);
 
-       bbr->restore_cwnd = 0;
        bbr->round_start = 0;
        bbr->idle_restart = 0;
        bbr->full_bw_reached = 0;
index 4c2dd9f..47e08c1 100644 (file)
@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        if (th->fin)
                                goto discard;
                        /* It is possible that we process SYN packets from backlog,
-                        * so we need to make sure to disable BH right there.
+                        * so we need to make sure to disable BH and RCU right there.
                         */
+                       rcu_read_lock();
                        local_bh_disable();
                        acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
                        local_bh_enable();
+                       rcu_read_unlock();
 
                        if (!acceptable)
                                return 1;
@@ -6367,8 +6369,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
        if (!queue->synflood_warned &&
            net->ipv4.sysctl_tcp_syncookies != 2 &&
            xchg(&queue->synflood_warned, 1) == 0)
-               pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
-                       proto, ntohs(tcp_hdr(skb)->dest), msg);
+               net_info_ratelimited("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
+                                    proto, ntohs(tcp_hdr(skb)->dest), msg);
 
        return want_cookie;
 }
index 9e041fa..cd42631 100644 (file)
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
 
+               rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
-                                           ireq_opt_deref(ireq));
+                                           rcu_dereference(ireq->ireq_opt));
+               rcu_read_unlock();
                err = net_xmit_eval(err);
        }
 
@@ -2517,6 +2519,12 @@ static int __net_init tcp_sk_init(struct net *net)
                if (res)
                        goto fail;
                sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
+
+               /* Please enforce IP_DF and IPID==0 for RST and
+                * ACK sent in SYN-RECV and TIME-WAIT state.
+                */
+               inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
+
                *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
        }
 
index 75ef332..12affb7 100644 (file)
@@ -184,8 +184,9 @@ kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
+               } else {
+                       inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                }
-               inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
 
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent       = tmp_opt.rcv_tsval;
index f4e35b2..7d69dd6 100644 (file)
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                                                         inet_compute_pseudo);
 }
 
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+                              struct udphdr *uh)
+{
+       int ret;
+
+       if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+               skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+                                        inet_compute_pseudo);
+
+       ret = udp_queue_rcv_skb(sk, skb);
+
+       /* a return value > 0 means to resubmit the input, but
+        * it wants the return to be -protocol, or 0
+        */
+       if (ret > 0)
+               return -ret;
+       return 0;
+}
+
 /*
  *     All we need to do is get the socket, and then do a checksum.
  */
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                if (unlikely(sk->sk_rx_dst != dst))
                        udp_sk_rx_dst_set(sk, dst);
 
-               ret = udp_queue_rcv_skb(sk, skb);
+               ret = udp_unicast_rcv_skb(sk, skb, uh);
                sock_put(sk);
-               /* a return value > 0 means to resubmit the input, but
-                * it wants the return to be -protocol, or 0
-                */
-               if (ret > 0)
-                       return -ret;
-               return 0;
+               return ret;
        }
 
        if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                                                saddr, daddr, udptable, proto);
 
        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-       if (sk) {
-               int ret;
-
-               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
-                       skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
-                                                inet_compute_pseudo);
-
-               ret = udp_queue_rcv_skb(sk, skb);
-
-               /* a return value > 0 means to resubmit the input, but
-                * it wants the return to be -protocol, or 0
-                */
-               if (ret > 0)
-                       return -ret;
-               return 0;
-       }
+       if (sk)
+               return udp_unicast_rcv_skb(sk, skb, uh);
 
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
index bcfc00e..f8de248 100644 (file)
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return 0;
        }
 
index 3d36644..1ad2c2c 100644 (file)
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
                skb->network_header = skb->transport_header;
        }
        ip_hdr(skb)->tot_len = htons(skb->len + ihl);
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 2fac4ad..c63ccce 100644 (file)
@@ -2398,7 +2398,7 @@ static void addrconf_add_mroute(struct net_device *dev)
 
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
 
-       ip6_route_add(&cfg, GFP_ATOMIC, NULL);
+       ip6_route_add(&cfg, GFP_KERNEL, NULL);
 }
 
 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
@@ -3062,7 +3062,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
        if (addr.s6_addr32[3]) {
                add_addr(idev, &addr, plen, scope);
                addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
-                                     GFP_ATOMIC);
+                                     GFP_KERNEL);
                return;
        }
 
@@ -3087,7 +3087,7 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
 
                                add_addr(idev, &addr, plen, flag);
                                addrconf_prefix_route(&addr, plen, 0, idev->dev,
-                                                     0, pflags, GFP_ATOMIC);
+                                                     0, pflags, GFP_KERNEL);
                        }
                }
        }
@@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
                                p++;
                                continue;
                        }
-                       state->offset++;
                        return ifa;
                }
 
@@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
                return ifa;
        }
 
+       state->offset = 0;
        while (++state->bucket < IN6_ADDR_HSIZE) {
-               state->offset = 0;
                hlist_for_each_entry_rcu(ifa,
                                     &inet6_addr_lst[state->bucket], addr_lst) {
                        if (!net_eq(dev_net(ifa->idev->dev), net))
                                continue;
-                       state->offset++;
                        return ifa;
                }
        }
index 673bba3..9a4261e 100644 (file)
@@ -938,14 +938,14 @@ static int __init inet6_init(void)
 
        err = proto_register(&pingv6_prot, 1);
        if (err)
-               goto out_unregister_ping_proto;
+               goto out_unregister_raw_proto;
 
        /* We MUST register RAW sockets before we create the ICMP6,
         * IGMP6, or NDISC control sockets.
         */
        err = rawv6_init();
        if (err)
-               goto out_unregister_raw_proto;
+               goto out_unregister_ping_proto;
 
        /* Register the family here so that the init calls below will
         * be able to create sockets. (?? is this dangerous ??)
@@ -1113,11 +1113,11 @@ netfilter_fail:
 igmp_fail:
        ndisc_cleanup();
 ndisc_fail:
-       ip6_mr_cleanup();
+       icmpv6_cleanup();
 icmp_fail:
-       unregister_pernet_subsys(&inet6_net_ops);
+       ip6_mr_cleanup();
 ipmr_fail:
-       icmpv6_cleanup();
+       unregister_pernet_subsys(&inet6_net_ops);
 register_pernet_fail:
        sock_unregister(PF_INET6);
        rtnl_unregister_all(PF_INET6);
index d212738..5516f55 100644 (file)
@@ -198,6 +198,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
                }
        }
 
+       lwtstate_put(f6i->fib6_nh.nh_lwtstate);
+
        if (f6i->fib6_nh.nh_dev)
                dev_put(f6i->fib6_nh.nh_dev);
 
@@ -987,7 +989,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
                                        fib6_clean_expires(iter);
                                else
                                        fib6_set_expires(iter, rt->expires);
-                               fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
+
+                               if (rt->fib6_pmtu)
+                                       fib6_metric_set(iter, RTAX_MTU,
+                                                       rt->fib6_pmtu);
                                return -EEXIST;
                        }
                        /* If we have the same destination and the same metric,
index 18a3794..e493b04 100644 (file)
@@ -1778,6 +1778,7 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
        if (data[IFLA_GRE_COLLECT_METADATA])
                parms->collect_md = true;
 
+       parms->erspan_ver = 1;
        if (data[IFLA_GRE_ERSPAN_VER])
                parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
 
index 37ff480..c7e495f 100644 (file)
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
                        payload_len = skb->len - nhoff - sizeof(*ipv6h);
                ipv6h->payload_len = htons(payload_len);
                skb->network_header = (u8 *)ipv6h - skb->head;
+               skb_reset_mac_len(skb);
 
                if (udpfrag) {
                        int err = ip6_find_1stfragopt(skb, &prevhdr);
index 16f200f..f9f8f55 100644 (file)
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
                                kfree_skb(skb);
                                return -ENOBUFS;
                        }
+                       if (skb->sk)
+                               skb_set_owner_w(skb2, skb->sk);
                        consume_skb(skb);
                        skb = skb2;
-                       /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
-                        * it is safe to call in our context (socket lock not held)
-                        */
-                       skb_set_owner_w(skb, (struct sock *)sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
index 5df2a58..a0b6932 100644 (file)
@@ -1188,7 +1188,15 @@ route_lookup:
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_frag_opts(skb, &opt.ops, &proto);
        }
-       hop_limit = hop_limit ? : ip6_dst_hoplimit(dst);
+
+       if (hop_limit == 0) {
+               if (skb->protocol == htons(ETH_P_IP))
+                       hop_limit = ip_hdr(skb)->ttl;
+               else if (skb->protocol == htons(ETH_P_IPV6))
+                       hop_limit = ipv6_hdr(skb)->hop_limit;
+               else
+                       hop_limit = ip6_dst_hoplimit(dst);
+       }
 
        /* Calculate max headroom for all the headers and adjust
         * needed_headroom if necessary.
@@ -1226,7 +1234,7 @@ static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       const struct iphdr  *iph = ip_hdr(skb);
+       const struct iphdr  *iph;
        int encap_limit = -1;
        struct flowi6 fl6;
        __u8 dsfield;
@@ -1234,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       /* ensure we can access the full inner ip header */
+       if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+               return -1;
+
+       iph = ip_hdr(skb);
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 
        tproto = READ_ONCE(t->parms.proto);
@@ -1298,7 +1311,7 @@ static inline int
 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ip6_tnl *t = netdev_priv(dev);
-       struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       struct ipv6hdr *ipv6h;
        int encap_limit = -1;
        __u16 offset;
        struct flowi6 fl6;
@@ -1307,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        u8 tproto;
        int err;
 
+       if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+               return -1;
+
+       ipv6h = ipv6_hdr(skb);
        tproto = READ_ONCE(t->parms.proto);
        if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
            ip6_tnl_addr_conflict(t, ipv6h))
index 38dec9d..eeaf745 100644 (file)
@@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        }
 
        mtu = dst_mtu(dst);
-       if (!skb->ignore_df && skb->len > mtu) {
+       if (skb->len > mtu) {
                skb_dst_update_pmtu(skb, mtu);
 
                if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -1094,7 +1094,8 @@ static void __net_exit vti6_destroy_tunnels(struct vti6_net *ip6n,
        }
 
        t = rtnl_dereference(ip6n->tnls_wc[0]);
-       unregister_netdevice_queue(t->dev, list);
+       if (t)
+               unregister_netdevice_queue(t->dev, list);
 }
 
 static int __net_init vti6_init_net(struct net *net)
index 2a14d8b..8f68a51 100644 (file)
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
+               fp->sk = NULL;
        }
        sub_frag_mem_limit(fq->q.net, head->truesize);
 
index 413d98b..5e0efd3 100644 (file)
@@ -651,8 +651,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;
        skb->tstamp = sockc->transmit_time;
-       skb_dst_set(skb, &rt->dst);
-       *dstp = NULL;
 
        skb_put(skb, length);
        skb_reset_network_header(skb);
@@ -665,8 +663,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
 
        skb->transport_header = skb->network_header;
        err = memcpy_from_msg(iph, msg, length);
-       if (err)
-               goto error_fault;
+       if (err) {
+               err = -EFAULT;
+               kfree_skb(skb);
+               goto error;
+       }
+
+       skb_dst_set(skb, &rt->dst);
+       *dstp = NULL;
 
        /* if egress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
@@ -675,21 +679,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
        if (unlikely(!skb))
                return 0;
 
+       /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+        * in the error path. Since skb has been freed, the dst could
+        * have been queued for deletion.
+        */
+       rcu_read_lock();
        IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
        err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
                      NULL, rt->dst.dev, dst_output);
        if (err > 0)
                err = net_xmit_errno(err);
-       if (err)
-               goto error;
+       if (err) {
+               IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+               rcu_read_unlock();
+               goto error_check;
+       }
+       rcu_read_unlock();
 out:
        return 0;
 
-error_fault:
-       err = -EFAULT;
-       kfree_skb(skb);
 error:
        IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
        if (err == -ENOBUFS && !np->recverr)
                err = 0;
        return err;
index 7208c16..a366c05 100644 (file)
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc);
 
 static void ip6_dst_destroy(struct dst_entry *dst)
 {
+       struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct fib6_info *from;
        struct inet6_dev *idev;
 
-       dst_destroy_metrics_generic(dst);
+       if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+               kfree(p);
+
        rt6_uncached_list_del(rt);
 
        idev = rt->rt6i_idev;
@@ -946,8 +949,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
 
 static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
 {
-       rt->dst.flags |= fib6_info_dst_flags(ort);
-
        if (ort->fib6_flags & RTF_REJECT) {
                ip6_rt_init_dst_reject(rt, ort);
                return;
@@ -956,7 +957,7 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
        rt->dst.error = 0;
        rt->dst.output = ip6_output;
 
-       if (ort->fib6_type == RTN_LOCAL) {
+       if (ort->fib6_type == RTN_LOCAL || ort->fib6_type == RTN_ANYCAST) {
                rt->dst.input = ip6_input;
        } else if (ipv6_addr_type(&ort->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
                rt->dst.input = ip6_mc_input;
@@ -978,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
        rt->rt6i_flags &= ~RTF_EXPIRES;
        rcu_assign_pointer(rt->from, from);
        dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+       if (from->fib6_metrics != &dst_default_metrics) {
+               rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+               refcount_inc(&from->fib6_metrics->refcnt);
+       }
 }
 
 /* Caller must already hold reference to @ort */
@@ -996,7 +1001,6 @@ static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
        rt->rt6i_src = ort->fib6_src;
 #endif
        rt->rt6i_prefsrc = ort->fib6_prefsrc;
-       rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
 }
 
 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
@@ -4317,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
        if (!nh)
                return -ENOMEM;
        nh->fib6_info = rt;
-       err = ip6_convert_metrics(net, rt, r_cfg);
-       if (err) {
-               kfree(nh);
-               return err;
-       }
        memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
        list_add_tail(&nh->next, rt6_nh_list);
 
@@ -4671,20 +4670,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                         int iif, int type, u32 portid, u32 seq,
                         unsigned int flags)
 {
-       struct rtmsg *rtm;
+       struct rt6_info *rt6 = (struct rt6_info *)dst;
+       struct rt6key *rt6_dst, *rt6_src;
+       u32 *pmetrics, table, rt6_flags;
        struct nlmsghdr *nlh;
+       struct rtmsg *rtm;
        long expires = 0;
-       u32 *pmetrics;
-       u32 table;
 
        nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
        if (!nlh)
                return -EMSGSIZE;
 
+       if (rt6) {
+               rt6_dst = &rt6->rt6i_dst;
+               rt6_src = &rt6->rt6i_src;
+               rt6_flags = rt6->rt6i_flags;
+       } else {
+               rt6_dst = &rt->fib6_dst;
+               rt6_src = &rt->fib6_src;
+               rt6_flags = rt->fib6_flags;
+       }
+
        rtm = nlmsg_data(nlh);
        rtm->rtm_family = AF_INET6;
-       rtm->rtm_dst_len = rt->fib6_dst.plen;
-       rtm->rtm_src_len = rt->fib6_src.plen;
+       rtm->rtm_dst_len = rt6_dst->plen;
+       rtm->rtm_src_len = rt6_src->plen;
        rtm->rtm_tos = 0;
        if (rt->fib6_table)
                table = rt->fib6_table->tb6_id;
@@ -4699,7 +4709,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->fib6_protocol;
 
-       if (rt->fib6_flags & RTF_CACHE)
+       if (rt6_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;
 
        if (dest) {
@@ -4707,7 +4717,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
                rtm->rtm_dst_len = 128;
        } else if (rtm->rtm_dst_len)
-               if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
+               if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
                        goto nla_put_failure;
 #ifdef CONFIG_IPV6_SUBTREES
        if (src) {
@@ -4715,12 +4725,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
                rtm->rtm_src_len = 128;
        } else if (rtm->rtm_src_len &&
-                  nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
+                  nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
                goto nla_put_failure;
 #endif
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
-               if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
+               if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
                        int err = ip6mr_get_route(net, skb, rtm, portid);
 
                        if (err == 0)
@@ -4755,7 +4765,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        /* For multipath routes, walk the siblings list and add
         * each as a nexthop within RTA_MULTIPATH.
         */
-       if (rt->fib6_nsiblings) {
+       if (rt6) {
+               if (rt6_flags & RTF_GATEWAY &&
+                   nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
+                       goto nla_put_failure;
+
+               if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
+                       goto nla_put_failure;
+       } else if (rt->fib6_nsiblings) {
                struct fib6_info *sibling, *next_sibling;
                struct nlattr *mp;
 
@@ -4778,7 +4795,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
                        goto nla_put_failure;
        }
 
-       if (rt->fib6_flags & RTF_EXPIRES) {
+       if (rt6_flags & RTF_EXPIRES) {
                expires = dst ? dst->expires : rt->expires;
                expires -= jiffies;
        }
@@ -4786,7 +4803,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
        if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
                goto nla_put_failure;
 
-       if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
+       if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
                goto nla_put_failure;
 
 
index 83f4c77..28c4aa5 100644 (file)
@@ -752,6 +752,28 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
        }
 }
 
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+                               struct udphdr *uh)
+{
+       int ret;
+
+       if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+               skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+                                        ip6_compute_pseudo);
+
+       ret = udpv6_queue_rcv_skb(sk, skb);
+
+       /* a return value > 0 means to resubmit the input, but
+        * it wants the return to be -protocol, or 0
+        */
+       if (ret > 0)
+               return -ret;
+       return 0;
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                   int proto)
 {
@@ -803,13 +825,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                if (unlikely(sk->sk_rx_dst != dst))
                        udp6_sk_rx_dst_set(sk, dst);
 
-               ret = udpv6_queue_rcv_skb(sk, skb);
-               sock_put(sk);
+               if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+                       sock_put(sk);
+                       goto report_csum_error;
+               }
 
-               /* a return value > 0 means to resubmit the input */
-               if (ret > 0)
-                       return ret;
-               return 0;
+               ret = udp6_unicast_rcv_skb(sk, skb, uh);
+               sock_put(sk);
+               return ret;
        }
 
        /*
@@ -822,30 +845,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
        /* Unicast */
        sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
        if (sk) {
-               int ret;
-
-               if (!uh->check && !udp_sk(sk)->no_check6_rx) {
-                       udp6_csum_zero_error(skb);
-                       goto csum_error;
-               }
-
-               if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
-                       skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
-                                                ip6_compute_pseudo);
-
-               ret = udpv6_queue_rcv_skb(sk, skb);
-
-               /* a return value > 0 means to resubmit the input */
-               if (ret > 0)
-                       return ret;
-
-               return 0;
+               if (!uh->check && !udp_sk(sk)->no_check6_rx)
+                       goto report_csum_error;
+               return udp6_unicast_rcv_skb(sk, skb, uh);
        }
 
-       if (!uh->check) {
-               udp6_csum_zero_error(skb);
-               goto csum_error;
-       }
+       if (!uh->check)
+               goto report_csum_error;
 
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto discard;
@@ -866,6 +872,9 @@ short_packet:
                            ulen, skb->len,
                            daddr, ntohs(uh->dest));
        goto discard;
+
+report_csum_error:
+       udp6_csum_zero_error(skb);
 csum_error:
        __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 discard:
index 841f4a0..9ef490d 100644 (file)
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
 
        if (xo && (xo->flags & XFRM_GRO)) {
                skb_mac_header_rebuild(skb);
+               skb_reset_transport_header(skb);
                return -1;
        }
 
index 9ad07a9..3c29da5 100644 (file)
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
 {
        int ihl = skb->data - skb_transport_header(skb);
-       struct xfrm_offload *xo = xfrm_offload(skb);
 
        if (skb->transport_header != skb->network_header) {
                memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
        }
        ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
                                           sizeof(struct ipv6hdr));
-       if (!xo || !(xo->flags & XFRM_GRO))
-               skb_reset_transport_header(skb);
+       skb_reset_transport_header(skb);
        return 0;
 }
 
index 5959ce9..6a74080 100644 (file)
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
        if (toobig && xfrm6_local_dontfrag(skb)) {
                xfrm6_local_rxpmtu(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        } else if (!skb->ignore_df && toobig && skb->sk) {
                xfrm_local_error(skb, mtu);
+               kfree_skb(skb);
                return -EMSGSIZE;
        }
 
index a21d8ed..e2f16a0 100644 (file)
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
 
        skb->dev = iucv->hs_dev;
-       if (!skb->dev)
-               return -ENODEV;
-       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
-               return -ENETDOWN;
+       if (!skb->dev) {
+               err = -ENODEV;
+               goto err_free;
+       }
+       if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+               err = -ENETDOWN;
+               goto err_free;
+       }
        if (skb->len > skb->dev->mtu) {
-               if (sock->sk_type == SOCK_SEQPACKET)
-                       return -EMSGSIZE;
-               else
-                       skb_trim(skb, skb->dev->mtu);
+               if (sock->sk_type == SOCK_SEQPACKET) {
+                       err = -EMSGSIZE;
+                       goto err_free;
+               }
+               skb_trim(skb, skb->dev->mtu);
        }
        skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
        nskb = skb_clone(skb, GFP_ATOMIC);
-       if (!nskb)
-               return -ENOMEM;
+       if (!nskb) {
+               err = -ENOMEM;
+               goto err_free;
+       }
+
        skb_queue_tail(&iucv->send_skb_q, nskb);
        err = dev_queue_xmit(skb);
        if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
                WARN_ON(atomic_read(&iucv->msg_recv) < 0);
        }
        return net_xmit_eval(err);
+
+err_free:
+       kfree_skb(skb);
+       return err;
 }
 
 static struct sock *__iucv_get_sock_by_name(char *nm)
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
                err = afiucv_hs_send(&txmsg, sk, skb, 0);
                if (err) {
                        atomic_dec(&iucv->msg_sent);
-                       goto fail;
+                       goto out;
                }
        } else { /* Classic VM IUCV transport */
                skb_queue_tail(&iucv->send_skb_q, skb);
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
        struct sock *sk;
        struct iucv_sock *iucv;
        struct af_iucv_trans_hdr *trans_hdr;
+       int err = NET_RX_SUCCESS;
        char nullstring[8];
-       int err = 0;
 
        if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
                WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        default:
-               ;
+               kfree_skb(skb);
        }
 
        return err;
index 8f7ef16..eb502c6 100644 (file)
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
  * Returns 0 if there are still iucv pathes defined
  *        1 if there are no iucv pathes defined
  */
-int iucv_path_table_empty(void)
+static int iucv_path_table_empty(void)
 {
        int i;
 
index d25da0e..5d22eda 100644 (file)
@@ -427,7 +427,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
        case NL80211_IFTYPE_AP:
        case NL80211_IFTYPE_AP_VLAN:
                /* Keys without a station are used for TX only */
-               if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+               if (sta && test_sta_flag(sta, WLAN_STA_MFP))
                        key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
                break;
        case NL80211_IFTYPE_ADHOC:
index 6449a1c..f0f5fed 100644 (file)
@@ -947,8 +947,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
        if (len < IEEE80211_DEAUTH_FRAME_LEN)
                return;
 
-       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, reason);
+       ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
        sta_info_destroy_addr(sdata, mgmt->sa);
 }
 
@@ -966,9 +966,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
        auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
        auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
 
-       ibss_dbg(sdata,
-                "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+       ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+                mgmt->bssid, auth_transaction);
 
        if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
                return;
@@ -1175,10 +1175,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
                rx_timestamp = drv_get_tsf(local, sdata);
        }
 
-       ibss_dbg(sdata,
-                "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+       ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
                 mgmt->sa, mgmt->bssid,
-                (unsigned long long)rx_timestamp,
+                (unsigned long long)rx_timestamp);
+       ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
                 (unsigned long long)beacon_timestamp,
                 (unsigned long long)(rx_timestamp - beacon_timestamp),
                 jiffies);
@@ -1537,9 +1537,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 
        tx_last_beacon = drv_tx_last_beacon(local);
 
-       ibss_dbg(sdata,
-                "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
-                mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+       ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+       ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+                mgmt->bssid, tx_last_beacon);
 
        if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
                return;
index 5e6cf2c..5836dde 100644 (file)
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                if (local->ops->wake_tx_queue &&
                    type != NL80211_IFTYPE_AP_VLAN &&
-                   type != NL80211_IFTYPE_MONITOR)
+                   (type != NL80211_IFTYPE_MONITOR ||
+                    (params->flags & MONITOR_FLAG_ACTIVE)))
                        txq_size += sizeof(struct txq_info) +
                                    local->hw.txq_data_size;
 
index 4fb2709..5136278 100644 (file)
@@ -256,8 +256,27 @@ static void ieee80211_restart_work(struct work_struct *work)
 
        flush_work(&local->radar_detected_work);
        rtnl_lock();
-       list_for_each_entry(sdata, &local->interfaces, list)
+       list_for_each_entry(sdata, &local->interfaces, list) {
+               /*
+                * XXX: there may be more work for other vif types and even
+                * for station mode: a good thing would be to run most of
+                * the iface type's dependent _stop (ieee80211_mg_stop,
+                * ieee80211_ibss_stop) etc...
+                * For now, fix only the specific bug that was seen: race
+                * between csa_connection_drop_work and us.
+                */
+               if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+                       /*
+                        * This worker is scheduled from the iface worker that
+                        * runs on mac80211's workqueue, so we can't be
+                        * scheduling this worker after the cancel right here.
+                        * The exception is ieee80211_chswitch_done.
+                        * Then we can have a race...
+                        */
+                       cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+               }
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+       }
        ieee80211_scan_cancel(local);
 
        /* make sure any new ROC will consider local->in_reconfig */
@@ -471,10 +490,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
                cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
                            IEEE80211_VHT_CAP_SHORT_GI_80 |
                            IEEE80211_VHT_CAP_SHORT_GI_160 |
-                           IEEE80211_VHT_CAP_RXSTBC_1 |
-                           IEEE80211_VHT_CAP_RXSTBC_2 |
-                           IEEE80211_VHT_CAP_RXSTBC_3 |
-                           IEEE80211_VHT_CAP_RXSTBC_4 |
+                           IEEE80211_VHT_CAP_RXSTBC_MASK |
                            IEEE80211_VHT_CAP_TXSTBC |
                            IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
                            IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1208,6 +1224,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 #if IS_ENABLED(CONFIG_IPV6)
        unregister_inet6addr_notifier(&local->ifa6_notifier);
 #endif
+       ieee80211_txq_teardown_flows(local);
 
        rtnl_lock();
 
@@ -1236,7 +1253,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
        skb_queue_purge(&local->skb_queue);
        skb_queue_purge(&local->skb_queue_unreliable);
        skb_queue_purge(&local->skb_queue_tdls_chsw);
-       ieee80211_txq_teardown_flows(local);
 
        destroy_workqueue(local->workqueue);
        wiphy_unregister(local->hw.wiphy);
index ee56f18..2152663 100644 (file)
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
 int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_init(void);
 void ieee80211s_update_metric(struct ieee80211_local *local,
-                             struct sta_info *sta, struct sk_buff *skb);
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st);
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
 void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
 int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
index 35ad398..6950cd0 100644 (file)
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
 }
 
 void ieee80211s_update_metric(struct ieee80211_local *local,
-               struct sta_info *sta, struct sk_buff *skb)
+                             struct sta_info *sta,
+                             struct ieee80211_tx_status *st)
 {
-       struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       struct ieee80211_tx_info *txinfo = st->info;
        int failed;
 
-       if (!ieee80211_is_data(hdr->frame_control))
-               return;
-
        failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
 
        /* moving average, scaled to 100.
@@ -572,6 +569,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
                forward = false;
                reply = true;
                target_metric = 0;
+
+               if (SN_GT(target_sn, ifmsh->sn))
+                       ifmsh->sn = target_sn;
+
                if (time_after(jiffies, ifmsh->last_sn_update +
                                        net_traversal_jiffies(sdata)) ||
                    time_before(jiffies, ifmsh->last_sn_update)) {
index 7fb9957..3dbecae 100644 (file)
@@ -1073,6 +1073,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
         */
 
        if (sdata->reserved_chanctx) {
+               struct ieee80211_supported_band *sband = NULL;
+               struct sta_info *mgd_sta = NULL;
+               enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
                /*
                 * with multi-vif csa driver may call ieee80211_csa_finish()
                 * many times while waiting for other interfaces to use their
@@ -1081,6 +1085,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                if (sdata->reserved_ready)
                        goto out;
 
+               if (sdata->vif.bss_conf.chandef.width !=
+                   sdata->csa_chandef.width) {
+                       /*
+                        * For managed interface, we need to also update the AP
+                        * station bandwidth and align the rate scale algorithm
+                        * on the bandwidth change. Here we only consider the
+                        * bandwidth of the new channel definition (as channel
+                        * switch flow does not have the full HT/VHT/HE
+                        * information), assuming that if additional changes are
+                        * required they would be done as part of the processing
+                        * of the next beacon from the AP.
+                        */
+                       switch (sdata->csa_chandef.width) {
+                       case NL80211_CHAN_WIDTH_20_NOHT:
+                       case NL80211_CHAN_WIDTH_20:
+                       default:
+                               bw = IEEE80211_STA_RX_BW_20;
+                               break;
+                       case NL80211_CHAN_WIDTH_40:
+                               bw = IEEE80211_STA_RX_BW_40;
+                               break;
+                       case NL80211_CHAN_WIDTH_80:
+                               bw = IEEE80211_STA_RX_BW_80;
+                               break;
+                       case NL80211_CHAN_WIDTH_80P80:
+                       case NL80211_CHAN_WIDTH_160:
+                               bw = IEEE80211_STA_RX_BW_160;
+                               break;
+                       }
+
+                       mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+                       sband =
+                               local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+               }
+
+               if (sdata->vif.bss_conf.chandef.width >
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                ret = ieee80211_vif_use_reserved_context(sdata);
                if (ret) {
                        sdata_info(sdata,
@@ -1091,6 +1137,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
                        goto out;
                }
 
+               if (sdata->vif.bss_conf.chandef.width <
+                   sdata->csa_chandef.width) {
+                       mgd_sta->sta.bandwidth = bw;
+                       rate_control_rate_update(local, sband, mgd_sta,
+                                                IEEE80211_RC_BW_CHANGED);
+               }
+
                goto out;
        }
 
@@ -1312,6 +1365,16 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
                                         cbss->beacon_interval));
        return;
  drop_connection:
+       /*
+        * This is just so that the disconnect flow will know that
+        * we were trying to switch channel and failed. In case the
+        * mode is 1 (we are not allowed to Tx), we will know not to
+        * send a deauthentication frame. Those two fields will be
+        * reset when the disconnection worker runs.
+        */
+       sdata->vif.csa_active = true;
+       sdata->csa_block_tx = csa_ie.mode;
+
        ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
        mutex_unlock(&local->chanctx_mtx);
        mutex_unlock(&local->mtx);
@@ -2522,6 +2585,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+       bool tx;
 
        sdata_lock(sdata);
        if (!ifmgd->associated) {
@@ -2529,6 +2593,8 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
                return;
        }
 
+       tx = !sdata->csa_block_tx;
+
        /* AP is probably out of range (or not reachable for another reason) so
         * remove the bss struct for that AP.
         */
@@ -2536,7 +2602,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
 
        ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                               WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-                              true, frame_buf);
+                              tx, frame_buf);
        mutex_lock(&local->mtx);
        sdata->vif.csa_active = false;
        ifmgd->csa_waiting_bcn = false;
@@ -2547,7 +2613,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata)
        }
        mutex_unlock(&local->mtx);
 
-       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+       ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
                                    WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
 
        sdata_unlock(sdata);
index 64742f2..96611d5 100644 (file)
@@ -1728,6 +1728,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
         */
        if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
            !ieee80211_has_morefrags(hdr->frame_control) &&
+           !is_multicast_ether_addr(hdr->addr1) &&
            (ieee80211_is_mgmt(hdr->frame_control) ||
             ieee80211_is_data(hdr->frame_control)) &&
            !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
index 9a6d720..91d7c0c 100644 (file)
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
        if (!skb)
                return;
 
-       if (dropped) {
-               dev_kfree_skb_any(skb);
-               return;
-       }
-
        if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
@@ -507,6 +502,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                rcu_read_unlock();
 
                dev_kfree_skb_any(skb);
+       } else if (dropped) {
+               dev_kfree_skb_any(skb);
        } else {
                /* consumes skb */
                skb_complete_wifi_ack(skb, acked);
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
 
                rate_control_tx_status(local, sband, status);
                if (ieee80211_vif_is_mesh(&sta->sdata->vif))
-                       ieee80211s_update_metric(local, sta, skb);
+                       ieee80211s_update_metric(local, sta, status);
 
                if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
                        ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
                }
 
                rate_control_tx_status(local, sband, status);
+               if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+                       ieee80211s_update_metric(local, sta, status);
        }
 
        if (acked || noack_success) {
index 5cd5e6e..6c647f4 100644 (file)
@@ -16,6 +16,7 @@
 #include "ieee80211_i.h"
 #include "driver-ops.h"
 #include "rate.h"
+#include "wme.h"
 
 /* give usermode some time for retries in setting up the TDLS session */
 #define TDLS_PEER_SETUP_TIMEOUT        (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
        case WLAN_TDLS_SETUP_RESPONSE:
-               skb_set_queue_mapping(skb, IEEE80211_AC_BK);
-               skb->priority = 2;
+               skb->priority = 256 + 2;
                break;
        default:
-               skb_set_queue_mapping(skb, IEEE80211_AC_VI);
-               skb->priority = 5;
+               skb->priority = 256 + 5;
                break;
        }
+       skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
 
        /*
         * Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
index cd332e3..25ba24b 100644 (file)
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
 {
        struct ieee80211_local *local = tx->local;
        struct ieee80211_if_managed *ifmgd;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
 
        /* driver doesn't support power save */
        if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
        if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
                return TX_CONTINUE;
 
+       if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+               return TX_CONTINUE;
+
        ifmgd = &tx->sdata->u.mgd;
 
        /*
@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
                        sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
 
        if (invoke_tx_handlers_early(&tx))
-               return false;
+               return true;
 
        if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
                return true;
@@ -3078,27 +3082,18 @@ void ieee80211_clear_fast_xmit(struct sta_info *sta)
 }
 
 static bool ieee80211_amsdu_realloc_pad(struct ieee80211_local *local,
-                                       struct sk_buff *skb, int headroom,
-                                       int *subframe_len)
+                                       struct sk_buff *skb, int headroom)
 {
-       int amsdu_len = *subframe_len + sizeof(struct ethhdr);
-       int padding = (4 - amsdu_len) & 3;
-
-       if (skb_headroom(skb) < headroom || skb_tailroom(skb) < padding) {
+       if (skb_headroom(skb) < headroom) {
                I802_DEBUG_INC(local->tx_expand_skb_head);
 
-               if (pskb_expand_head(skb, headroom, padding, GFP_ATOMIC)) {
+               if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
                        wiphy_debug(local->hw.wiphy,
                                    "failed to reallocate TX buffer\n");
                        return false;
                }
        }
 
-       if (padding) {
-               *subframe_len += padding;
-               skb_put_zero(skb, padding);
-       }
-
        return true;
 }
 
@@ -3122,8 +3117,7 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
        if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
                return true;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr),
-                                        &subframe_len))
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
                return false;
 
        data = skb_push(skb, sizeof(*amsdu_hdr));
@@ -3189,7 +3183,8 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        void *data;
        bool ret = false;
        unsigned int orig_len;
-       int n = 1, nfrags;
+       int n = 2, nfrags, pad = 0;
+       u16 hdrlen;
 
        if (!ieee80211_hw_check(&local->hw, TX_AMSDU))
                return false;
@@ -3222,9 +3217,6 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (skb->len + head->len > max_amsdu_len)
                goto out;
 
-       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
-               goto out;
-
        nfrags = 1 + skb_shinfo(skb)->nr_frags;
        nfrags += 1 + skb_shinfo(head)->nr_frags;
        frag_tail = &skb_shinfo(head)->frag_list;
@@ -3240,10 +3232,24 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        if (max_frags && nfrags > max_frags)
                goto out;
 
-       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) + 2,
-                                        &subframe_len))
+       if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
                goto out;
 
+       /*
+        * Pad out the previous subframe to a multiple of 4 by adding the
+        * padding to the next one, that's being added. Note that head->len
+        * is the length of the full A-MSDU, but that works since each time
+        * we add a new subframe we pad out the previous one to a multiple
+        * of 4 and thus it no longer matters in the next round.
+        */
+       hdrlen = fast_tx->hdr_len - sizeof(rfc1042_header);
+       if ((head->len - hdrlen) & 3)
+               pad = 4 - ((head->len - hdrlen) & 3);
+
+       if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(rfc1042_header) +
+                                                    2 + pad))
+               goto out_recalc;
+
        ret = true;
        data = skb_push(skb, ETH_ALEN + 2);
        memmove(data, data + ETH_ALEN + 2, 2 * ETH_ALEN);
@@ -3253,15 +3259,19 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
        memcpy(data, &len, 2);
        memcpy(data + 2, rfc1042_header, sizeof(rfc1042_header));
 
+       memset(skb_push(skb, pad), 0, pad);
+
        head->len += skb->len;
        head->data_len += skb->len;
        *frag_tail = skb;
 
-       flow->backlog += head->len - orig_len;
-       tin->backlog_bytes += head->len - orig_len;
-
-       fq_recalc_backlog(fq, tin, flow);
+out_recalc:
+       if (head->len != orig_len) {
+               flow->backlog += head->len - orig_len;
+               tin->backlog_bytes += head->len - orig_len;
 
+               fq_recalc_backlog(fq, tin, flow);
+       }
 out:
        spin_unlock_bh(&fq->lock);
 
index 88efda7..716cd64 100644 (file)
@@ -1135,7 +1135,7 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_chanctx_conf *chanctx_conf;
        const struct ieee80211_reg_rule *rrule;
-       struct ieee80211_wmm_ac *wmm_ac;
+       const struct ieee80211_wmm_ac *wmm_ac;
        u16 center_freq = 0;
 
        if (sdata->vif.type != NL80211_IFTYPE_AP &&
@@ -1154,20 +1154,19 @@ void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata,
 
        rrule = freq_reg_info(sdata->wdev.wiphy, MHZ_TO_KHZ(center_freq));
 
-       if (IS_ERR_OR_NULL(rrule) || !rrule->wmm_rule) {
+       if (IS_ERR_OR_NULL(rrule) || !rrule->has_wmm) {
                rcu_read_unlock();
                return;
        }
 
        if (sdata->vif.type == NL80211_IFTYPE_AP)
-               wmm_ac = &rrule->wmm_rule->ap[ac];
+               wmm_ac = &rrule->wmm_rule.ap[ac];
        else
-               wmm_ac = &rrule->wmm_rule->client[ac];
+               wmm_ac = &rrule->wmm_rule.client[ac];
        qparam->cw_min = max_t(u16, qparam->cw_min, wmm_ac->cw_min);
        qparam->cw_max = max_t(u16, qparam->cw_max, wmm_ac->cw_max);
        qparam->aifs = max_t(u8, qparam->aifs, wmm_ac->aifsn);
-       qparam->txop = !qparam->txop ? wmm_ac->cot / 32 :
-               min_t(u16, qparam->txop, wmm_ac->cot / 32);
+       qparam->txop = min_t(u16, qparam->txop, wmm_ac->cot / 32);
        rcu_read_unlock();
 }
 
index 7a4de6d..8fbe6cd 100644 (file)
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        unsigned int flags;
 
        if (event == NETDEV_REGISTER) {
-               /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
+
+               /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
+                * IPIP devices
+                */
                if (dev->type == ARPHRD_ETHER ||
                    dev->type == ARPHRD_LOOPBACK ||
                    dev->type == ARPHRD_IPGRE ||
+                   dev->type == ARPHRD_IP6GRE ||
                    dev->type == ARPHRD_SIT ||
                    dev->type == ARPHRD_TUNNEL) {
                        mdev = mpls_add_dev(dev);
index 82e6edf..45f33d6 100644 (file)
@@ -100,7 +100,7 @@ static int ncsi_write_package_info(struct sk_buff *skb,
        bool found;
        int rc;
 
-       if (id > ndp->package_num) {
+       if (id > ndp->package_num - 1) {
                netdev_info(ndp->ndev.dev, "NCSI: No package with id %u\n", id);
                return -ENODEV;
        }
@@ -240,7 +240,7 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
                return 0; /* done */
 
        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
-                         &ncsi_genl_family, 0,  NCSI_CMD_PKG_INFO);
+                         &ncsi_genl_family, NLM_F_MULTI,  NCSI_CMD_PKG_INFO);
        if (!hdr) {
                rc = -EMSGSIZE;
                goto err;
index 71709c1..f61c306 100644 (file)
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
        depends on NETFILTER_ADVANCED
        ---help---
          This option adds a `CHECKSUM' target, which can be used in the iptables mangle
-         table.
+         table to work around buggy DHCP clients in virtualized environments.
 
-         You can use this target to compute and fill in the checksum in
-         a packet that lacks a checksum.  This is particularly useful,
-         if you need to work around old applications such as dhcp clients,
-         that do not work well with checksum offloads, but don't want to disable
-         checksum offload in your device.
+         Some old DHCP clients drop packets because they are not aware
+         that the checksum would normally be offloaded to hardware and
+         thus should be considered valid.
+         This target can be used to fill in the checksum using iptables
+         when such packets are sent via a virtual network device.
 
          To compile it as a module, choose M here.  If unsure, say N.
 
index 9f14b0d..51c5d7e 100644 (file)
@@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
 };
 #endif
 
+static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
+{
+       u8 nfproto = (unsigned long)_nfproto;
+
+       if (nf_ct_l3num(ct) != nfproto)
+               return 0;
+
+       if (nf_ct_protonum(ct) == IPPROTO_TCP &&
+           ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
+               ct->proto.tcp.seen[0].td_maxwin = 0;
+               ct->proto.tcp.seen[1].td_maxwin = 0;
+       }
+
+       return 0;
+}
+
 static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
 {
        struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+       bool fixup_needed = false;
        int err = 0;
 
        mutex_lock(&nf_ct_proto_mutex);
@@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
                                            ARRAY_SIZE(ipv4_conntrack_ops));
                if (err)
                        cnet->users4 = 0;
+               else
+                       fixup_needed = true;
                break;
 #if IS_ENABLED(CONFIG_IPV6)
        case NFPROTO_IPV6:
@@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
                                            ARRAY_SIZE(ipv6_conntrack_ops));
                if (err)
                        cnet->users6 = 0;
+               else
+                       fixup_needed = true;
                break;
 #endif
        default:
@@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
        }
  out_unlock:
        mutex_unlock(&nf_ct_proto_mutex);
+
+       if (fixup_needed)
+               nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
+                                         (void *)(unsigned long)nfproto, 0, 0);
+
        return err;
 }
 
index 8c58f96..f3f91ed 100644 (file)
@@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
                        timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
+
+       timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
        return 0;
 }
 
@@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
        [CTA_TIMEOUT_DCCP_CLOSING]      = { .type = NLA_U32 },
        [CTA_TIMEOUT_DCCP_TIMEWAIT]     = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 /* template, data assigned later */
@@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
                dn->dccp_timeout[CT_DCCP_CLOSEREQ]      = 64 * HZ;
                dn->dccp_timeout[CT_DCCP_CLOSING]       = 64 * HZ;
                dn->dccp_timeout[CT_DCCP_TIMEWAIT]      = 2 * DCCP_MSL;
+
+               /* timeouts[0] is unused, make it same as SYN_SENT so
+                * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+                */
+               dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
        }
 
        return dccp_kmemdup_sysctl_table(net, pn, dn);
@@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
@@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
                .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
                .nla_policy     = dccp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
@@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = dccp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = dccp_timeout_obj_to_nlattr,
@@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
                .obj_size       = sizeof(unsigned int) * CT_DCCP_MAX,
                .nla_policy     = dccp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = dccp_init_net,
        .get_net_proto          = dccp_get_net_proto,
 };
index ac4a0b2..1df3244 100644 (file)
@@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
        return ret;
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -113,7 +113,7 @@ static const struct nla_policy
 generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
        [CTA_TIMEOUT_GENERIC_TIMEOUT]   = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table generic_sysctl_table[] = {
@@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
        .pkt_to_tuple           = generic_pkt_to_tuple,
        .packet                 = generic_packet,
        .new                    = generic_new,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = generic_timeout_nlattr_to_obj,
                .obj_to_nlattr  = generic_timeout_obj_to_nlattr,
@@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = generic_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = generic_init_net,
        .get_net_proto          = generic_get_net_proto,
 };
index d163225..650eb4f 100644 (file)
@@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct)
                nf_ct_gre_keymap_destroy(master);
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
        [CTA_TIMEOUT_GRE_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_GRE_REPLIED]       = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 static int gre_init_net(struct net *net, u_int16_t proto)
 {
@@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
        .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
        .nla_policy      = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout    = {
                .nlattr_to_obj  = gre_timeout_nlattr_to_obj,
                .obj_to_nlattr  = gre_timeout_obj_to_nlattr,
@@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
                .obj_size       = sizeof(unsigned int) * GRE_CT_MAX,
                .nla_policy     = gre_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .net_id         = &proto_gre_net_id,
        .init_net       = gre_init_net,
 };
index 036670b..43c7e1a 100644 (file)
@@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -313,7 +313,7 @@ static const struct nla_policy
 icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
        [CTA_TIMEOUT_ICMP_TIMEOUT]      = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table icmp_sysctl_table[] = {
@@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
        .nlattr_to_tuple        = icmp_nlattr_to_tuple,
        .nla_policy             = icmp_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = icmp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = icmp_timeout_obj_to_nlattr,
@@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = icmp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = icmp_init_net,
        .get_net_proto          = icmp_get_net_proto,
 };
index bed07b9..97e40f7 100644 (file)
@@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -314,7 +314,7 @@ static const struct nla_policy
 icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
        [CTA_TIMEOUT_ICMPV6_TIMEOUT]    = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table icmpv6_sysctl_table[] = {
@@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
        .nlattr_to_tuple        = icmpv6_nlattr_to_tuple,
        .nla_policy             = icmpv6_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = icmpv6_timeout_nlattr_to_obj,
                .obj_to_nlattr  = icmpv6_timeout_obj_to_nlattr,
@@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
                .obj_size       = sizeof(unsigned int),
                .nla_policy     = icmpv6_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = icmpv6_init_net,
        .get_net_proto          = icmpv6_get_net_proto,
 };
index 8d1e085..e4d738d 100644 (file)
@@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
                        timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
                }
        }
+
+       timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
        return 0;
 }
 
@@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
        [CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]      = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 
 #ifdef CONFIG_SYSCTL
@@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
 
                for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
                        sn->timeouts[i] = sctp_timeouts[i];
+
+               /* timeouts[0] is unused, init it so ->timeouts[0] contains
+                * 'new' timeout, like udp or icmp.
+                */
+               sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
        }
 
        return sctp_kmemdup_sysctl_table(pn, sn);
@@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
@@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
                .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
                .nla_policy     = sctp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
@@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nlattr_to_tuple        = nf_ct_port_nlattr_to_tuple,
        .nla_policy             = nf_ct_port_nla_policy,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = sctp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = sctp_timeout_obj_to_nlattr,
@@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
                .obj_size       = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
                .nla_policy     = sctp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#endif
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = sctp_init_net,
        .get_net_proto          = sctp_get_net_proto,
 };
index d80d322..247b897 100644 (file)
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE        ( \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
        NLA_ALIGN(NLA_HDRLEN + 1) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-       NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+       NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
@@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                timeouts[TCP_CONNTRACK_SYN_SENT] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
        }
+
        if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
                timeouts[TCP_CONNTRACK_SYN_RECV] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
@@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                timeouts[TCP_CONNTRACK_UNACK] =
                        ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
        }
+
+       timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
        return 0;
 }
 
@@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
        [CTA_TIMEOUT_TCP_RETRANS]       = { .type = NLA_U32 },
        [CTA_TIMEOUT_TCP_UNACK]         = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table tcp_sysctl_table[] = {
@@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
                for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
                        tn->timeouts[i] = tcp_timeouts[i];
 
+               /* timeouts[0] is unused, make it same as SYN_SENT so
+                * ->timeouts[0] contains 'new' timeout, like udp or icmp.
+                */
+               tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
                tn->tcp_loose = nf_ct_tcp_loose;
                tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
                tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
@@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
        .nlattr_size            = TCP_NLATTR_SIZE,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
@@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
                                        TCP_CONNTRACK_TIMEOUT_MAX,
                .nla_policy     = tcp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
@@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
        .nlattr_tuple_size      = tcp_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = tcp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = tcp_timeout_obj_to_nlattr,
@@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
                                        TCP_CONNTRACK_TIMEOUT_MAX,
                .nla_policy     = tcp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = tcp_init_net,
        .get_net_proto          = tcp_get_net_proto,
 };
index 7a1b898..3065fb8 100644 (file)
@@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
        [CTA_TIMEOUT_UDP_UNREPLIED]     = { .type = NLA_U32 },
        [CTA_TIMEOUT_UDP_REPLIED]       = { .type = NLA_U32 },
 };
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table udp_sysctl_table[] = {
@@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
@@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
        .nlattr_tuple_size      = nf_ct_port_nlattr_tuple_size,
        .nla_policy             = nf_ct_port_nla_policy,
 #endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
        .ctnl_timeout           = {
                .nlattr_to_obj  = udp_timeout_nlattr_to_obj,
                .obj_to_nlattr  = udp_timeout_obj_to_nlattr,
@@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
                .obj_size       = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
                .nla_policy     = udp_timeout_nla_policy,
        },
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
        .init_net               = udp_init_net,
        .get_net_proto          = udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
 #endif
-#include <net/netfilter/nf_conntrack_timeout.h>
index 1dca568..2cfb173 100644 (file)
@@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
        }
        set->ndeact++;
 
+       nft_set_elem_deactivate(ctx->net, set, elem);
        nft_trans_elem_set(trans) = set;
        nft_trans_elem(trans) = *elem;
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
index d46a236..a30f8ba 100644 (file)
@@ -489,8 +489,8 @@ err:
        return err;
 }
 
-static struct ctnl_timeout *
-ctnl_timeout_find_get(struct net *net, const char *name)
+static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
+                                                  const char *name)
 {
        struct ctnl_timeout *timeout, *matching = NULL;
 
@@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
                break;
        }
 err:
-       return matching;
+       return matching ? &matching->timeout : NULL;
 }
 
 static void ctnl_timeout_put(struct nf_ct_timeout *t)
index ea4ba55..d33094f 100644 (file)
@@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
        int err;
 
        if (verdict == NF_ACCEPT ||
+           verdict == NF_REPEAT ||
            verdict == NF_STOP) {
                rcu_read_lock();
                ct_hook = rcu_dereference(nf_ct_hook);
index 26a8bae..5dd8774 100644 (file)
@@ -799,7 +799,7 @@ err:
 }
 
 struct nft_ct_timeout_obj {
-       struct nf_conn          *tmpl;
+       struct nf_ct_timeout    *timeout;
        u8                      l4proto;
 };
 
@@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
 {
        const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
        struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
-       struct sk_buff *skb = pkt->skb;
+       struct nf_conn_timeout *timeout;
+       const unsigned int *values;
+
+       if (priv->l4proto != pkt->tprot)
+               return;
 
-       if (ct ||
-           priv->l4proto != pkt->tprot)
+       if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
                return;
 
-       nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
+       timeout = nf_ct_timeout_find(ct);
+       if (!timeout) {
+               timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
+               if (!timeout) {
+                       regs->verdict.code = NF_DROP;
+                       return;
+               }
+       }
+
+       rcu_assign_pointer(timeout->timeout, priv->timeout);
+
+       /* adjust the timeout as per 'new' state. ct is unconfirmed,
+        * so the current timestamp must not be added.
+        */
+       values = nf_ct_timeout_data(timeout);
+       if (values)
+               nf_ct_refresh(ct, pkt->skb, values[0]);
 }
 
 static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
                                   const struct nlattr * const tb[],
                                   struct nft_object *obj)
 {
-       const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
        struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
        const struct nf_conntrack_l4proto *l4proto;
-       struct nf_conn_timeout *timeout_ext;
        struct nf_ct_timeout *timeout;
        int l3num = ctx->family;
-       struct nf_conn *tmpl;
        __u8 l4num;
        int ret;
 
@@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
 
        timeout->l3num = l3num;
        timeout->l4proto = l4proto;
-       tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
-       if (!tmpl) {
-               ret = -ENOMEM;
-               goto err_free_timeout;
-       }
-
-       timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
-       if (!timeout_ext) {
-               ret = -ENOMEM;
-               goto err_free_tmpl;
-       }
 
        ret = nf_ct_netns_get(ctx->net, ctx->family);
        if (ret < 0)
-               goto err_free_tmpl;
-
-       priv->tmpl = tmpl;
+               goto err_free_timeout;
 
+       priv->timeout = timeout;
        return 0;
 
-err_free_tmpl:
-       nf_ct_tmpl_free(tmpl);
 err_free_timeout:
        kfree(timeout);
 err_proto_put:
@@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
                                       struct nft_object *obj)
 {
        struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
-       struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
-       struct nf_ct_timeout *timeout;
+       struct nf_ct_timeout *timeout = priv->timeout;
 
-       timeout = rcu_dereference_raw(t->timeout);
        nf_ct_untimeout(ctx->net, timeout);
        nf_ct_l4proto_put(timeout->l4proto);
        nf_ct_netns_put(ctx->net, ctx->family);
-       nf_ct_tmpl_free(priv->tmpl);
+       kfree(priv->timeout);
 }
 
 static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
                                   struct nft_object *obj, bool reset)
 {
        const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
-       const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
-       const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
+       const struct nf_ct_timeout *timeout = priv->timeout;
        struct nlattr *nest_params;
        int ret;
 
index 5af74b3..a35fb59 100644 (file)
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
        priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
        err = nft_validate_register_store(ctx, priv->dreg, NULL,
-                                         NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+                                         NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
        if (err < 0)
                return err;
 
index 55e2d92..0e5ec12 100644 (file)
@@ -355,12 +355,11 @@ cont:
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+       struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb = NULL;
-       struct rb_node *node, *prev = NULL;
-       struct nft_rbtree_elem *rbe;
        struct nft_rbtree *priv;
+       struct rb_node *node;
        struct nft_set *set;
-       int i;
 
        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set  = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                if (nft_rbtree_interval_end(rbe)) {
-                       prev = node;
+                       rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
                if (nft_set_elem_mark_busy(&rbe->ext))
                        continue;
 
+               if (rbe_prev) {
+                       rb_erase(&rbe_prev->node, &priv->root);
+                       rbe_prev = NULL;
+               }
                gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                if (!gcb)
                        break;
 
                atomic_dec(&set->nelems);
                nft_set_gc_batch_add(gcb, rbe);
+               rbe_prev = rbe;
 
-               if (prev) {
-                       rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+               if (rbe_end) {
                        atomic_dec(&set->nelems);
-                       nft_set_gc_batch_add(gcb, rbe);
-                       prev = NULL;
+                       nft_set_gc_batch_add(gcb, rbe_end);
+                       rb_erase(&rbe_end->node, &priv->root);
+                       rbe_end = NULL;
                }
                node = rb_next(node);
                if (!node)
                        break;
        }
-       if (gcb) {
-               for (i = 0; i < gcb->head.cnt; i++) {
-                       rbe = gcb->elems[i];
-                       rb_erase(&rbe->node, &priv->root);
-               }
-       }
+       if (rbe_prev)
+               rb_erase(&rbe_prev->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
 
index 9f4151e..6c7aa6a 100644 (file)
@@ -16,6 +16,9 @@
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_CHECKSUM.h>
 
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
 MODULE_DESCRIPTION("Xtables: checksum modification");
@@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM");
 static unsigned int
 checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
+       if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
                skb_checksum_help(skb);
 
        return XT_CONTINUE;
@@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
 static int checksum_tg_check(const struct xt_tgchk_param *par)
 {
        const struct xt_CHECKSUM_info *einfo = par->targinfo;
+       const struct ip6t_ip6 *i6 = par->entryinfo;
+       const struct ipt_ip *i4 = par->entryinfo;
 
        if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
                pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
@@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
        if (!einfo->operation)
                return -EINVAL;
 
+       switch (par->family) {
+       case NFPROTO_IPV4:
+               if (i4->proto == IPPROTO_UDP &&
+                   (i4->invflags & XT_INV_PROTO) == 0)
+                       return 0;
+               break;
+       case NFPROTO_IPV6:
+               if ((i6->flags & IP6T_F_PROTO) &&
+                   i6->proto == IPPROTO_UDP &&
+                   (i6->invflags & XT_INV_PROTO) == 0)
+                       return 0;
+               break;
+       }
+
+       pr_warn_once("CHECKSUM should be avoided.  If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
        return 0;
 }
 
index dfbdbb2..51d0c25 100644 (file)
@@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
 static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
 {
        struct xt_cluster_match_info *info = par->matchinfo;
+       int ret;
 
        if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
                pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
@@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
                pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
                return -EDOM;
        }
-       return 0;
+
+       ret = nf_ct_netns_get(par->net, par->family);
+       if (ret < 0)
+               pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+                                   par->family);
+       return ret;
+}
+
+static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
+{
+       nf_ct_netns_put(par->net, par->family);
 }
 
 static struct xt_match xt_cluster_match __read_mostly = {
@@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
        .match          = xt_cluster_mt,
        .checkentry     = xt_cluster_mt_checkentry,
        .matchsize      = sizeof(struct xt_cluster_match_info),
+       .destroy        = xt_cluster_mt_destroy,
        .me             = THIS_MODULE,
 };
 
index 9b16402..3e7d259 100644 (file)
@@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 static void *dl_seq_start(struct seq_file *s, loff_t *pos)
        __acquires(htable->lock)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket;
 
        spin_lock_bh(&htable->lock);
@@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
 
 static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
 
        *pos = ++(*bucket);
@@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
 static void dl_seq_stop(struct seq_file *s, void *v)
        __releases(htable->lock)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
 
        if (!IS_ERR(bucket))
@@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
                               struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
 static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
                            struct seq_file *s)
 {
-       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
 
        spin_lock(&ent->lock);
        /* recalculate to show accurate numbers */
@@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
 
 static int dl_seq_show_v2(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = (unsigned int *)v;
        struct dsthash_ent *ent;
 
@@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v)
 
 static int dl_seq_show_v1(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
        struct dsthash_ent *ent;
 
@@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v)
 
 static int dl_seq_show(struct seq_file *s, void *v)
 {
-       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+       struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
        unsigned int *bucket = v;
        struct dsthash_ent *ent;
 
index 0472f34..ada144e 100644 (file)
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
        struct sk_buff *pskb = (struct sk_buff *)skb;
        struct sock *sk = skb->sk;
 
-       if (!net_eq(xt_net(par), sock_net(sk)))
+       if (sk && !net_eq(xt_net(par), sock_net(sk)))
                sk = NULL;
 
        if (!sk)
index c070dfc..c92894c 100644 (file)
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
 {
        u32 addr_len;
 
-       if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+       if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+           info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
                addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
                if (addr_len != sizeof(struct in_addr) &&
                    addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
index ac8030c..19cb2e4 100644 (file)
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                }
                create_info = (struct hci_create_pipe_resp *)skb->data;
 
+               if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+
                /* Save the new created pipe and bind with local gate,
                 * the description for skb->data[3] is destination gate id
                 * but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                }
                delete_info = (struct hci_delete_pipe_noti *)skb->data;
 
+               if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+                       status = NFC_HCI_ANY_E_NOK;
+                       goto exit;
+               }
+
                hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
                hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
                break;
index 86a7510..35ae64c 100644 (file)
@@ -1312,6 +1312,10 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
 
        rcu_assign_pointer(help->helper, helper);
        info->helper = helper;
+
+       if (info->nat)
+               request_module("ip_nat_%s", name);
+
        return 0;
 }
 
@@ -1624,10 +1628,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
                OVS_NLERR(log, "Failed to allocate conntrack template");
                return -ENOMEM;
        }
-
-       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
-       nf_conntrack_get(&ct_info.ct->ct_general);
-
        if (helper) {
                err = ovs_ct_add_helper(&ct_info, helper, key, log);
                if (err)
@@ -1639,6 +1639,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
        if (err)
                goto err_free_ct;
 
+       __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+       nf_conntrack_get(&ct_info.ct->ct_general);
        return 0;
 err_free_ct:
        __ovs_ct_free_action(&ct_info);
index 5610061..d6e94dc 100644 (file)
@@ -2715,10 +2715,12 @@ tpacket_error:
                        }
                }
 
-               if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
-                                                             vio_le())) {
-                       tp_len = -EINVAL;
-                       goto tpacket_error;
+               if (po->has_vnet_hdr) {
+                       if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       virtio_net_hdr_set_proto(skb, vnet_hdr);
                }
 
                skb->destructor = tpacket_destruct_skb;
@@ -2915,6 +2917,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
                if (err)
                        goto out_free;
                len += sizeof(vnet_hdr);
+               virtio_net_hdr_set_proto(skb, &vnet_hdr);
        }
 
        skb_probe_transport_header(skb, reserve);
@@ -4137,36 +4140,52 @@ static const struct vm_operations_struct packet_mmap_ops = {
        .close  =       packet_mm_close,
 };
 
-static void free_pg_vec(struct pgv *pg_vec, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+                       unsigned int len)
 {
        int i;
 
        for (i = 0; i < len; i++) {
                if (likely(pg_vec[i].buffer)) {
-                       kvfree(pg_vec[i].buffer);
+                       if (is_vmalloc_addr(pg_vec[i].buffer))
+                               vfree(pg_vec[i].buffer);
+                       else
+                               free_pages((unsigned long)pg_vec[i].buffer,
+                                          order);
                        pg_vec[i].buffer = NULL;
                }
        }
        kfree(pg_vec);
 }
 
-static char *alloc_one_pg_vec_page(unsigned long size)
+static char *alloc_one_pg_vec_page(unsigned long order)
 {
        char *buffer;
+       gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+                         __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-       buffer = kvzalloc(size, GFP_KERNEL);
+       buffer = (char *) __get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;
 
-       buffer = kvzalloc(size, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+       /* __get_free_pages failed, fall back to vmalloc */
+       buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
+       if (buffer)
+               return buffer;
+
+       /* vmalloc failed, lets dig into swap here */
+       gfp_flags &= ~__GFP_NORETRY;
+       buffer = (char *) __get_free_pages(gfp_flags, order);
+       if (buffer)
+               return buffer;
 
-       return buffer;
+       /* complete and utter failure */
+       return NULL;
 }
 
-static struct pgv *alloc_pg_vec(struct tpacket_req *req)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 {
        unsigned int block_nr = req->tp_block_nr;
-       unsigned long size = req->tp_block_size;
        struct pgv *pg_vec;
        int i;
 
@@ -4175,7 +4194,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req)
                goto out;
 
        for (i = 0; i < block_nr; i++) {
-               pg_vec[i].buffer = alloc_one_pg_vec_page(size);
+               pg_vec[i].buffer = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i].buffer))
                        goto out_free_pgvec;
        }
@@ -4184,7 +4203,7 @@ out:
        return pg_vec;
 
 out_free_pgvec:
-       free_pg_vec(pg_vec, block_nr);
+       free_pg_vec(pg_vec, order, block_nr);
        pg_vec = NULL;
        goto out;
 }
@@ -4194,9 +4213,9 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 {
        struct pgv *pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
+       int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
-       int was_running;
        __be16 num;
        int err = -EINVAL;
        /* Added to avoid minimal code churn */
@@ -4258,7 +4277,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                        goto out;
 
                err = -ENOMEM;
-               pg_vec = alloc_pg_vec(req);
+               order = get_order(req->tp_block_size);
+               pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
                switch (po->tp_version) {
@@ -4312,6 +4332,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);
 
+               swap(rb->pg_vec_order, order);
                swap(rb->pg_vec_len, req->tp_block_nr);
 
                rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
@@ -4337,7 +4358,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        }
 
        if (pg_vec)
-               free_pg_vec(pg_vec, req->tp_block_nr);
+               free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
        return err;
 }
index 8f50036..3bb7c5f 100644 (file)
@@ -64,6 +64,7 @@ struct packet_ring_buffer {
        unsigned int            frame_size;
        unsigned int            frame_max;
 
+       unsigned int            pg_vec_order;
        unsigned int            pg_vec_pages;
        unsigned int            pg_vec_len;
 
index 01b3bd6..b909211 100644 (file)
@@ -1,6 +1,6 @@
 
 config RDS
-       tristate "The RDS Protocol"
+       tristate "The Reliable Datagram Sockets Protocol"
        depends on INET
        ---help---
          The RDS (Reliable Datagram Sockets) protocol provides reliable,
index 3ab5578..762d2c6 100644 (file)
@@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
        struct rds_sock *rs;
 
        __rds_create_bind_key(key, addr, port, scope_id);
-       rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
+       rcu_read_lock();
+       rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
        if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
                rds_sock_addref(rs);
        else
                rs = NULL;
+       rcu_read_unlock();
 
        rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
                 ntohs(port));
@@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
+       sock_set_flag(sk, SOCK_RCU_FREE);
        ret = rds_add_bound(rs, binding_addr, &port, scope_id);
        if (ret)
                goto out;
index c1d9764..eba75c1 100644 (file)
@@ -341,15 +341,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn,
 
        if (rds_conn_state(conn) == RDS_CONN_UP) {
                struct rds_ib_device *rds_ibdev;
-               struct rdma_dev_addr *dev_addr;
 
                ic = conn->c_transport_data;
-               dev_addr = &ic->i_cm_id->route.addr.dev_addr;
-               rdma_addr_get_sgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->src_gid);
-               rdma_addr_get_dgid(dev_addr,
-                                  (union ib_gid *)&iinfo6->dst_gid);
-
+               rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid,
+                              (union ib_gid *)&iinfo6->dst_gid);
                rds_ibdev = ic->rds_ibdev;
                iinfo6->max_send_wr = ic->i_send_ring.w_nr;
                iinfo6->max_recv_wr = ic->i_recv_ring.w_nr;
index 73427ff..71ff356 100644 (file)
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
 int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
 
 /* ib_stats.c */
-DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
 #define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
 #define rds_ib_stats_add(member, count) \
                rds_stats_add_which(rds_ib_stats, member, count)
index 2c7b7c3..b9bbcf3 100644 (file)
@@ -37,7 +37,6 @@
 #include <net/tcp.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
-#include <net/tcp.h>
 #include <net/addrconf.h>
 
 #include "rds.h"
index 00192a9..0f84658 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/rfkill.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
index c975587..ef95541 100644 (file)
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
 struct rxrpc_connection;
 
 /*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark.  skb->priority is used
+ * to pass supplementary information.
  */
 enum rxrpc_skb_mark {
-       RXRPC_SKB_MARK_DATA,            /* data message */
-       RXRPC_SKB_MARK_FINAL_ACK,       /* final ACK received message */
-       RXRPC_SKB_MARK_BUSY,            /* server busy message */
-       RXRPC_SKB_MARK_REMOTE_ABORT,    /* remote abort message */
-       RXRPC_SKB_MARK_LOCAL_ABORT,     /* local abort message */
-       RXRPC_SKB_MARK_NET_ERROR,       /* network error message */
-       RXRPC_SKB_MARK_LOCAL_ERROR,     /* local error message */
-       RXRPC_SKB_MARK_NEW_CALL,        /* local error message */
+       RXRPC_SKB_MARK_REJECT_BUSY,     /* Reject with BUSY */
+       RXRPC_SKB_MARK_REJECT_ABORT,    /* Reject with ABORT (code in skb->priority) */
 };
 
 /*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
        struct hlist_node       hash_link;
        struct rxrpc_local      *local;
        struct hlist_head       error_targets;  /* targets for net error distribution */
-       struct work_struct      error_distributor;
        struct rb_root          service_conns;  /* Service connections */
        struct list_head        keepalive_link; /* Link in net->peer_keepalive[] */
        time64_t                last_tx_at;     /* Last time packet sent here */
@@ -304,8 +298,6 @@ struct rxrpc_peer {
        unsigned int            maxdata;        /* data size (MTU - hdrsize) */
        unsigned short          hdrsize;        /* header size (IP + UDP + RxRPC) */
        int                     debug_id;       /* debug ID for printks */
-       int                     error_report;   /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
        struct sockaddr_rxrpc   srx;            /* remote address */
 
        /* calculated RTT cache */
@@ -463,6 +455,16 @@ struct rxrpc_connection {
        u8                      out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
 };
 
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+       return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+       return !rxrpc_to_server(sp);
+}
+
 /*
  * Flags in call->flags.
  */
@@ -717,6 +719,8 @@ extern struct workqueue_struct *rxrpc_workqueue;
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
+                                          struct rxrpc_sock *,
+                                          struct rxrpc_peer *,
                                           struct rxrpc_connection *,
                                           struct sk_buff *);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
@@ -908,7 +912,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
-                                                  struct sk_buff *);
+                                                  struct sk_buff *,
+                                                  struct rxrpc_peer **);
 void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
 void rxrpc_disconnect_call(struct rxrpc_call *);
 void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -1031,7 +1036,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
  * peer_event.c
  */
 void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
 void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
                        rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
 void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1044,13 +1048,11 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
 struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
                                     struct sockaddr_rxrpc *, gfp_t);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
-                                             struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *, struct rxrpc_peer *);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
 void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
 
 /*
  * proc.c
index 9d1e298..9c7f26d 100644 (file)
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
  */
 static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
+                                                   struct rxrpc_peer *peer,
                                                    struct rxrpc_connection *conn,
                                                    struct sk_buff *skb)
 {
        struct rxrpc_backlog *b = rx->backlog;
-       struct rxrpc_peer *peer, *xpeer;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                return NULL;
 
        if (!conn) {
-               /* No connection.  We're going to need a peer to start off
-                * with.  If one doesn't yet exist, use a spare from the
-                * preallocation set.  We dump the address into the spare in
-                * anticipation - and to save on stack space.
-                */
-               xpeer = b->peer_backlog[peer_tail];
-               if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
-                       return NULL;
-
-               peer = rxrpc_lookup_incoming_peer(local, xpeer);
-               if (peer == xpeer) {
+               if (peer && !rxrpc_get_peer_maybe(peer))
+                       peer = NULL;
+               if (!peer) {
+                       peer = b->peer_backlog[peer_tail];
+                       if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+                               return NULL;
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
+
+                       rxrpc_new_incoming_peer(local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -335,45 +332,31 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * The call is returned with the user access mutex held.
  */
 struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+                                          struct rxrpc_sock *rx,
+                                          struct rxrpc_peer *peer,
                                           struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       struct rxrpc_sock *rx;
        struct rxrpc_call *call;
-       u16 service_id = sp->hdr.serviceId;
 
        _enter("");
 
-       /* Get the socket providing the service */
-       rx = rcu_dereference(local->service);
-       if (rx && (service_id == rx->srx.srx_service ||
-                  service_id == rx->second_service))
-               goto found_service;
-
-       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_INVALID_OPERATION, EOPNOTSUPP);
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
-       skb->priority = RX_INVALID_OPERATION;
-       _leave(" = NULL [service]");
-       return NULL;
-
-found_service:
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-               skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                _leave(" = NULL [close]");
                call = NULL;
                goto out;
        }
 
-       call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+       call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
        if (!call) {
-               skb->mark = RXRPC_SKB_MARK_BUSY;
+               skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
                _leave(" = NULL [busy]");
                call = NULL;
                goto out;
index 9486293..799f75b 100644 (file)
@@ -400,7 +400,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        rcu_assign_pointer(conn->channels[chan].call, call);
 
        spin_lock(&conn->params.peer->lock);
-       hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
        spin_unlock(&conn->params.peer->lock);
 
        _net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
index f8f3718..8acf74f 100644 (file)
@@ -710,8 +710,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
        }
 
        spin_lock_bh(&call->conn->params.peer->lock);
-       hlist_add_head(&call->error_link,
-                      &call->conn->params.peer->error_targets);
+       hlist_add_head_rcu(&call->error_link,
+                          &call->conn->params.peer->error_targets);
        spin_unlock_bh(&call->conn->params.peer->lock);
 
 out:
index 77440a3..885dae8 100644 (file)
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  * If successful, a pointer to the connection is returned, but no ref is taken.
  * NULL is returned if there is no match.
  *
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
  * The caller must be holding the RCU read lock.
  */
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
-                                                  struct sk_buff *skb)
+                                                  struct sk_buff *skb,
+                                                  struct rxrpc_peer **_peer)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_conn_proto k;
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
                goto not_found;
 
-       k.epoch = sp->hdr.epoch;
-       k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
-
        /* We may have to handle mixing IPv4 and IPv6 */
        if (srx.transport.family != local->srx.transport.family) {
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
        k.epoch = sp->hdr.epoch;
        k.cid   = sp->hdr.cid & RXRPC_CIDMASK;
 
-       if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+       if (rxrpc_to_server(sp)) {
                /* We need to look up service connections by the full protocol
                 * parameter set.  We look up the peer first as an intermediate
                 * step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
                peer = rxrpc_lookup_peer_rcu(local, &srx);
                if (!peer)
                        goto not_found;
+               *_peer = peer;
                conn = rxrpc_find_service_conn_rcu(peer, skb);
                if (!conn || atomic_read(&conn->usage) == 0)
                        goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
        call->peer->cong_cwnd = call->cong_cwnd;
 
        spin_lock_bh(&conn->params.peer->lock);
-       hlist_del_init(&call->error_link);
+       hlist_del_rcu(&call->error_link);
        spin_unlock_bh(&conn->params.peer->lock);
 
        if (rxrpc_is_client_call(call))
index cfdc199..800f5b8 100644 (file)
@@ -622,13 +622,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
                if (!skb)
                        continue;
 
+               sent_at = skb->tstamp;
+               smp_rmb(); /* Read timestamp before serial. */
                sp = rxrpc_skb(skb);
                if (sp->hdr.serial != orig_serial)
                        continue;
-               smp_rmb();
-               sent_at = skb->tstamp;
                goto found;
        }
+
        return;
 
 found:
@@ -1124,12 +1125,14 @@ void rxrpc_data_ready(struct sock *udp_sk)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan;
-       struct rxrpc_call *call;
+       struct rxrpc_call *call = NULL;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_local *local = udp_sk->sk_user_data;
+       struct rxrpc_peer *peer = NULL;
+       struct rxrpc_sock *rx = NULL;
        struct sk_buff *skb;
        unsigned int channel;
-       int ret, skew;
+       int ret, skew = 0;
 
        _enter("%p", udp_sk);
 
@@ -1143,6 +1146,9 @@ void rxrpc_data_ready(struct sock *udp_sk)
                return;
        }
 
+       if (skb->tstamp == 0)
+               skb->tstamp = ktime_get_real();
+
        rxrpc_new_skb(skb, rxrpc_skb_rx_received);
 
        _net("recv skb %p", skb);
@@ -1177,46 +1183,75 @@ void rxrpc_data_ready(struct sock *udp_sk)
 
        trace_rxrpc_rx_packet(sp);
 
-       _net("Rx RxRPC %s ep=%x call=%x:%x",
-            sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
-            sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
-       if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
-           !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
-               _proto("Rx Bad Packet Type %u", sp->hdr.type);
-               goto bad_message;
-       }
-
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
-               if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+               if (rxrpc_to_client(sp))
                        goto discard;
                rxrpc_post_packet_to_local(local, skb);
                goto out;
 
        case RXRPC_PACKET_TYPE_BUSY:
-               if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+               if (rxrpc_to_server(sp))
                        goto discard;
                /* Fall through */
+       case RXRPC_PACKET_TYPE_ACK:
+       case RXRPC_PACKET_TYPE_ACKALL:
+               if (sp->hdr.callNumber == 0)
+                       goto bad_message;
+               /* Fall through */
+       case RXRPC_PACKET_TYPE_ABORT:
+               break;
 
        case RXRPC_PACKET_TYPE_DATA:
-               if (sp->hdr.callNumber == 0)
+               if (sp->hdr.callNumber == 0 ||
+                   sp->hdr.seq == 0)
                        goto bad_message;
                if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
                    !rxrpc_validate_jumbo(skb))
                        goto bad_message;
                break;
 
+       case RXRPC_PACKET_TYPE_CHALLENGE:
+               if (rxrpc_to_server(sp))
+                       goto discard;
+               break;
+       case RXRPC_PACKET_TYPE_RESPONSE:
+               if (rxrpc_to_client(sp))
+                       goto discard;
+               break;
+
                /* Packet types 9-11 should just be ignored. */
        case RXRPC_PACKET_TYPE_PARAMS:
        case RXRPC_PACKET_TYPE_10:
        case RXRPC_PACKET_TYPE_11:
                goto discard;
+
+       default:
+               _proto("Rx Bad Packet Type %u", sp->hdr.type);
+               goto bad_message;
        }
 
+       if (sp->hdr.serviceId == 0)
+               goto bad_message;
+
        rcu_read_lock();
 
-       conn = rxrpc_find_connection_rcu(local, skb);
+       if (rxrpc_to_server(sp)) {
+               /* Weed out packets to services we're not offering.  Packets
+                * that would begin a call are explicitly rejected and the rest
+                * are just discarded.
+                */
+               rx = rcu_dereference(local->service);
+               if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+                           sp->hdr.serviceId != rx->second_service)) {
+                       if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+                           sp->hdr.seq == 1)
+                               goto unsupported_service;
+                       goto discard_unlock;
+               }
+       }
+
+       conn = rxrpc_find_connection_rcu(local, skb, &peer);
        if (conn) {
                if (sp->hdr.securityIndex != conn->security_ix)
                        goto wrong_security;
@@ -1280,7 +1315,7 @@ void rxrpc_data_ready(struct sock *udp_sk)
                call = rcu_dereference(chan->call);
 
                if (sp->hdr.callNumber > chan->call_id) {
-                       if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
+                       if (rxrpc_to_client(sp)) {
                                rcu_read_unlock();
                                goto reject_packet;
                        }
@@ -1297,19 +1332,15 @@ void rxrpc_data_ready(struct sock *udp_sk)
                        if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
                                set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
                }
-       } else {
-               skew = 0;
-               call = NULL;
        }
 
        if (!call || atomic_read(&call->usage) == 0) {
-               if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
-                   sp->hdr.callNumber == 0 ||
+               if (rxrpc_to_client(sp) ||
                    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
                        goto bad_message_unlock;
                if (sp->hdr.seq != 1)
                        goto discard_unlock;
-               call = rxrpc_new_incoming_call(local, conn, skb);
+               call = rxrpc_new_incoming_call(local, rx, peer, conn, skb);
                if (!call) {
                        rcu_read_unlock();
                        goto reject_packet;
@@ -1340,6 +1371,13 @@ wrong_security:
        skb->priority = RXKADINCONSISTENCY;
        goto post_abort;
 
+unsupported_service:
+       rcu_read_unlock();
+       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                         RX_INVALID_OPERATION, EOPNOTSUPP);
+       skb->priority = RX_INVALID_OPERATION;
+       goto post_abort;
+
 reupgrade:
        rcu_read_unlock();
        trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
@@ -1354,7 +1392,7 @@ bad_message:
 protocol_error:
        skb->priority = RX_PROTOCOL_ERROR;
 post_abort:
-       skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
 reject_packet:
        trace_rxrpc_rx_done(skb->mark, skb->priority);
        rxrpc_reject_packet(local, skb);
index 777c3ed..94d234e 100644 (file)
@@ -135,10 +135,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
        }
 
        switch (local->srx.transport.family) {
-       case AF_INET:
-               /* we want to receive ICMP errors */
+       case AF_INET6:
+               /* we want to receive ICMPv6 errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -146,19 +146,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IP_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+               opt = IPV6_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
-               break;
 
-       case AF_INET6:
+               /* Fall through and set IPv4 options too otherwise we don't get
+                * errors from IPv4 packets sent through the IPv6 socket.
+                */
+
+       case AF_INET:
                /* we want to receive ICMP errors */
                opt = 1;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
@@ -166,13 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
                }
 
                /* we want to set the don't fragment bit */
-               opt = IPV6_PMTUDISC_DO;
-               ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+               opt = IP_PMTUDISC_DO;
+               ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
                                        (char *) &opt, sizeof(opt));
                if (ret < 0) {
                        _debug("setsockopt failed");
                        goto error;
                }
+
+               /* We want receive timestamps. */
+               opt = 1;
+               ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+                                       (char *)&opt, sizeof(opt));
+               if (ret < 0) {
+                       _debug("setsockopt failed");
+                       goto error;
+               }
                break;
 
        default:
index ccf5de1..e8fb892 100644 (file)
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
-       ktime_t now;
        size_t len, n;
        int ret;
        u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                /* We need to stick a time in before we send the packet in case
                 * the reply gets back before kernel_sendmsg() completes - but
                 * asking UDP to send the packet can take a relatively long
-                * time, so we update the time after, on the assumption that
-                * the packet transmission is more likely to happen towards the
-                * end of the kernel_sendmsg() call.
+                * time.
                 */
                call->ping_time = ktime_get_real();
                set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        }
 
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
-       now = ktime_get_real();
-       if (ping)
-               call->ping_time = now;
        conn->params.peer->last_tx_at = ktime_get_seconds();
        if (ret < 0)
                trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
 
        /* If our RTT cache needs working on, request an ACK.  Also request
         * ACKs if a DATA packet appears to have been lost.
+        *
+        * However, we mustn't request an ACK on the last reply packet of a
+        * service call, lest OpenAFS incorrectly send us an ACK with some
+        * soft-ACKs in it and then never follow up with a proper hard ACK.
         */
-       if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+       if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+            rxrpc_to_server(sp)
+            ) &&
            (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
             retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                goto send_fragmentable;
 
        down_read(&conn->params.local->defrag_sem);
+
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        /* send the packet by UDP
         * - returns -EMSGSIZE if UDP would have to fragment the packet
         *   to go out of the interface
@@ -413,12 +418,8 @@ done:
        trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
                            retrans, lost);
        if (ret >= 0) {
-               ktime_t now = ktime_get_real();
-               skb->tstamp = now;
-               smp_wmb();
-               sp->hdr.serial = serial;
                if (whdr.flags & RXRPC_REQUEST_ACK) {
-                       call->peer->rtt_last_req = now;
+                       call->peer->rtt_last_req = skb->tstamp;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
                        if (call->peer->rtt_usage > 1) {
                                unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
 
        down_write(&conn->params.local->defrag_sem);
 
+       sp->hdr.serial = serial;
+       smp_wmb(); /* Set serial before timestamp */
+       skb->tstamp = ktime_get_real();
+
        switch (conn->params.local->srx.transport.family) {
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        struct kvec iov[2];
        size_t size;
        __be32 code;
-       int ret;
+       int ret, ioc;
 
        _enter("%d", local->debug_id);
 
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = &code;
        iov[1].iov_len = sizeof(code);
-       size = sizeof(whdr) + sizeof(code);
 
        msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
        msg.msg_flags = 0;
 
        memset(&whdr, 0, sizeof(whdr));
-       whdr.type = RXRPC_PACKET_TYPE_ABORT;
 
        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
                sp = rxrpc_skb(skb);
 
+               switch (skb->mark) {
+               case RXRPC_SKB_MARK_REJECT_BUSY:
+                       whdr.type = RXRPC_PACKET_TYPE_BUSY;
+                       size = sizeof(whdr);
+                       ioc = 1;
+                       break;
+               case RXRPC_SKB_MARK_REJECT_ABORT:
+                       whdr.type = RXRPC_PACKET_TYPE_ABORT;
+                       code = htonl(skb->priority);
+                       size = sizeof(whdr) + sizeof(code);
+                       ioc = 2;
+                       break;
+               default:
+                       rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+                       continue;
+               }
+
                if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
                        msg.msg_namelen = srx.transport_len;
 
-                       code = htonl(skb->priority);
-
                        whdr.epoch      = htonl(sp->hdr.epoch);
                        whdr.cid        = htonl(sp->hdr.cid);
                        whdr.callNumber = htonl(sp->hdr.callNumber);
index 4f9da2f..f3e6fc6 100644 (file)
@@ -23,6 +23,8 @@
 #include "ar-internal.h"
 
 static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+                                  enum rxrpc_call_completion);
 
 /*
  * Find the peer associated with an ICMP packet.
@@ -194,8 +196,6 @@ void rxrpc_error_report(struct sock *sk)
        rcu_read_unlock();
        rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
 
-       /* The ref we obtained is passed off to the work item */
-       __rxrpc_queue_peer_error(peer);
        _leave("");
 }
 
@@ -205,6 +205,7 @@ void rxrpc_error_report(struct sock *sk)
 static void rxrpc_store_error(struct rxrpc_peer *peer,
                              struct sock_exterr_skb *serr)
 {
+       enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
        struct sock_extended_err *ee;
        int err;
 
@@ -255,7 +256,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
        case SO_EE_ORIGIN_NONE:
        case SO_EE_ORIGIN_LOCAL:
                _proto("Rx Received local error { error=%d }", err);
-               err += RXRPC_LOCAL_ERROR_OFFSET;
+               compl = RXRPC_CALL_LOCAL_ERROR;
                break;
 
        case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +265,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
                break;
        }
 
-       peer->error_report = err;
+       rxrpc_distribute_error(peer, err, compl);
 }
 
 /*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
  */
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+                                  enum rxrpc_call_completion compl)
 {
-       struct rxrpc_peer *peer =
-               container_of(work, struct rxrpc_peer, error_distributor);
        struct rxrpc_call *call;
-       enum rxrpc_call_completion compl;
-       int error;
-
-       _enter("");
-
-       error = READ_ONCE(peer->error_report);
-       if (error < RXRPC_LOCAL_ERROR_OFFSET) {
-               compl = RXRPC_CALL_NETWORK_ERROR;
-       } else {
-               compl = RXRPC_CALL_LOCAL_ERROR;
-               error -= RXRPC_LOCAL_ERROR_OFFSET;
-       }
 
-       _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
-
-       spin_lock_bh(&peer->lock);
-
-       while (!hlist_empty(&peer->error_targets)) {
-               call = hlist_entry(peer->error_targets.first,
-                                  struct rxrpc_call, error_link);
-               hlist_del_init(&call->error_link);
+       hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
                rxrpc_see_call(call);
-
-               if (rxrpc_set_call_completion(call, compl, 0, -error))
+               if (call->state < RXRPC_CALL_COMPLETE &&
+                   rxrpc_set_call_completion(call, compl, 0, -error))
                        rxrpc_notify_socket(call);
        }
-
-       spin_unlock_bh(&peer->lock);
-
-       rxrpc_put_peer(peer);
-       _leave("");
 }
 
 /*
index 1dc7648..01a9feb 100644 (file)
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
        struct rxrpc_net *rxnet = local->rxnet;
 
        hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
-               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
-                       if (atomic_read(&peer->usage) == 0)
-                               return NULL;
+               if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+                   atomic_read(&peer->usage) > 0)
                        return peer;
-               }
        }
 
        return NULL;
@@ -222,8 +220,6 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
                atomic_set(&peer->usage, 1);
                peer->local = local;
                INIT_HLIST_HEAD(&peer->error_targets);
-               INIT_WORK(&peer->error_distributor,
-                         &rxrpc_peer_error_distributor);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
                spin_lock_init(&peer->lock);
@@ -299,34 +295,23 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
 }
 
 /*
- * Set up a new incoming peer.  The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer.  There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler) that is the only place we can add new peers.
  */
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
-                                             struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
-       struct rxrpc_peer *peer;
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
-       hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
-       prealloc->local = local;
-       rxrpc_init_peer(prealloc, hash_key);
+       hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+       peer->local = local;
+       rxrpc_init_peer(peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
-
-       /* Need to check that we aren't racing with someone else */
-       peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
-       if (peer && !rxrpc_get_peer_maybe(peer))
-               peer = NULL;
-       if (!peer) {
-               peer = prealloc;
-               hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
-               list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
-       }
-
+       hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+       list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
        spin_unlock(&rxnet->peer_hash_lock);
-       return peer;
 }
 
 /*
@@ -416,21 +401,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
 }
 
 /*
- * Queue a peer record.  This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
-       const void *here = __builtin_return_address(0);
-       int n;
-
-       n = atomic_read(&peer->usage);
-       if (rxrpc_queue_work(&peer->error_distributor))
-               trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
-       else
-               rxrpc_put_peer(peer);
-}
-
-/*
  * Discard a peer record.
  */
 static void __rxrpc_put_peer(struct rxrpc_peer *peer)
index 93da73b..f9cb83c 100644 (file)
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
 #define RXRPC_PACKET_TYPE_10           10      /* Ignored */
 #define RXRPC_PACKET_TYPE_11           11      /* Ignored */
 #define RXRPC_PACKET_TYPE_VERSION      13      /* version string request */
-#define RXRPC_N_PACKET_TYPES           14      /* number of packet types (incl type 0) */
 
        uint8_t         flags;          /* packet flags */
 #define RXRPC_CLIENT_INITIATED 0x01            /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
 
 } __packed;
 
-#define RXRPC_SUPPORTED_PACKET_TYPES (                 \
-               (1 << RXRPC_PACKET_TYPE_DATA) |         \
-               (1 << RXRPC_PACKET_TYPE_ACK) |          \
-               (1 << RXRPC_PACKET_TYPE_BUSY) |         \
-               (1 << RXRPC_PACKET_TYPE_ABORT) |        \
-               (1 << RXRPC_PACKET_TYPE_ACKALL) |       \
-               (1 << RXRPC_PACKET_TYPE_CHALLENGE) |    \
-               (1 << RXRPC_PACKET_TYPE_RESPONSE) |     \
-               /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */   \
-               (1 << RXRPC_PACKET_TYPE_PARAMS) |       \
-               (1 << RXRPC_PACKET_TYPE_10) |           \
-               (1 << RXRPC_PACKET_TYPE_11) |           \
-               (1 << RXRPC_PACKET_TYPE_VERSION))
-
 /*****************************************************************************/
 /*
  * jumbo packet secondary header
index 229d63c..e12f8ef 100644 (file)
@@ -300,21 +300,17 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(tcf_generic_walker);
 
-static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
-                           struct tc_action **a, int bind)
+int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
 {
        struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
 
        spin_lock(&idrinfo->lock);
        p = idr_find(&idrinfo->action_idr, index);
-       if (IS_ERR(p)) {
+       if (IS_ERR(p))
                p = NULL;
-       } else if (p) {
+       else if (p)
                refcount_inc(&p->tcfa_refcnt);
-               if (bind)
-                       atomic_inc(&p->tcfa_bindcnt);
-       }
        spin_unlock(&idrinfo->lock);
 
        if (p) {
@@ -323,23 +319,10 @@ static bool __tcf_idr_check(struct tc_action_net *tn, u32 index,
        }
        return false;
 }
-
-int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
-{
-       return __tcf_idr_check(tn, index, a, 0);
-}
 EXPORT_SYMBOL(tcf_idr_search);
 
-bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
-                  int bind)
+static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
 {
-       return __tcf_idr_check(tn, index, a, bind);
-}
-EXPORT_SYMBOL(tcf_idr_check);
-
-int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
-{
-       struct tcf_idrinfo *idrinfo = tn->idrinfo;
        struct tc_action *p;
        int ret = 0;
 
@@ -370,7 +353,6 @@ int tcf_idr_delete_index(struct tc_action_net *tn, u32 index)
        spin_unlock(&idrinfo->lock);
        return ret;
 }
-EXPORT_SYMBOL(tcf_idr_delete_index);
 
 int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
                   struct tc_action **a, const struct tc_action_ops *ops,
@@ -409,7 +391,6 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 
        p->idrinfo = idrinfo;
        p->ops = ops;
-       INIT_LIST_HEAD(&p->list);
        *a = p;
        return 0;
 err3:
@@ -681,19 +662,30 @@ int tcf_action_destroy(struct tc_action *actions[], int bind)
        return ret;
 }
 
+static int tcf_action_destroy_1(struct tc_action *a, int bind)
+{
+       struct tc_action *actions[] = { a, NULL };
+
+       return tcf_action_destroy(actions, bind);
+}
+
 static int tcf_action_put(struct tc_action *p)
 {
        return __tcf_action_put(p, false);
 }
 
+/* Put all actions in this array, skip those NULL's. */
 static void tcf_action_put_many(struct tc_action *actions[])
 {
        int i;
 
-       for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
+       for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
                struct tc_action *a = actions[i];
-               const struct tc_action_ops *ops = a->ops;
+               const struct tc_action_ops *ops;
 
+               if (!a)
+                       continue;
+               ops = a->ops;
                if (tcf_action_put(a))
                        module_put(ops->owner);
        }
@@ -896,17 +888,16 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
                err = tcf_action_goto_chain_init(a, tp);
                if (err) {
-                       struct tc_action *actions[] = { a, NULL };
-
-                       tcf_action_destroy(actions, bind);
+                       tcf_action_destroy_1(a, bind);
                        NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
                        return ERR_PTR(err);
                }
        }
 
        if (!tcf_action_valid(a->tcfa_action)) {
-               NL_SET_ERR_MSG(extack, "invalid action value, using TC_ACT_UNSPEC instead");
-               a->tcfa_action = TC_ACT_UNSPEC;
+               tcf_action_destroy_1(a, bind);
+               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               return ERR_PTR(-EINVAL);
        }
 
        return a;
@@ -1175,41 +1166,38 @@ err_out:
        return err;
 }
 
-static int tcf_action_delete(struct net *net, struct tc_action *actions[],
-                            int *acts_deleted, struct netlink_ext_ack *extack)
+static int tcf_action_delete(struct net *net, struct tc_action *actions[])
 {
-       u32 act_index;
-       int ret, i;
+       int i;
 
        for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
                struct tc_action *a = actions[i];
                const struct tc_action_ops *ops = a->ops;
-
                /* Actions can be deleted concurrently so we must save their
                 * type and id to search again after reference is released.
                 */
-               act_index = a->tcfa_index;
+               struct tcf_idrinfo *idrinfo = a->idrinfo;
+               u32 act_index = a->tcfa_index;
 
+               actions[i] = NULL;
                if (tcf_action_put(a)) {
                        /* last reference, action was deleted concurrently */
                        module_put(ops->owner);
                } else  {
+                       int ret;
+
                        /* now do the delete */
-                       ret = ops->delete(net, act_index);
-                       if (ret < 0) {
-                               *acts_deleted = i + 1;
+                       ret = tcf_idr_delete_index(idrinfo, act_index);
+                       if (ret < 0)
                                return ret;
-                       }
                }
        }
-       *acts_deleted = i;
        return 0;
 }
 
 static int
 tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
-              int *acts_deleted, u32 portid, size_t attr_size,
-              struct netlink_ext_ack *extack)
+              u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
 {
        int ret;
        struct sk_buff *skb;
@@ -1227,7 +1215,7 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
        }
 
        /* now do the delete */
-       ret = tcf_action_delete(net, actions, acts_deleted, extack);
+       ret = tcf_action_delete(net, actions);
        if (ret < 0) {
                NL_SET_ERR_MSG(extack, "Failed to delete TC action");
                kfree_skb(skb);
@@ -1249,8 +1237,7 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct tc_action *act;
        size_t attr_size = 0;
-       struct tc_action *actions[TCA_ACT_MAX_PRIO + 1] = {};
-       int acts_deleted = 0;
+       struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, extack);
        if (ret < 0)
@@ -1280,14 +1267,13 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
        if (event == RTM_GETACTION)
                ret = tcf_get_notify(net, portid, n, actions, event, extack);
        else { /* delete */
-               ret = tcf_del_notify(net, n, actions, &acts_deleted, portid,
-                                    attr_size, extack);
+               ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
                if (ret)
                        goto err;
-               return ret;
+               return 0;
        }
 err:
-       tcf_action_put_many(&actions[acts_deleted]);
+       tcf_action_put_many(actions);
        return ret;
 }
 
index d30b23e..0c68bc9 100644 (file)
@@ -395,13 +395,6 @@ static int tcf_bpf_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_bpf_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, bpf_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_bpf_ops __read_mostly = {
        .kind           =       "bpf",
        .type           =       TCA_ACT_BPF,
@@ -412,7 +405,6 @@ static struct tc_action_ops act_bpf_ops __read_mostly = {
        .init           =       tcf_bpf_init,
        .walk           =       tcf_bpf_walker,
        .lookup         =       tcf_bpf_search,
-       .delete         =       tcf_bpf_delete,
        .size           =       sizeof(struct tcf_bpf),
 };
 
index 54c0bf5..6f0f273 100644 (file)
@@ -198,13 +198,6 @@ static int tcf_connmark_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_connmark_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, connmark_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_connmark_ops = {
        .kind           =       "connmark",
        .type           =       TCA_ACT_CONNMARK,
@@ -214,7 +207,6 @@ static struct tc_action_ops act_connmark_ops = {
        .init           =       tcf_connmark_init,
        .walk           =       tcf_connmark_walker,
        .lookup         =       tcf_connmark_search,
-       .delete         =       tcf_connmark_delete,
        .size           =       sizeof(struct tcf_connmark_info),
 };
 
index e698d3f..b8a67ae 100644 (file)
@@ -659,13 +659,6 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
        return nla_total_size(sizeof(struct tc_csum));
 }
 
-static int tcf_csum_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, csum_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_csum_ops = {
        .kind           = "csum",
        .type           = TCA_ACT_CSUM,
@@ -677,7 +670,6 @@ static struct tc_action_ops act_csum_ops = {
        .walk           = tcf_csum_walker,
        .lookup         = tcf_csum_search,
        .get_fill_size  = tcf_csum_get_fill_size,
-       .delete         = tcf_csum_delete,
        .size           = sizeof(struct tcf_csum),
 };
 
index 6a3f25a..cd1d9bd 100644 (file)
@@ -243,13 +243,6 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
        return sz;
 }
 
-static int tcf_gact_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, gact_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_gact_ops = {
        .kind           =       "gact",
        .type           =       TCA_ACT_GACT,
@@ -261,7 +254,6 @@ static struct tc_action_ops act_gact_ops = {
        .walk           =       tcf_gact_walker,
        .lookup         =       tcf_gact_search,
        .get_fill_size  =       tcf_gact_get_fill_size,
-       .delete         =       tcf_gact_delete,
        .size           =       sizeof(struct tcf_gact),
 };
 
index d1081bd..06a3d48 100644 (file)
@@ -167,16 +167,16 @@ static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
 {
        struct tcf_meta_ops *o;
 
-       read_lock_bh(&ife_mod_lock);
+       read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
                if (o->metaid == metaid) {
                        if (!try_module_get(o->owner))
                                o = NULL;
-                       read_unlock_bh(&ife_mod_lock);
+                       read_unlock(&ife_mod_lock);
                        return o;
                }
        }
-       read_unlock_bh(&ife_mod_lock);
+       read_unlock(&ife_mod_lock);
 
        return NULL;
 }
@@ -190,12 +190,12 @@ int register_ife_op(struct tcf_meta_ops *mops)
            !mops->get || !mops->alloc)
                return -EINVAL;
 
-       write_lock_bh(&ife_mod_lock);
+       write_lock(&ife_mod_lock);
 
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid ||
                    (strcmp(mops->name, m->name) == 0)) {
-                       write_unlock_bh(&ife_mod_lock);
+                       write_unlock(&ife_mod_lock);
                        return -EEXIST;
                }
        }
@@ -204,7 +204,7 @@ int register_ife_op(struct tcf_meta_ops *mops)
                mops->release = ife_release_meta_gen;
 
        list_add_tail(&mops->list, &ifeoplist);
-       write_unlock_bh(&ife_mod_lock);
+       write_unlock(&ife_mod_lock);
        return 0;
 }
 EXPORT_SYMBOL_GPL(unregister_ife_op);
@@ -214,7 +214,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
        struct tcf_meta_ops *m;
        int err = -ENOENT;
 
-       write_lock_bh(&ife_mod_lock);
+       write_lock(&ife_mod_lock);
        list_for_each_entry(m, &ifeoplist, list) {
                if (m->metaid == mops->metaid) {
                        list_del(&mops->list);
@@ -222,7 +222,7 @@ int unregister_ife_op(struct tcf_meta_ops *mops)
                        break;
                }
        }
-       write_unlock_bh(&ife_mod_lock);
+       write_unlock(&ife_mod_lock);
 
        return err;
 }
@@ -265,11 +265,8 @@ static const char *ife_meta_id2name(u32 metaid)
 #endif
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
-                               void *val, int len, bool exists,
-                               bool rtnl_held)
+static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
 {
        struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;
@@ -277,15 +274,11 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
        if (!ops) {
                ret = -ENOENT;
 #ifdef CONFIG_MODULES
-               if (exists)
-                       spin_unlock_bh(&ife->tcf_lock);
                if (rtnl_held)
                        rtnl_unlock();
                request_module("ife-meta-%s", ife_meta_id2name(metaid));
                if (rtnl_held)
                        rtnl_lock();
-               if (exists)
-                       spin_lock_bh(&ife->tcf_lock);
                ops = find_ife_oplist(metaid);
 #endif
        }
@@ -302,24 +295,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
 }
 
 /* called when adding new meta information
- * under ife->tcf_lock for existing action
 */
-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
-                       int len, bool atomic)
+static int __add_metainfo(const struct tcf_meta_ops *ops,
+                         struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                         int len, bool atomic, bool exists)
 {
        struct tcf_meta_info *mi = NULL;
-       struct tcf_meta_ops *ops = find_ife_oplist(metaid);
        int ret = 0;
 
-       if (!ops)
-               return -ENOENT;
-
        mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
-       if (!mi) {
-               /*put back what find_ife_oplist took */
-               module_put(ops->owner);
+       if (!mi)
                return -ENOMEM;
-       }
 
        mi->metaid = metaid;
        mi->ops = ops;
@@ -327,29 +313,61 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
                ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
                if (ret != 0) {
                        kfree(mi);
-                       module_put(ops->owner);
                        return ret;
                }
        }
 
+       if (exists)
+               spin_lock_bh(&ife->tcf_lock);
        list_add_tail(&mi->metalist, &ife->metalist);
+       if (exists)
+               spin_unlock_bh(&ife->tcf_lock);
+
+       return ret;
+}
+
+static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+                                   struct tcf_ife_info *ife, u32 metaid,
+                                   bool exists)
+{
+       int ret;
+
+       if (!try_module_get(ops->owner))
+               return -ENOENT;
+       ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
+       if (ret)
+               module_put(ops->owner);
+       return ret;
+}
+
+static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+                       int len, bool exists)
+{
+       const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+       int ret;
 
+       if (!ops)
+               return -ENOENT;
+       ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
+       if (ret)
+               /*put back what find_ife_oplist took */
+               module_put(ops->owner);
        return ret;
 }
 
-static int use_all_metadata(struct tcf_ife_info *ife)
+static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
 {
        struct tcf_meta_ops *o;
        int rc = 0;
        int installed = 0;
 
-       read_lock_bh(&ife_mod_lock);
+       read_lock(&ife_mod_lock);
        list_for_each_entry(o, &ifeoplist, list) {
-               rc = add_metainfo(ife, o->metaid, NULL, 0, true);
+               rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
                if (rc == 0)
                        installed += 1;
        }
-       read_unlock_bh(&ife_mod_lock);
+       read_unlock(&ife_mod_lock);
 
        if (installed)
                return 0;
@@ -396,7 +414,6 @@ static void _tcf_ife_cleanup(struct tc_action *a)
        struct tcf_meta_info *e, *n;
 
        list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
-               module_put(e->ops->owner);
                list_del(&e->metalist);
                if (e->metaval) {
                        if (e->ops->release)
@@ -404,6 +421,7 @@ static void _tcf_ife_cleanup(struct tc_action *a)
                        else
                                kfree(e->metaval);
                }
+               module_put(e->ops->owner);
                kfree(e);
        }
 }
@@ -422,7 +440,6 @@ static void tcf_ife_cleanup(struct tc_action *a)
                kfree_rcu(p, rcu);
 }
 
-/* under ife->tcf_lock for existing action */
 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                             bool exists, bool rtnl_held)
 {
@@ -436,8 +453,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
                        val = nla_data(tb[i]);
                        len = nla_len(tb[i]);
 
-                       rc = load_metaops_and_vet(ife, i, val, len, exists,
-                                                 rtnl_held);
+                       rc = load_metaops_and_vet(i, val, len, rtnl_held);
                        if (rc != 0)
                                return rc;
 
@@ -540,8 +556,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                p->eth_type = ife_type;
        }
 
-       if (exists)
-               spin_lock_bh(&ife->tcf_lock);
 
        if (ret == ACT_P_CREATED)
                INIT_LIST_HEAD(&ife->metalist);
@@ -551,10 +565,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL, NULL);
                if (err) {
 metadata_parse_err:
-                       if (exists)
-                               spin_unlock_bh(&ife->tcf_lock);
                        tcf_idr_release(*a, bind);
-
                        kfree(p);
                        return err;
                }
@@ -569,17 +580,16 @@ metadata_parse_err:
                 * as we can. You better have at least one else we are
                 * going to bail out
                 */
-               err = use_all_metadata(ife);
+               err = use_all_metadata(ife, exists);
                if (err) {
-                       if (exists)
-                               spin_unlock_bh(&ife->tcf_lock);
                        tcf_idr_release(*a, bind);
-
                        kfree(p);
                        return err;
                }
        }
 
+       if (exists)
+               spin_lock_bh(&ife->tcf_lock);
        ife->tcf_action = parm->action;
        /* protected by tcf_lock when modifying existing action */
        rcu_swap_protected(ife->params, p, 1);
@@ -853,13 +863,6 @@ static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_ife_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, ife_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ife_ops = {
        .kind = "ife",
        .type = TCA_ACT_IFE,
@@ -870,7 +873,6 @@ static struct tc_action_ops act_ife_ops = {
        .init = tcf_ife_init,
        .walk = tcf_ife_walker,
        .lookup = tcf_ife_search,
-       .delete = tcf_ife_delete,
        .size = sizeof(struct tcf_ife_info),
 };
 
index 51f235b..8525de8 100644 (file)
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
        }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
-       if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+       if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
@@ -337,13 +337,6 @@ static int tcf_ipt_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_ipt_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, ipt_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_ipt_ops = {
        .kind           =       "ipt",
        .type           =       TCA_ACT_IPT,
@@ -354,7 +347,6 @@ static struct tc_action_ops act_ipt_ops = {
        .init           =       tcf_ipt_init,
        .walk           =       tcf_ipt_walker,
        .lookup         =       tcf_ipt_search,
-       .delete         =       tcf_ipt_delete,
        .size           =       sizeof(struct tcf_ipt),
 };
 
@@ -395,13 +387,6 @@ static int tcf_xt_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_xt_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, xt_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_xt_ops = {
        .kind           =       "xt",
        .type           =       TCA_ACT_XT,
@@ -412,7 +397,6 @@ static struct tc_action_ops act_xt_ops = {
        .init           =       tcf_xt_init,
        .walk           =       tcf_xt_walker,
        .lookup         =       tcf_xt_search,
-       .delete         =       tcf_xt_delete,
        .size           =       sizeof(struct tcf_ipt),
 };
 
index 38fd20f..8bf66d0 100644 (file)
@@ -395,13 +395,6 @@ static void tcf_mirred_put_dev(struct net_device *dev)
        dev_put(dev);
 }
 
-static int tcf_mirred_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, mirred_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_mirred_ops = {
        .kind           =       "mirred",
        .type           =       TCA_ACT_MIRRED,
@@ -416,7 +409,6 @@ static struct tc_action_ops act_mirred_ops = {
        .size           =       sizeof(struct tcf_mirred),
        .get_dev        =       tcf_mirred_get_dev,
        .put_dev        =       tcf_mirred_put_dev,
-       .delete         =       tcf_mirred_delete,
 };
 
 static __net_init int mirred_init_net(struct net *net)
index 822e903..4313aa1 100644 (file)
@@ -300,13 +300,6 @@ static int tcf_nat_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_nat_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, nat_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_nat_ops = {
        .kind           =       "nat",
        .type           =       TCA_ACT_NAT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_nat_ops = {
        .init           =       tcf_nat_init,
        .walk           =       tcf_nat_walker,
        .lookup         =       tcf_nat_search,
-       .delete         =       tcf_nat_delete,
        .size           =       sizeof(struct tcf_nat),
 };
 
index 8a7a7cb..ad99a99 100644 (file)
@@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
 {
        struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
 
+       if (!keys_start)
+               goto nla_failure;
        for (; n > 0; n--) {
                struct nlattr *key_start;
 
                key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
+               if (!key_start)
+                       goto nla_failure;
 
                if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
-                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
-                       nlmsg_trim(skb, keys_start);
-                       return -EINVAL;
-               }
+                   nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
+                       goto nla_failure;
 
                nla_nest_end(skb, key_start);
 
@@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
        nla_nest_end(skb, keys_start);
 
        return 0;
+nla_failure:
+       nla_nest_cancel(skb, keys_start);
+       return -EINVAL;
 }
 
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
@@ -418,7 +423,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
        opt->bindcnt = atomic_read(&p->tcf_bindcnt) - bind;
 
        if (p->tcfp_keys_ex) {
-               tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
+               if (tcf_pedit_key_ex_dump(skb,
+                                         p->tcfp_keys_ex,
+                                         p->tcfp_nkeys))
+                       goto nla_put_failure;
 
                if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
                        goto nla_put_failure;
@@ -460,13 +468,6 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_pedit_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, pedit_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_pedit_ops = {
        .kind           =       "pedit",
        .type           =       TCA_ACT_PEDIT,
@@ -477,7 +478,6 @@ static struct tc_action_ops act_pedit_ops = {
        .init           =       tcf_pedit_init,
        .walk           =       tcf_pedit_walker,
        .lookup         =       tcf_pedit_search,
-       .delete         =       tcf_pedit_delete,
        .size           =       sizeof(struct tcf_pedit),
 };
 
index 06f0742..5d8bfa8 100644 (file)
@@ -320,13 +320,6 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_police_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, police_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 MODULE_AUTHOR("Alexey Kuznetsov");
 MODULE_DESCRIPTION("Policing actions");
 MODULE_LICENSE("GPL");
@@ -340,7 +333,6 @@ static struct tc_action_ops act_police_ops = {
        .init           =       tcf_police_init,
        .walk           =       tcf_police_walker,
        .lookup         =       tcf_police_search,
-       .delete         =       tcf_police_delete,
        .size           =       sizeof(struct tcf_police),
 };
 
index 207b413..6b67aa1 100644 (file)
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
        if (!exists) {
                ret = tcf_idr_create(tn, parm->index, est, a,
-                                    &act_sample_ops, bind, false);
+                                    &act_sample_ops, bind, true);
                if (ret) {
                        tcf_idr_cleanup(tn, parm->index);
                        return ret;
@@ -232,13 +232,6 @@ static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_sample_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, sample_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_sample_ops = {
        .kind     = "sample",
        .type     = TCA_ACT_SAMPLE,
@@ -249,7 +242,6 @@ static struct tc_action_ops act_sample_ops = {
        .cleanup  = tcf_sample_cleanup,
        .walk     = tcf_sample_walker,
        .lookup   = tcf_sample_search,
-       .delete   = tcf_sample_delete,
        .size     = sizeof(struct tcf_sample),
 };
 
index e616523..52400d4 100644 (file)
@@ -196,13 +196,6 @@ static int tcf_simp_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_simp_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, simp_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_simp_ops = {
        .kind           =       "simple",
        .type           =       TCA_ACT_SIMP,
@@ -213,7 +206,6 @@ static struct tc_action_ops act_simp_ops = {
        .init           =       tcf_simp_init,
        .walk           =       tcf_simp_walker,
        .lookup         =       tcf_simp_search,
-       .delete         =       tcf_simp_delete,
        .size           =       sizeof(struct tcf_defact),
 };
 
index 926d7bc..73e44ce 100644 (file)
@@ -299,13 +299,6 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbedit_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, skbedit_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
        .type           =       TCA_ACT_SKBEDIT,
@@ -316,7 +309,6 @@ static struct tc_action_ops act_skbedit_ops = {
        .cleanup        =       tcf_skbedit_cleanup,
        .walk           =       tcf_skbedit_walker,
        .lookup         =       tcf_skbedit_search,
-       .delete         =       tcf_skbedit_delete,
        .size           =       sizeof(struct tcf_skbedit),
 };
 
index d6a1af0..588077f 100644 (file)
@@ -259,13 +259,6 @@ static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_skbmod_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, skbmod_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_skbmod_ops = {
        .kind           =       "skbmod",
        .type           =       TCA_ACT_SKBMOD,
@@ -276,7 +269,6 @@ static struct tc_action_ops act_skbmod_ops = {
        .cleanup        =       tcf_skbmod_cleanup,
        .walk           =       tcf_skbmod_walker,
        .lookup         =       tcf_skbmod_search,
-       .delete         =       tcf_skbmod_delete,
        .size           =       sizeof(struct tcf_skbmod),
 };
 
index 8f09cf0..681f6f0 100644 (file)
@@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                                                  &metadata->u.tun_info,
                                                  opts_len, extack);
                        if (ret < 0)
-                               goto err_out;
+                               goto release_tun_meta;
                }
 
                metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                                     &act_tunnel_key_ops, bind, true);
                if (ret) {
                        NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
-                       goto err_out;
+                       goto release_tun_meta;
                }
 
                ret = ACT_P_CREATED;
        } else if (!ovr) {
-               tcf_idr_release(*a, bind);
                NL_SET_ERR_MSG(extack, "TC IDR already exists");
-               return -EEXIST;
+               ret = -EEXIST;
+               goto release_tun_meta;
        }
 
        t = to_tunnel_key(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               exists = true;
+               goto release_tun_meta;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
@@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
 
        return ret;
 
+release_tun_meta:
+       dst_release(&metadata->dst);
+
 err_out:
        if (exists)
                tcf_idr_release(*a, bind);
@@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
                    nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
                               opt->type) ||
                    nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
-                           opt->length * 4, opt + 1))
+                           opt->length * 4, opt + 1)) {
+                       nla_nest_cancel(skb, start);
                        return -EMSGSIZE;
+               }
 
                len -= sizeof(struct geneve_opt) + opt->length * 4;
                src += sizeof(struct geneve_opt) + opt->length * 4;
@@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
                                const struct ip_tunnel_info *info)
 {
        struct nlattr *start;
-       int err;
+       int err = -EINVAL;
 
        if (!info->options_len)
                return 0;
@@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
        if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
                err = tunnel_key_geneve_opts_dump(skb, info);
                if (err)
-                       return err;
+                       goto err_out;
        } else {
-               return -EINVAL;
+err_out:
+               nla_nest_cancel(skb, start);
+               return err;
        }
 
        nla_nest_end(skb, start);
@@ -548,13 +556,6 @@ static int tunnel_key_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tunnel_key_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_tunnel_key_ops = {
        .kind           =       "tunnel_key",
        .type           =       TCA_ACT_TUNNEL_KEY,
@@ -565,7 +566,6 @@ static struct tc_action_ops act_tunnel_key_ops = {
        .cleanup        =       tunnel_key_release,
        .walk           =       tunnel_key_walker,
        .lookup         =       tunnel_key_search,
-       .delete         =       tunnel_key_delete,
        .size           =       sizeof(struct tcf_tunnel_key),
 };
 
index 209e70a..033d273 100644 (file)
@@ -296,13 +296,6 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
        return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_vlan_delete(struct net *net, u32 index)
-{
-       struct tc_action_net *tn = net_generic(net, vlan_net_id);
-
-       return tcf_idr_delete_index(tn, index);
-}
-
 static struct tc_action_ops act_vlan_ops = {
        .kind           =       "vlan",
        .type           =       TCA_ACT_VLAN,
@@ -313,7 +306,6 @@ static struct tc_action_ops act_vlan_ops = {
        .cleanup        =       tcf_vlan_cleanup,
        .walk           =       tcf_vlan_walker,
        .lookup         =       tcf_vlan_search,
-       .delete         =       tcf_vlan_delete,
        .size           =       sizeof(struct tcf_vlan),
 };
 
index 31bd143..0a75cb2 100644 (file)
@@ -1252,7 +1252,7 @@ replay:
        }
        chain = tcf_chain_get(block, chain_index, true);
        if (!chain) {
-               NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
+               NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
                err = -ENOMEM;
                goto errout;
        }
@@ -1399,7 +1399,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
                        goto errout;
                }
                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
-               err = -EINVAL;
+               err = -ENOENT;
                goto errout;
        }
 
@@ -1902,6 +1902,8 @@ replay:
                                RTM_NEWCHAIN, false);
                break;
        case RTM_DELCHAIN:
+               tfilter_notify_chain(net, skb, block, q, parent, n,
+                                    chain, RTM_DELTFILTER);
                /* Flush the chain first as the user requested chain removal. */
                tcf_chain_flush(chain);
                /* In case the chain was successfully deleted, put a reference
index d5d2a6d..f218ccf 100644 (file)
@@ -914,6 +914,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_U32_MAX + 1];
        u32 htid, flags = 0;
+       size_t sel_size;
        int err;
 #ifdef CONFIG_CLS_U32_PERF
        size_t size;
@@ -1076,8 +1077,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 
        s = nla_data(tb[TCA_U32_SEL]);
+       sel_size = struct_size(s, keys, s->nkeys);
+       if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
+               err = -EINVAL;
+               goto erridr;
+       }
 
-       n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
+       n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
        if (n == NULL) {
                err = -ENOBUFS;
                goto erridr;
@@ -1092,7 +1098,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
        }
 #endif
 
-       memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
+       memcpy(&n->sel, s, sel_size);
        RCU_INIT_POINTER(n->ht_up, ht);
        n->handle = handle;
        n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
index 98541c6..85e73f4 100644 (file)
@@ -1311,6 +1311,18 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
  * Delete/get qdisc.
  */
 
+const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+       [TCA_KIND]              = { .type = NLA_STRING },
+       [TCA_OPTIONS]           = { .type = NLA_NESTED },
+       [TCA_RATE]              = { .type = NLA_BINARY,
+                                   .len = sizeof(struct tc_estimator) },
+       [TCA_STAB]              = { .type = NLA_NESTED },
+       [TCA_DUMP_INVISIBLE]    = { .type = NLA_FLAG },
+       [TCA_CHAIN]             = { .type = NLA_U32 },
+       [TCA_INGRESS_BLOCK]     = { .type = NLA_U32 },
+       [TCA_EGRESS_BLOCK]      = { .type = NLA_U32 },
+};
+
 static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                        struct netlink_ext_ack *extack)
 {
@@ -1327,7 +1339,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
@@ -1411,7 +1424,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
 
 replay:
        /* Reinit, just in case something touches this. */
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
@@ -1645,7 +1659,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
        idx = 0;
        ASSERT_RTNL();
 
-       err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
+       err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
+                         rtm_tca_policy, NULL);
        if (err < 0)
                return err;
 
@@ -1864,7 +1879,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
                return -EPERM;
 
-       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+       err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+                         extack);
        if (err < 0)
                return err;
 
index 35fc725..c07c30b 100644 (file)
@@ -64,7 +64,6 @@
 #include <linux/vmalloc.h>
 #include <linux/reciprocal_div.h>
 #include <net/netlink.h>
-#include <linux/version.h>
 #include <linux/if_vlan.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -621,15 +620,20 @@ static bool cake_ddst(int flow_mode)
 }
 
 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
-                    int flow_mode)
+                    int flow_mode, u16 flow_override, u16 host_override)
 {
-       u32 flow_hash = 0, srchost_hash, dsthost_hash;
+       u32 flow_hash = 0, srchost_hash = 0, dsthost_hash = 0;
        u16 reduced_hash, srchost_idx, dsthost_idx;
        struct flow_keys keys, host_keys;
 
        if (unlikely(flow_mode == CAKE_FLOW_NONE))
                return 0;
 
+       /* If both overrides are set we can skip packet dissection entirely */
+       if ((flow_override || !(flow_mode & CAKE_FLOW_FLOWS)) &&
+           (host_override || !(flow_mode & CAKE_FLOW_HOSTS)))
+               goto skip_hash;
+
        skb_flow_dissect_flow_keys(skb, &keys,
                                   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
@@ -676,6 +680,14 @@ static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
        if (flow_mode & CAKE_FLOW_FLOWS)
                flow_hash = flow_hash_from_keys(&keys);
 
+skip_hash:
+       if (flow_override)
+               flow_hash = flow_override - 1;
+       if (host_override) {
+               dsthost_hash = host_override - 1;
+               srchost_hash = host_override - 1;
+       }
+
        if (!(flow_mode & CAKE_FLOW_FLOWS)) {
                if (flow_mode & CAKE_FLOW_SRC_IP)
                        flow_hash ^= srchost_hash;
@@ -1571,7 +1583,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
        struct cake_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *filter;
        struct tcf_result res;
-       u32 flow = 0;
+       u16 flow = 0, host = 0;
        int result;
 
        filter = rcu_dereference_bh(q->filter_list);
@@ -1595,10 +1607,12 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
 #endif
                if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
                        flow = TC_H_MIN(res.classid);
+               if (TC_H_MAJ(res.classid) <= (CAKE_QUEUES << 16))
+                       host = TC_H_MAJ(res.classid) >> 16;
        }
 hash:
        *t = cake_select_tin(sch, skb);
-       return flow ?: cake_hash(*t, skb, flow_mode) + 1;
+       return cake_hash(*t, skb, flow_mode, flow, host) + 1;
 }
 
 static void cake_reconfigure(struct Qdisc *sch);
index d74d00b..42191ed 100644 (file)
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
                if (!ctx->packet || !ctx->packet->has_cookie_echo)
                        return;
 
-               /* fallthru */
+               /* fall through */
        case SCTP_STATE_ESTABLISHED:
        case SCTP_STATE_SHUTDOWN_PENDING:
        case SCTP_STATE_SHUTDOWN_RECEIVED:
index ef5c9a8..a644292 100644 (file)
@@ -215,7 +215,6 @@ static const struct seq_operations sctp_eps_ops = {
 struct sctp_ht_iter {
        struct seq_net_private p;
        struct rhashtable_iter hti;
-       int start_fail;
 };
 
 static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
@@ -224,7 +223,6 @@ static void *sctp_transport_seq_start(struct seq_file *seq, loff_t *pos)
 
        sctp_transport_walk_start(&iter->hti);
 
-       iter->start_fail = 0;
        return sctp_transport_get_idx(seq_file_net(seq), &iter->hti, *pos);
 }
 
@@ -232,8 +230,6 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
 {
        struct sctp_ht_iter *iter = seq->private;
 
-       if (iter->start_fail)
-               return;
        sctp_transport_walk_stop(&iter->hti);
 }
 
@@ -264,8 +260,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
        epb = &assoc->base;
        sk = epb->sk;
@@ -322,8 +316,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
-       if (!sctp_transport_hold(transport))
-               return 0;
        assoc = transport->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
index e96b15a..f73e9d3 100644 (file)
@@ -2658,20 +2658,23 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
        }
 
        if (params->spp_flags & SPP_IPV6_FLOWLABEL) {
-               if (trans && trans->ipaddr.sa.sa_family == AF_INET6) {
-                       trans->flowlabel = params->spp_ipv6_flowlabel &
-                                          SCTP_FLOWLABEL_VAL_MASK;
-                       trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
-               } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
-                                           transports) {
-                               if (trans->ipaddr.sa.sa_family != AF_INET6)
-                                       continue;
+               if (trans) {
+                       if (trans->ipaddr.sa.sa_family == AF_INET6) {
                                trans->flowlabel = params->spp_ipv6_flowlabel &
                                                   SCTP_FLOWLABEL_VAL_MASK;
                                trans->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
                        }
+               } else if (asoc) {
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
+                                           transports) {
+                               if (t->ipaddr.sa.sa_family != AF_INET6)
+                                       continue;
+                               t->flowlabel = params->spp_ipv6_flowlabel &
+                                              SCTP_FLOWLABEL_VAL_MASK;
+                               t->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
+                       }
                        asoc->flowlabel = params->spp_ipv6_flowlabel &
                                          SCTP_FLOWLABEL_VAL_MASK;
                        asoc->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
@@ -2687,12 +2690,13 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
                        trans->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        trans->dscp |= SCTP_DSCP_SET_MASK;
                } else if (asoc) {
-                       list_for_each_entry(trans,
-                                           &asoc->peer.transport_addr_list,
+                       struct sctp_transport *t;
+
+                       list_for_each_entry(t, &asoc->peer.transport_addr_list,
                                            transports) {
-                               trans->dscp = params->spp_dscp &
-                                             SCTP_DSCP_VAL_MASK;
-                               trans->dscp |= SCTP_DSCP_SET_MASK;
+                               t->dscp = params->spp_dscp &
+                                         SCTP_DSCP_VAL_MASK;
+                               t->dscp |= SCTP_DSCP_SET_MASK;
                        }
                        asoc->dscp = params->spp_dscp & SCTP_DSCP_VAL_MASK;
                        asoc->dscp |= SCTP_DSCP_SET_MASK;
@@ -5005,9 +5009,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
                        break;
                }
 
+               if (!sctp_transport_hold(t))
+                       continue;
+
                if (net_eq(sock_net(t->asoc->base.sk), net) &&
                    t->asoc->peer.primary_path == t)
                        break;
+
+               sctp_transport_put(t);
        }
 
        return t;
@@ -5017,13 +5026,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
                                              struct rhashtable_iter *iter,
                                              int pos)
 {
-       void *obj = SEQ_START_TOKEN;
+       struct sctp_transport *t;
 
-       while (pos && (obj = sctp_transport_get_next(net, iter)) &&
-              !IS_ERR(obj))
-               pos--;
+       if (!pos)
+               return SEQ_START_TOKEN;
 
-       return obj;
+       while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
+               if (!--pos)
+                       break;
+               sctp_transport_put(t);
+       }
+
+       return t;
 }
 
 int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
@@ -5082,8 +5096,6 @@ again:
 
        tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
        for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
-               if (!sctp_transport_hold(tsp))
-                       continue;
                ret = cb(tsp, p);
                if (ret)
                        break;
index 12cac85..033696e 100644 (file)
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
 {
        struct dst_entry *dst = sctp_transport_dst_check(t);
+       struct sock *sk = t->asoc->base.sk;
        bool change = true;
 
        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
        pmtu = SCTP_TRUNC4(pmtu);
 
        if (dst) {
-               dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
+               struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+               union sctp_addr addr;
+
+               pf->af->from_sk(&addr, sk);
+               pf->to_sk_daddr(&t->ipaddr, sk);
+               dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+               pf->to_sk_daddr(&addr, sk);
+
                dst = sctp_transport_dst_check(t);
        }
 
        if (!dst) {
-               t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+               t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
                dst = t->dst;
        }
 
index 2d8a1e1..0152317 100644 (file)
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
                smc->sk.sk_err = -rc;
 
 out:
-       smc->sk.sk_state_change(&smc->sk);
+       if (smc->sk.sk_err)
+               smc->sk.sk_state_change(&smc->sk);
+       else
+               smc->sk.sk_write_space(&smc->sk);
        kfree(smc->connect_info);
        smc->connect_info = NULL;
        release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
 }
 
 /* listen worker: finish RDMA setup */
-static void smc_listen_rdma_finish(struct smc_sock *new_smc,
-                                  struct smc_clc_msg_accept_confirm *cclc,
-                                  int local_contact)
+static int smc_listen_rdma_finish(struct smc_sock *new_smc,
+                                 struct smc_clc_msg_accept_confirm *cclc,
+                                 int local_contact)
 {
        struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
        int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
                if (reason_code)
                        goto decline;
        }
-       return;
+       return 0;
 
 decline:
        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_decline(new_smc, reason_code, local_contact);
+       return reason_code;
 }
 
 /* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
        }
 
        /* finish worker */
-       if (!ism_supported)
-               smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+       if (!ism_supported) {
+               if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+                       return;
+       }
        smc_conn_save_peer_info(new_smc, &cclc);
        mutex_unlock(&smc_create_lgr_pending);
        smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+       if (smc->use_fallback) {
                /* delegate to CLC child sock */
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
                sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                                mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
                        if (sk->sk_state == SMC_APPCLOSEWAIT1)
                                mask |= EPOLLIN;
+                       if (smc->conn.urg_state == SMC_URG_VALID)
+                               mask |= EPOLLPRI;
                }
-               if (smc->conn.urg_state == SMC_URG_VALID)
-                       mask |= EPOLLPRI;
        }
 
        return mask;
index 83aba9a..52241d6 100644 (file)
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
        vec[i++].iov_len = sizeof(trl);
        /* due to the few bytes needed for clc-handshake this cannot block */
        len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
-       if (len < sizeof(pclc)) {
-               if (len >= 0) {
-                       reason_code = -ENETUNREACH;
-                       smc->sk.sk_err = -reason_code;
-               } else {
-                       smc->sk.sk_err = smc->clcsock->sk->sk_err;
-                       reason_code = -smc->sk.sk_err;
-               }
+       if (len < 0) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               reason_code = -smc->sk.sk_err;
+       } else if (len < (int)sizeof(pclc)) {
+               reason_code = -ENETUNREACH;
+               smc->sk.sk_err = -reason_code;
        }
 
        return reason_code;
index ac961df..ea2b87f 100644 (file)
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
        struct smc_cdc_conn_state_flags *txflags =
                &smc->conn.local_tx_ctrl.conn_state_flags;
 
-       sk->sk_err = ECONNABORTED;
-       if (smc->clcsock && smc->clcsock->sk) {
-               smc->clcsock->sk->sk_err = ECONNABORTED;
-               smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+       if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
+               sk->sk_err = ECONNABORTED;
+               if (smc->clcsock && smc->clcsock->sk) {
+                       smc->clcsock->sk->sk_err = ECONNABORTED;
+                       smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+               }
        }
        switch (sk->sk_state) {
-       case SMC_INIT:
-               sk->sk_state = SMC_PEERABORTWAIT;
-               break;
        case SMC_ACTIVE:
                sk->sk_state = SMC_PEERABORTWAIT;
                release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
        case SMC_PEERFINCLOSEWAIT:
                sock_put(sk); /* passive closing */
                break;
+       case SMC_INIT:
        case SMC_PEERABORTWAIT:
        case SMC_CLOSED:
                break;
index 01c6ce0..7cb3e4f 100644 (file)
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
 };
 
 /* SMC_PNETID family definition */
-static struct genl_family smc_pnet_nl_family = {
+static struct genl_family smc_pnet_nl_family __ro_after_init = {
        .hdrsize = 0,
        .name = SMCR_GENL_FAMILY_NAME,
        .version = SMCR_GENL_FAMILY_VERSION,
index e6945e3..01f3f8f 100644 (file)
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
 EXPORT_SYMBOL(dlci_ioctl_set);
 
 static long sock_do_ioctl(struct net *net, struct socket *sock,
-                                unsigned int cmd, unsigned long arg)
+                         unsigned int cmd, unsigned long arg,
+                         unsigned int ifreq_size)
 {
        int err;
        void __user *argp = (void __user *)arg;
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
        } else {
                struct ifreq ifr;
                bool need_copyout;
-               if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+               if (copy_from_user(&ifr, argp, ifreq_size))
                        return -EFAULT;
                err = dev_ioctl(net, cmd, &ifr, &need_copyout);
                if (!err && need_copyout)
-                       if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+                       if (copy_to_user(argp, &ifr, ifreq_size))
                                return -EFAULT;
        }
        return err;
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                        err = open_related_ns(&net->ns, get_net_ns);
                        break;
                default:
-                       err = sock_do_ioctl(net, sock, cmd, arg);
+                       err = sock_do_ioctl(net, sock, cmd, arg,
+                                           sizeof(struct ifreq));
                        break;
                }
        return err;
@@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
        int err;
 
        set_fs(KERNEL_DS);
-       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
+                           sizeof(struct compat_ifreq));
        set_fs(old_fs);
        if (!err)
                err = compat_put_timeval(&ktv, up);
@@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
        int err;
 
        set_fs(KERNEL_DS);
-       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
+       err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
+                           sizeof(struct compat_ifreq));
        set_fs(old_fs);
        if (!err)
                err = compat_put_timespec(&kts, up);
@@ -3072,7 +3076,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
        }
 
        set_fs(KERNEL_DS);
-       ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
+       ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
+                           sizeof(struct compat_ifreq));
        set_fs(old_fs);
 
 out:
@@ -3185,7 +3190,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
        case SIOCBONDSETHWADDR:
        case SIOCBONDCHANGEACTIVE:
        case SIOCGIFNAME:
-               return sock_do_ioctl(net, sock, cmd, arg);
+               return sock_do_ioctl(net, sock, cmd, arg,
+                                    sizeof(struct compat_ifreq));
        }
 
        return -ENOIOCTLCMD;
index 9ee6cfe..d802654 100644 (file)
@@ -51,12 +51,12 @@ const char tipc_bclink_name[] = "broadcast-link";
  * struct tipc_bc_base - base structure for keeping broadcast send state
  * @link: broadcast send link structure
  * @inputq: data input queue; will only carry SOCK_WAKEUP messages
- * @dest: array keeping number of reachable destinations per bearer
+ * @dests: array keeping number of reachable destinations per bearer
  * @primary_bearer: a bearer having links to all broadcast destinations, if any
  * @bcast_support: indicates if primary bearer, if any, supports broadcast
  * @rcast_support: indicates if all peer nodes support replicast
  * @rc_ratio: dest count as percentage of cluster size where send method changes
- * @bc_threshold: calculated drom rc_ratio; if dests > threshold use broadcast
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
  */
 struct tipc_bc_base {
        struct tipc_link *link;
index 418f03d..645c160 100644 (file)
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
 
        switch (evt) {
        case NETDEV_CHANGE:
-               if (netif_carrier_ok(dev))
+               if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+                       test_and_set_bit_lock(0, &b->up);
                        break;
-               /* else: fall through */
-       case NETDEV_UP:
-               test_and_set_bit_lock(0, &b->up);
-               break;
+               }
+               /* fall through */
        case NETDEV_GOING_DOWN:
                clear_bit_unlock(0, &b->up);
                tipc_reset_bearer(net, b);
                break;
+       case NETDEV_UP:
+               test_and_set_bit_lock(0, &b->up);
+               break;
        case NETDEV_CHANGEMTU:
                if (tipc_mtu_bad(dev, 0)) {
                        bearer_disable(net, b);
index aaabb0b..73137f4 100644 (file)
@@ -84,7 +84,9 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb,
 
        if (h->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
+                       .start = tipc_dump_start,
                        .dump = tipc_diag_dump,
+                       .done = tipc_dump_done,
                };
                netlink_dump_start(net->diag_nlsk, skb, h, &c);
                return 0;
index b1f0bee..fb886b5 100644 (file)
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
        return l->name;
 }
 
+u32 tipc_link_state(struct tipc_link *l)
+{
+       return l->state;
+}
+
 /**
  * tipc_link_create - create a new link
  * @n: pointer to associated node
@@ -841,9 +846,14 @@ void tipc_link_reset(struct tipc_link *l)
        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
+       spin_lock_bh(&l->wakeupq.lock);
+       spin_lock_bh(&l->inputq->lock);
+       skb_queue_splice_init(&l->wakeupq, l->inputq);
+       spin_unlock_bh(&l->inputq->lock);
+       spin_unlock_bh(&l->wakeupq.lock);
+
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
-       skb_queue_splice_init(&l->wakeupq, l->inputq);
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1380,6 +1390,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
        __skb_queue_tail(xmitq, skb);
 }
 
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+                                   struct sk_buff_head *xmitq)
+{
+       u32 onode = tipc_own_addr(l->net);
+       struct tipc_msg *hdr, *ihdr;
+       struct sk_buff_head tnlq;
+       struct sk_buff *skb;
+       u32 dnode = l->addr;
+
+       skb_queue_head_init(&tnlq);
+       skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+                             INT_H_SIZE, BASIC_H_SIZE,
+                             dnode, onode, 0, 0, 0);
+       if (!skb) {
+               pr_warn("%sunable to create tunnel packet\n", link_co_err);
+               return;
+       }
+
+       hdr = buf_msg(skb);
+       msg_set_msgcnt(hdr, 1);
+       msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+       ihdr = (struct tipc_msg *)msg_data(hdr);
+       tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+                     BASIC_H_SIZE, dnode);
+       msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+       __skb_queue_tail(&tnlq, skb);
+       tipc_link_xmit(l, &tnlq, xmitq);
+}
+
 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
  * with contents of the link's transmit and backlog queues.
  */
@@ -1476,6 +1516,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
                        return false;
                if (session != curr_session)
                        return false;
+               /* Extra sanity check */
+               if (!link_is_up(l) && msg_ack(hdr))
+                       return false;
                if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
                        return true;
                /* Accept only STATE with new sequence number */
index 7bc494a..90488c5 100644 (file)
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         struct tipc_link **link);
 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+                                   struct sk_buff_head *xmitq);
 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
 int tipc_link_fsm_evt(struct tipc_link *l, int evt);
 bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
 u16 tipc_link_acked(struct tipc_link *l);
 u32 tipc_link_id(struct tipc_link *l);
 char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
 char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
index 88f027b..66d5b2c 100644 (file)
@@ -980,20 +980,17 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        list_for_each_entry(dst, l, list) {
-               if (dst->value != value)
-                       continue;
-               return dst;
+               if (dst->node == node && dst->port == port)
+                       return dst;
        }
        return NULL;
 }
 
 bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
 {
-       u64 value = (u64)node << 32 | port;
        struct tipc_dest *dst;
 
        if (tipc_dest_find(l, node, port))
@@ -1002,7 +999,8 @@ bool tipc_dest_push(struct list_head *l, u32 node, u32 port)
        dst = kmalloc(sizeof(*dst), GFP_ATOMIC);
        if (unlikely(!dst))
                return false;
-       dst->value = value;
+       dst->node = node;
+       dst->port = port;
        list_add(&dst->list, l);
        return true;
 }
index 0febba4..892bd75 100644 (file)
@@ -133,13 +133,8 @@ void tipc_nametbl_stop(struct net *net);
 
 struct tipc_dest {
        struct list_head list;
-       union {
-               struct {
-                       u32 port;
-                       u32 node;
-               };
-               u64 value;
-       };
+       u32 port;
+       u32 node;
 };
 
 struct tipc_dest *tipc_dest_find(struct list_head *l, u32 node, u32 port);
index 6ff2254..99ee419 100644 (file)
@@ -167,7 +167,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
        },
        {
                .cmd    = TIPC_NL_SOCK_GET,
+               .start = tipc_dump_start,
                .dumpit = tipc_nl_sk_dump,
+               .done   = tipc_dump_done,
                .policy = tipc_nl_policy,
        },
        {
index a2f7674..6376467 100644 (file)
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
                return -ENOMEM;
 
        buf->sk = msg->dst_sk;
+       if (__tipc_dump_start(&cb, msg->net)) {
+               kfree_skb(buf);
+               return -ENOMEM;
+       }
 
        do {
                int rem;
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
        err = 0;
 
 err_out:
+       tipc_dump_done(&cb);
        kfree_skb(buf);
 
        if (err == -EMSGSIZE) {
index 68014f1..2afc4f8 100644 (file)
@@ -111,6 +111,7 @@ struct tipc_node {
        int action_flags;
        struct list_head list;
        int state;
+       bool failover_sent;
        u16 sync_point;
        int link_cnt;
        u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
                *slot0 = bearer_id;
                *slot1 = bearer_id;
                tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+               n->failover_sent = false;
                n->action_flags |= TIPC_NOTIFY_NODE_UP;
                tipc_link_set_active(nl, true);
                tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        bool reset = true;
        char *if_name;
        unsigned long intv;
+       u16 session;
 
        *dupl_addr = false;
        *respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                        goto exit;
 
                if_name = strchr(b->name, ':') + 1;
+               get_random_bytes(&session, sizeof(u16));
                if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
                                      b->net_plane, b->mtu, b->priority,
-                                     b->window, mod(tipc_net(net)->random),
+                                     b->window, session,
                                      tipc_own_addr(net), addr, peer_id,
                                      n->capabilities,
                                      tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
                        tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
                                                        tipc_link_inputq(l));
                }
+               /* If parallel link was already down, and this happened before
+                * the tunnel link came up, FAILOVER was never sent. Ensure that
+                * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+                */
+               if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+                       tipc_link_create_dummy_tnl_msg(l, xmitq);
+                       n->failover_sent = true;
+               }
                /* If pkts arrive out of order, use lowest calculated syncpt */
                if (less(syncpt, n->sync_point))
                        n->sync_point = syncpt;
index c1e93c9..b6f99b0 100644 (file)
@@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock)
        sk_stop_timer(sk, &sk->sk_timer);
        tipc_sk_remove(tsk);
 
+       sock_orphan(sk);
        /* Reject any messages that accumulated in backlog queue */
        release_sock(sk);
        tipc_dest_list_purge(&tsk->cong_links);
@@ -1418,8 +1419,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
        /* Handle implicit connection setup */
        if (unlikely(dest)) {
                rc = __tipc_sendmsg(sock, m, dlen);
-               if (dlen && (dlen == rc))
+               if (dlen && dlen == rc) {
+                       tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
                        tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+               }
                return rc;
        }
 
@@ -2672,6 +2675,8 @@ void tipc_sk_reinit(struct net *net)
 
                rhashtable_walk_stop(&iter);
        } while (tsk == ERR_PTR(-EAGAIN));
+
+       rhashtable_walk_exit(&iter);
 }
 
 static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
@@ -3227,45 +3232,74 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk))
 {
-       struct net *net = sock_net(skb->sk);
-       struct tipc_net *tn = tipc_net(net);
-       const struct bucket_table *tbl;
-       u32 prev_portid = cb->args[1];
-       u32 tbl_id = cb->args[0];
-       struct rhash_head *pos;
+       struct rhashtable_iter *iter = (void *)cb->args[4];
        struct tipc_sock *tsk;
        int err;
 
-       rcu_read_lock();
-       tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
-       for (; tbl_id < tbl->size; tbl_id++) {
-               rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
-                       spin_lock_bh(&tsk->sk.sk_lock.slock);
-                       if (prev_portid && prev_portid != tsk->portid) {
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
+       rhashtable_walk_start(iter);
+       while ((tsk = rhashtable_walk_next(iter)) != NULL) {
+               if (IS_ERR(tsk)) {
+                       err = PTR_ERR(tsk);
+                       if (err == -EAGAIN) {
+                               err = 0;
                                continue;
                        }
+                       break;
+               }
 
-                       err = skb_handler(skb, cb, tsk);
-                       if (err) {
-                               prev_portid = tsk->portid;
-                               spin_unlock_bh(&tsk->sk.sk_lock.slock);
-                               goto out;
-                       }
-
-                       prev_portid = 0;
-                       spin_unlock_bh(&tsk->sk.sk_lock.slock);
+               sock_hold(&tsk->sk);
+               rhashtable_walk_stop(iter);
+               lock_sock(&tsk->sk);
+               err = skb_handler(skb, cb, tsk);
+               if (err) {
+                       release_sock(&tsk->sk);
+                       sock_put(&tsk->sk);
+                       goto out;
                }
+               release_sock(&tsk->sk);
+               rhashtable_walk_start(iter);
+               sock_put(&tsk->sk);
        }
+       rhashtable_walk_stop(iter);
 out:
-       rcu_read_unlock();
-       cb->args[0] = tbl_id;
-       cb->args[1] = prev_portid;
-
        return skb->len;
 }
 EXPORT_SYMBOL(tipc_nl_sk_walk);
 
+int tipc_dump_start(struct netlink_callback *cb)
+{
+       return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+       /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+       struct rhashtable_iter *iter = (void *)cb->args[4];
+       struct tipc_net *tn = tipc_net(net);
+
+       if (!iter) {
+               iter = kmalloc(sizeof(*iter), GFP_KERNEL);
+               if (!iter)
+                       return -ENOMEM;
+
+               cb->args[4] = (long)iter;
+       }
+
+       rhashtable_walk_enter(&tn->sk_rht, iter);
+       return 0;
+}
+
+int tipc_dump_done(struct netlink_callback *cb)
+{
+       struct rhashtable_iter *hti = (void *)cb->args[4];
+
+       rhashtable_walk_exit(hti);
+       kfree(hti);
+       return 0;
+}
+EXPORT_SYMBOL(tipc_dump_done);
+
 int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tipc_sock *tsk, u32 sk_filter_state,
                           u64 (*tipc_diag_gen_cookie)(struct sock *sk))
index aff9b2a..5e575f2 100644 (file)
@@ -68,4 +68,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
                    int (*skb_handler)(struct sk_buff *skb,
                                       struct netlink_callback *cb,
                                       struct tipc_sock *tsk));
+int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
+int tipc_dump_done(struct netlink_callback *cb);
 #endif
index c8e34ef..2627b5d 100644 (file)
@@ -313,8 +313,8 @@ static void tipc_conn_send_work(struct work_struct *work)
        conn_put(con);
 }
 
-/* tipc_conn_queue_evt() - interrupt level call from a subscription instance
- * The queued work is launched into tipc_send_work()->tipc_send_to_sock()
+/* tipc_topsrv_queue_evt() - interrupt level call from a subscription instance
+ * The queued work is launched into tipc_conn_send_work()->tipc_conn_send_to_sock()
  */
 void tipc_topsrv_queue_evt(struct net *net, int conid,
                           u32 event, struct tipc_event *evt)
index 292742e..961b07d 100644 (file)
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
                goto free_marker_record;
        }
 
-       crypto_info = &ctx->crypto_send;
+       crypto_info = &ctx->crypto_send.info;
        switch (crypto_info->cipher_type) {
        case TLS_CIPHER_AES_GCM_128:
                nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
 
        ctx->priv_ctx_tx = offload_ctx;
        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
-                                            &ctx->crypto_send,
+                                            &ctx->crypto_send.info,
                                             tcp_sk(sk)->write_seq);
        if (rc)
                goto release_netdev;
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
                goto release_ctx;
 
        rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
-                                            &ctx->crypto_recv,
+                                            &ctx->crypto_recv.info,
                                             tcp_sk(sk)->copied_seq);
        if (rc) {
                pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
index 6102169..450a6db 100644 (file)
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
                goto free_req;
 
        iv = buf;
-       memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
+       memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
               TLS_CIPHER_AES_GCM_128_SALT_SIZE);
        aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
              TLS_CIPHER_AES_GCM_128_IV_SIZE;
index 93c0c22..523622d 100644 (file)
@@ -213,9 +213,14 @@ static void tls_write_space(struct sock *sk)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
 
-       /* We are already sending pages, ignore notification */
-       if (ctx->in_tcp_sendpages)
+       /* If in_tcp_sendpages call lower protocol write space handler
+        * to ensure we wake up any waiting operations there. For example
+        * if do_tcp_sendpages where to call sk_wait_event.
+        */
+       if (ctx->in_tcp_sendpages) {
+               ctx->sk_write_space(sk);
                return;
+       }
 
        if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
                gfp_t sk_allocation = sk->sk_allocation;
@@ -236,6 +241,16 @@ static void tls_write_space(struct sock *sk)
        ctx->sk_write_space(sk);
 }
 
+static void tls_ctx_free(struct tls_context *ctx)
+{
+       if (!ctx)
+               return;
+
+       memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
+       memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+       kfree(ctx);
+}
+
 static void tls_sk_proto_close(struct sock *sk, long timeout)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
@@ -289,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 #else
        {
 #endif
-               kfree(ctx);
+               tls_ctx_free(ctx);
                ctx = NULL;
        }
 
@@ -300,7 +315,7 @@ skip_tx_cleanup:
         * for sk->sk_prot->unhash [tls_hw_unhash]
         */
        if (free_ctx)
-               kfree(ctx);
+               tls_ctx_free(ctx);
 }
 
 static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
@@ -325,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
        }
 
        /* get user crypto info */
-       crypto_info = &ctx->crypto_send;
+       crypto_info = &ctx->crypto_send.info;
 
        if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
                rc = -EBUSY;
@@ -412,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
        }
 
        if (tx)
-               crypto_info = &ctx->crypto_send;
+               crypto_info = &ctx->crypto_send.info;
        else
-               crypto_info = &ctx->crypto_recv;
+               crypto_info = &ctx->crypto_recv.info;
 
        /* Currently we don't support set crypto info more than one time */
        if (TLS_CRYPTO_INFO_READY(crypto_info)) {
@@ -494,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
        goto out;
 
 err_crypto_info:
-       memset(crypto_info, 0, sizeof(*crypto_info));
+       memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
 out:
        return rc;
 }
index 52fbe72..b9c6ecf 100644 (file)
@@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
                         &ctx->sg_encrypted_num_elem,
                         &ctx->sg_encrypted_size, 0);
 
+       if (rc == -ENOSPC)
+               ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
+
        return rc;
 }
 
@@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
                         &ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
                         tls_ctx->pending_open_record_frags);
 
+       if (rc == -ENOSPC)
+               ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
+
        return rc;
 }
 
@@ -925,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk,
                                if (control != TLS_RECORD_TYPE_DATA)
                                        goto recv_end;
                        }
+               } else {
+                       /* MSG_PEEK right now cannot look beyond current skb
+                        * from strparser, meaning we cannot advance skb here
+                        * and thus unpause strparser since we'd loose original
+                        * one.
+                        */
+                       break;
                }
+
                /* If we have a new message from strparser, continue now. */
                if (copied >= target && !ctx->recv_pkt)
                        break;
@@ -1049,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
                goto read_failure;
        }
 
-       if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
-           header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
+       if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
+           header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
                ret = -EINVAL;
                goto read_failure;
        }
@@ -1130,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk)
 
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
-       char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
        struct tls_crypto_info *crypto_info;
        struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
        struct tls_sw_context_tx *sw_ctx_tx = NULL;
@@ -1175,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
        if (tx) {
                crypto_init_wait(&sw_ctx_tx->async_wait);
-               crypto_info = &ctx->crypto_send;
+               crypto_info = &ctx->crypto_send.info;
                cctx = &ctx->tx;
                aead = &sw_ctx_tx->aead_send;
        } else {
                crypto_init_wait(&sw_ctx_rx->async_wait);
-               crypto_info = &ctx->crypto_recv;
+               crypto_info = &ctx->crypto_recv.info;
                cctx = &ctx->rx;
                aead = &sw_ctx_rx->aead_recv;
        }
@@ -1259,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 
        ctx->push_pending_record = tls_sw_push_pending_record;
 
-       memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
-       rc = crypto_aead_setkey(*aead, keyval,
+       rc = crypto_aead_setkey(*aead, gcm_128_info->key,
                                TLS_CIPHER_AES_GCM_128_KEY_SIZE);
        if (rc)
                goto free_aead;
index 5fb9b7d..176edfe 100644 (file)
@@ -669,13 +669,13 @@ static int nl80211_msg_put_wmm_rules(struct sk_buff *msg,
                        goto nla_put_failure;
 
                if (nla_put_u16(msg, NL80211_WMMR_CW_MIN,
-                               rule->wmm_rule->client[j].cw_min) ||
+                               rule->wmm_rule.client[j].cw_min) ||
                    nla_put_u16(msg, NL80211_WMMR_CW_MAX,
-                               rule->wmm_rule->client[j].cw_max) ||
+                               rule->wmm_rule.client[j].cw_max) ||
                    nla_put_u8(msg, NL80211_WMMR_AIFSN,
-                              rule->wmm_rule->client[j].aifsn) ||
-                   nla_put_u8(msg, NL80211_WMMR_TXOP,
-                              rule->wmm_rule->client[j].cot))
+                              rule->wmm_rule.client[j].aifsn) ||
+                   nla_put_u16(msg, NL80211_WMMR_TXOP,
+                               rule->wmm_rule.client[j].cot))
                        goto nla_put_failure;
 
                nla_nest_end(msg, nl_wmm_rule);
@@ -766,9 +766,9 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
 
        if (large) {
                const struct ieee80211_reg_rule *rule =
-                       freq_reg_info(wiphy, chan->center_freq);
+                       freq_reg_info(wiphy, MHZ_TO_KHZ(chan->center_freq));
 
-               if (!IS_ERR(rule) && rule->wmm_rule) {
+               if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) {
                        if (nl80211_msg_put_wmm_rules(msg, rule))
                                goto nla_put_failure;
                }
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
                        return false;
 
                /* check availability */
+               ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
                if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
                        mcs[ridx] |= rbit;
                else
@@ -10230,7 +10231,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        s32 last, low, high;
        u32 hyst;
-       int i, n;
+       int i, n, low_index;
        int err;
 
        /* RSSI reporting disabled? */
@@ -10267,10 +10268,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
                if (last < wdev->cqm_config->rssi_thresholds[i])
                        break;
 
-       low = i > 0 ?
-               (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
-       high = i < n ?
-               (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+       low_index = i - 1;
+       if (low_index >= 0) {
+               low_index = array_index_nospec(low_index, n);
+               low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+       } else {
+               low = S32_MIN;
+       }
+       if (i < n) {
+               i = array_index_nospec(i, n);
+               high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+       } else {
+               high = S32_MAX;
+       }
 
        return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
 }
@@ -12205,6 +12215,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
                return -EOPNOTSUPP;
 
        if (!info->attrs[NL80211_ATTR_MDID] ||
+           !info->attrs[NL80211_ATTR_IE] ||
            !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
                return -EINVAL;
 
index 4fc66a1..24cfa27 100644 (file)
@@ -425,36 +425,23 @@ static const struct ieee80211_regdomain *
 reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
        struct ieee80211_regdomain *regd;
-       int size_of_regd, size_of_wmms;
+       int size_of_regd;
        unsigned int i;
-       struct ieee80211_wmm_rule *d_wmm, *s_wmm;
 
        size_of_regd =
                sizeof(struct ieee80211_regdomain) +
                src_regd->n_reg_rules * sizeof(struct ieee80211_reg_rule);
-       size_of_wmms = src_regd->n_wmm_rules *
-               sizeof(struct ieee80211_wmm_rule);
 
-       regd = kzalloc(size_of_regd + size_of_wmms, GFP_KERNEL);
+       regd = kzalloc(size_of_regd, GFP_KERNEL);
        if (!regd)
                return ERR_PTR(-ENOMEM);
 
        memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
 
-       d_wmm = (struct ieee80211_wmm_rule *)((u8 *)regd + size_of_regd);
-       s_wmm = (struct ieee80211_wmm_rule *)((u8 *)src_regd + size_of_regd);
-       memcpy(d_wmm, s_wmm, size_of_wmms);
-
-       for (i = 0; i < src_regd->n_reg_rules; i++) {
+       for (i = 0; i < src_regd->n_reg_rules; i++)
                memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
                       sizeof(struct ieee80211_reg_rule));
-               if (!src_regd->reg_rules[i].wmm_rule)
-                       continue;
 
-               regd->reg_rules[i].wmm_rule = d_wmm +
-                       (src_regd->reg_rules[i].wmm_rule - s_wmm) /
-                       sizeof(struct ieee80211_wmm_rule);
-       }
        return regd;
 }
 
@@ -860,9 +847,10 @@ static bool valid_regdb(const u8 *data, unsigned int size)
        return true;
 }
 
-static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
+static void set_wmm_rule(struct ieee80211_reg_rule *rrule,
                         struct fwdb_wmm_rule *wmm)
 {
+       struct ieee80211_wmm_rule *rule = &rrule->wmm_rule;
        unsigned int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -876,11 +864,13 @@ static void set_wmm_rule(struct ieee80211_wmm_rule *rule,
                rule->ap[i].aifsn = wmm->ap[i].aifsn;
                rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot);
        }
+
+       rrule->has_wmm = true;
 }
 
 static int __regdb_query_wmm(const struct fwdb_header *db,
                             const struct fwdb_country *country, int freq,
-                            u32 *dbptr, struct ieee80211_wmm_rule *rule)
+                            struct ieee80211_reg_rule *rule)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
@@ -901,8 +891,6 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
                        wmm_ptr = be16_to_cpu(rrule->wmm_ptr) << 2;
                        wmm = (void *)((u8 *)db + wmm_ptr);
                        set_wmm_rule(rule, wmm);
-                       if (dbptr)
-                               *dbptr = wmm_ptr;
                        return 0;
                }
        }
@@ -910,8 +898,7 @@ static int __regdb_query_wmm(const struct fwdb_header *db,
        return -ENODATA;
 }
 
-int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
-                       struct ieee80211_wmm_rule *rule)
+int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule)
 {
        const struct fwdb_header *hdr = regdb;
        const struct fwdb_country *country;
@@ -925,8 +912,7 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
        country = &hdr->country[0];
        while (country->coll_ptr) {
                if (alpha2_equal(alpha2, country->alpha2))
-                       return __regdb_query_wmm(regdb, country, freq, dbptr,
-                                                rule);
+                       return __regdb_query_wmm(regdb, country, freq, rule);
 
                country++;
        }
@@ -935,32 +921,13 @@ int reg_query_regdb_wmm(char *alpha2, int freq, u32 *dbptr,
 }
 EXPORT_SYMBOL(reg_query_regdb_wmm);
 
-struct wmm_ptrs {
-       struct ieee80211_wmm_rule *rule;
-       u32 ptr;
-};
-
-static struct ieee80211_wmm_rule *find_wmm_ptr(struct wmm_ptrs *wmm_ptrs,
-                                              u32 wmm_ptr, int n_wmms)
-{
-       int i;
-
-       for (i = 0; i < n_wmms; i++) {
-               if (wmm_ptrs[i].ptr == wmm_ptr)
-                       return wmm_ptrs[i].rule;
-       }
-       return NULL;
-}
-
 static int regdb_query_country(const struct fwdb_header *db,
                               const struct fwdb_country *country)
 {
        unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2;
        struct fwdb_collection *coll = (void *)((u8 *)db + ptr);
        struct ieee80211_regdomain *regdom;
-       struct ieee80211_regdomain *tmp_rd;
-       unsigned int size_of_regd, i, n_wmms = 0;
-       struct wmm_ptrs *wmm_ptrs;
+       unsigned int size_of_regd, i;
 
        size_of_regd = sizeof(struct ieee80211_regdomain) +
                coll->n_rules * sizeof(struct ieee80211_reg_rule);
@@ -969,12 +936,6 @@ static int regdb_query_country(const struct fwdb_header *db,
        if (!regdom)
                return -ENOMEM;
 
-       wmm_ptrs = kcalloc(coll->n_rules, sizeof(*wmm_ptrs), GFP_KERNEL);
-       if (!wmm_ptrs) {
-               kfree(regdom);
-               return -ENOMEM;
-       }
-
        regdom->n_reg_rules = coll->n_rules;
        regdom->alpha2[0] = country->alpha2[0];
        regdom->alpha2[1] = country->alpha2[1];
@@ -1013,37 +974,11 @@ static int regdb_query_country(const struct fwdb_header *db,
                                1000 * be16_to_cpu(rule->cac_timeout);
                if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) {
                        u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2;
-                       struct ieee80211_wmm_rule *wmm_pos =
-                               find_wmm_ptr(wmm_ptrs, wmm_ptr, n_wmms);
-                       struct fwdb_wmm_rule *wmm;
-                       struct ieee80211_wmm_rule *wmm_rule;
-
-                       if (wmm_pos) {
-                               rrule->wmm_rule = wmm_pos;
-                               continue;
-                       }
-                       wmm = (void *)((u8 *)db + wmm_ptr);
-                       tmp_rd = krealloc(regdom, size_of_regd + (n_wmms + 1) *
-                                         sizeof(struct ieee80211_wmm_rule),
-                                         GFP_KERNEL);
-
-                       if (!tmp_rd) {
-                               kfree(regdom);
-                               kfree(wmm_ptrs);
-                               return -ENOMEM;
-                       }
-                       regdom = tmp_rd;
-
-                       wmm_rule = (struct ieee80211_wmm_rule *)
-                               ((u8 *)regdom + size_of_regd + n_wmms *
-                               sizeof(struct ieee80211_wmm_rule));
+                       struct fwdb_wmm_rule *wmm = (void *)((u8 *)db + wmm_ptr);
 
-                       set_wmm_rule(wmm_rule, wmm);
-                       wmm_ptrs[n_wmms].ptr = wmm_ptr;
-                       wmm_ptrs[n_wmms++].rule = wmm_rule;
+                       set_wmm_rule(rrule, wmm);
                }
        }
-       kfree(wmm_ptrs);
 
        return reg_schedule_apply(regdom);
 }
@@ -2726,11 +2661,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 {
        struct wiphy *wiphy = NULL;
        enum reg_request_treatment treatment;
+       enum nl80211_reg_initiator initiator = reg_request->initiator;
 
        if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
                wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
 
-       switch (reg_request->initiator) {
+       switch (initiator) {
        case NL80211_REGDOM_SET_BY_CORE:
                treatment = reg_process_hint_core(reg_request);
                break;
@@ -2748,7 +2684,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
                treatment = reg_process_hint_country_ie(wiphy, reg_request);
                break;
        default:
-               WARN(1, "invalid initiator %d\n", reg_request->initiator);
+               WARN(1, "invalid initiator %d\n", initiator);
                goto out_free;
        }
 
@@ -2763,7 +2699,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
         */
        if (treatment == REG_REQ_ALREADY_SET && wiphy &&
            wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
-               wiphy_update_regulatory(wiphy, reg_request->initiator);
+               wiphy_update_regulatory(wiphy, initiator);
                wiphy_all_share_dfs_chan_state(wiphy);
                reg_check_channels();
        }
@@ -2932,6 +2868,7 @@ static int regulatory_hint_core(const char *alpha2)
        request->alpha2[0] = alpha2[0];
        request->alpha2[1] = alpha2[1];
        request->initiator = NL80211_REGDOM_SET_BY_CORE;
+       request->wiphy_idx = WIPHY_IDX_INVALID;
 
        queue_regulatory_request(request);
 
index d36c3eb..d0e7472 100644 (file)
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
        return NULL;
 }
 
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
 static struct ieee80211_channel *
 cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
-                        struct ieee80211_channel *channel)
+                        struct ieee80211_channel *channel,
+                        enum nl80211_bss_scan_width scan_width)
 {
        const u8 *tmp;
        u32 freq;
        int channel_number = -1;
+       struct ieee80211_channel *alt_channel;
 
        tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
        if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
                }
        }
 
-       if (channel_number < 0)
+       if (channel_number < 0) {
+               /* No channel information in frame payload */
                return channel;
+       }
 
        freq = ieee80211_channel_to_frequency(channel_number, channel->band);
-       channel = ieee80211_get_channel(wiphy, freq);
-       if (!channel)
-               return NULL;
-       if (channel->flags & IEEE80211_CHAN_DISABLED)
+       alt_channel = ieee80211_get_channel(wiphy, freq);
+       if (!alt_channel) {
+               if (channel->band == NL80211_BAND_2GHZ) {
+                       /*
+                        * Better not allow unexpected channels when that could
+                        * be going beyond the 1-11 range (e.g., discovering
+                        * BSS on channel 12 when radio is configured for
+                        * channel 11.
+                        */
+                       return NULL;
+               }
+
+               /* No match for the payload channel number - ignore it */
+               return channel;
+       }
+
+       if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+           scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+               /*
+                * Ignore channel number in 5 and 10 MHz channels where there
+                * may not be an n:1 or 1:n mapping between frequencies and
+                * channel numbers.
+                */
+               return channel;
+       }
+
+       /*
+        * Use the channel determined through the payload channel number
+        * instead of the RX channel reported by the driver.
+        */
+       if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
                return NULL;
-       return channel;
+       return alt_channel;
 }
 
 /* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
                    (data->signal < 0 || data->signal > 100)))
                return NULL;
 
-       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+       channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+                                          data->scan_width);
        if (!channel)
                return NULL;
 
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
                return NULL;
 
        channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
-                                          ielen, data->chan);
+                                          ielen, data->chan, data->scan_width);
        if (!channel)
                return NULL;
 
index e0825a0..959ed3a 100644 (file)
@@ -1456,7 +1456,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
                                          u8 *op_class)
 {
        u8 vht_opclass;
-       u16 freq = chandef->center_freq1;
+       u32 freq = chandef->center_freq1;
 
        if (freq >= 2412 && freq <= 2472) {
                if (chandef->width > NL80211_CHAN_WIDTH_40)
index 167f702..06943d9 100644 (file)
@@ -1278,12 +1278,16 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
        if (err)
                return err;
 
-       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
-               return -EOPNOTSUPP;
+       if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
+               err = -EOPNOTSUPP;
+               goto free;
+       }
 
        rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
 
-       return 0;
+free:
+       cfg80211_sinfo_release_content(&sinfo);
+       return err;
 }
 
 /* Get wireless statistics.  Called by /proc/net/wireless and by SIOCGIWSTATS */
@@ -1293,7 +1297,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
        /* we are under RTNL - globally locked - so can use static structs */
        static struct iw_statistics wstats;
-       static struct station_info sinfo;
+       static struct station_info sinfo = {};
        u8 bssid[ETH_ALEN];
 
        if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
@@ -1352,6 +1356,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
                wstats.discard.retries = sinfo.tx_failed;
 
+       cfg80211_sinfo_release_content(&sinfo);
+
        return &wstats;
 }
 
index 911ca6d..bfe2dbe 100644 (file)
@@ -74,14 +74,14 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
                return 0;
 
        if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
-               return force_zc ? -ENOTSUPP : 0; /* fail or fallback */
+               return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */
 
        bpf.command = XDP_QUERY_XSK_UMEM;
 
        rtnl_lock();
        err = xdp_umem_query(dev, queue_id);
        if (err) {
-               err = err < 0 ? -ENOTSUPP : -EBUSY;
+               err = err < 0 ? -EOPNOTSUPP : -EBUSY;
                goto err_rtnl_unlock;
        }
 
index b89c9c7..be3520e 100644 (file)
@@ -458,6 +458,7 @@ resume:
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
                        goto drop;
                }
+               crypto_done = false;
        } while (!err);
 
        err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
index 45ba07a..261995d 100644 (file)
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
                spin_unlock_bh(&x->lock);
 
                skb_dst_force(skb);
+               if (!skb_dst(skb)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       goto error_nolock;
+               }
 
                if (xfrm_offload(skb)) {
                        x->type_offload->encap(x, skb);
index 3110c3f..f094d4b 100644 (file)
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
        }
 
        skb_dst_force(skb);
+       if (!skb_dst(skb)) {
+               XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+               return 0;
+       }
 
        dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
        if (IS_ERR(dst)) {
index 4791aa8..df7ca2d 100644 (file)
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
        err = -EINVAL;
        switch (p->family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       goto out;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       goto out;
+
                break;
 #else
                err = -EAFNOSUPPORT;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
 
        switch (p->sel.family) {
        case AF_INET:
+               if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+                       return -EINVAL;
+
                break;
 
        case AF_INET6:
 #if IS_ENABLED(CONFIG_IPV6)
+               if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+                       return -EINVAL;
+
                break;
 #else
                return  -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
                    (ut[i].family != prev_family))
                        return -EINVAL;
 
+               if (ut[i].mode >= XFRM_MODE_MAX)
+                       return -EINVAL;
+
                prev_family = ut[i].family;
 
                switch (ut[i].family) {
index c75413d..ce53639 100644 (file)
@@ -153,10 +153,6 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
 cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
-# cc-if-fullversion
-# Usage:  EXTRA_CFLAGS += $(call cc-if-fullversion, -lt, 040502, -O1)
-cc-if-fullversion = $(shell [ $(cc-fullversion) $(1) $(2) ] && echo $(3) || echo $(4))
-
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 cc-ldoption = $(call try-run,\
index 1c48572..5a2d1c9 100644 (file)
@@ -246,8 +246,6 @@ objtool_args += --no-fp
 endif
 ifdef CONFIG_GCOV_KERNEL
 objtool_args += --no-unreachable
-else
-objtool_args += $(call cc-ifversion, -lt, 0405, --no-unreachable)
 endif
 ifdef CONFIG_RETPOLINE
 ifneq ($(RETPOLINE_CFLAGS),)
index 5219280..161b022 100755 (executable)
@@ -380,6 +380,7 @@ our $Attribute      = qr{
                        __noclone|
                        __deprecated|
                        __read_mostly|
+                       __ro_after_init|
                        __kprobes|
                        $InitAttribute|
                        ____cacheline_aligned|
@@ -3311,7 +3312,7 @@ sub process {
                        # known declaration macros
                      $sline =~ /^\+\s+$declaration_macros/ ||
                        # start of struct or union or enum
-                     $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
+                     $sline =~ /^\+\s+(?:static\s+)?(?:const\s+)?(?:union|struct|enum|typedef)\b/ ||
                        # start or end of block or continuation of declaration
                      $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
                        # bitfield continuation
index 999d585..e083bca 100755 (executable)
@@ -11,13 +11,14 @@ DEPMOD=$1
 KERNELRELEASE=$2
 
 if ! test -r System.map ; then
+       echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
        exit 0
 fi
 
 if [ -z $(command -v $DEPMOD) ]; then
-       echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+       echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
        echo "This is probably in the kmod package." >&2
-       exit 1
+       exit 0
 fi
 
 # older versions of depmod require the version string to start with three
index 4a7bd21..67ed9f6 100644 (file)
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
 
 # check if necessary packages are available, and configure build flags
 define filechk_conf_cfg
-       $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
        $(CONFIG_SHELL) $<
 endef
 
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644 (file)
index 7a1c40b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Check for pkg-config presence
-
-if [ -z $(command -v pkg-config) ]; then
-       echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
-       exit 1
-fi
index 533b3d8..480ecd8 100755 (executable)
@@ -3,6 +3,13 @@
 
 PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if ! pkg-config --exists $PKG; then
        echo >&2 "*"
        echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
index e6f9fac..c812872 100755 (executable)
@@ -4,20 +4,23 @@
 PKG="ncursesw"
 PKG2="ncurses"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw\"
index 83b5836..143c05f 100644 (file)
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
                        switch (prop->type) {
                        case P_MENU:
                                child_count++;
-                               prompt = prompt;
                                if (single_menu_mode) {
                                        item_make("%s%*c%s",
                                                  menu->data ? "-->" : "++>",
index 42f5ac7..001559e 100644 (file)
@@ -4,20 +4,23 @@
 PKG="ncursesw menuw panelw"
 PKG2="ncurses menu panel"
 
-if pkg-config --exists $PKG; then
-       echo cflags=\"$(pkg-config --cflags $PKG)\"
-       echo libs=\"$(pkg-config --libs $PKG)\"
-       exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+       if pkg-config --exists $PKG; then
+               echo cflags=\"$(pkg-config --cflags $PKG)\"
+               echo libs=\"$(pkg-config --libs $PKG)\"
+               exit 0
+       fi
 
-if pkg-config --exists $PKG2; then
-       echo cflags=\"$(pkg-config --cflags $PKG2)\"
-       echo libs=\"$(pkg-config --libs $PKG2)\"
-       exit 0
+       if pkg-config --exists $PKG2; then
+               echo cflags=\"$(pkg-config --cflags $PKG2)\"
+               echo libs=\"$(pkg-config --libs $PKG2)\"
+               exit 0
+       fi
 fi
 
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
 if [ -f /usr/include/ncursesw/ncurses.h ]; then
        echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
        echo libs=\"-lncursesw -lmenuw -lpanelw\"
index 0862e15..02ccc0a 100755 (executable)
@@ -4,6 +4,13 @@
 PKG="Qt5Core Qt5Gui Qt5Widgets"
 PKG2="QtCore QtGui"
 
+if [ -z "$(command -v pkg-config)" ]; then
+       echo >&2 "*"
+       echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
+       echo >&2 "*"
+       exit 1
+fi
+
 if pkg-config --exists $PKG; then
        echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
        echo libs=\"$(pkg-config --libs $PKG)\"
index fe06e77..f599031 100755 (executable)
@@ -389,6 +389,9 @@ if ($arch eq "x86_64") {
     $mcount_regex = "^\\s*([0-9a-fA-F]+):\\sR_RISCV_CALL\\s_mcount\$";
     $type = ".quad";
     $alignment = 2;
+} elsif ($arch eq "nds32") {
+    $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_NDS32_HI20_RELA\\s+_mcount\$";
+    $alignment = 2;
 } else {
     die "Arch $arch is not supported with CONFIG_FTRACE_MCOUNT_RECORD";
 }
index 71f3941..79f7dd5 100755 (executable)
@@ -74,7 +74,7 @@ scm_version()
                fi
 
                # Check for uncommitted changes
-               if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+               if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
                        printf '%s' -dirty
                fi
 
diff --git a/scripts/subarch.include b/scripts/subarch.include
new file mode 100644 (file)
index 0000000..6506828
--- /dev/null
@@ -0,0 +1,13 @@
+# SUBARCH tells the usermode build what the underlying arch is.  That is set
+# first, and if a usermode build is happening, the "ARCH=um" on the command
+# line overrides the setting of ARCH below.  If a native build is happening,
+# then ARCH is assigned, getting whatever value it gets normally, and
+# SUBARCH is subsequently ignored.
+
+SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+                                 -e s/sun4u/sparc64/ \
+                                 -e s/arm.*/arm/ -e s/sa110/arm/ \
+                                 -e s/s390x/s390/ -e s/parisc64/parisc/ \
+                                 -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+                                 -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
+                                 -e s/riscv.*/riscv/)
index 27d8b26..d9aa521 100644 (file)
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
 config PAGE_TABLE_ISOLATION
        bool "Remove the kernel mapping in user mode"
        default y
-       depends on X86 && !UML
+       depends on (X86_64 || X86_PAE) && !UML
        help
          This feature reduces the number of hardware side channels by
          ensuring that the majority of kernel addresses are not mapped
index f2f22d0..4ccec1b 100644 (file)
@@ -79,7 +79,6 @@ int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
        struct aa_label *label = aa_secid_to_label(secid);
        int len;
 
-       AA_BUG(!secdata);
        AA_BUG(!seclen);
 
        if (!label)
index 69517e1..08d5662 100644 (file)
@@ -129,7 +129,7 @@ static int snd_rawmidi_runtime_create(struct snd_rawmidi_substream *substream)
                runtime->avail = 0;
        else
                runtime->avail = runtime->buffer_size;
-       runtime->buffer = kvmalloc(runtime->buffer_size, GFP_KERNEL);
+       runtime->buffer = kvzalloc(runtime->buffer_size, GFP_KERNEL);
        if (!runtime->buffer) {
                kfree(runtime);
                return -ENOMEM;
@@ -655,7 +655,7 @@ static int resize_runtime_buffer(struct snd_rawmidi_runtime *runtime,
        if (params->avail_min < 1 || params->avail_min > params->buffer_size)
                return -EINVAL;
        if (params->buffer_size != runtime->buffer_size) {
-               newbuf = kvmalloc(params->buffer_size, GFP_KERNEL);
+               newbuf = kvzalloc(params->buffer_size, GFP_KERNEL);
                if (!newbuf)
                        return -ENOMEM;
                spin_lock_irq(&runtime->lock);
index 730ea91..9367635 100644 (file)
@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
 error:
        mutex_unlock(&devices_mutex);
        snd_bebob_stream_destroy_duplex(bebob);
+       kfree(bebob->maudio_special_quirk);
+       bebob->maudio_special_quirk = NULL;
        snd_card_free(bebob->card);
        dev_info(&bebob->unit->device,
                 "Sound card registration failed: %d\n", err);
index bd55620..c266997 100644 (file)
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
        struct fw_device *device = fw_parent_device(unit);
        int err, rcode;
        u64 date;
-       __le32 cues[3] = {
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
-               cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
-       };
+       __le32 *cues;
 
        /* check date of software used to build */
        err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
                                   &date, sizeof(u64));
        if (err < 0)
-               goto end;
+               return err;
        /*
         * firmware version 5058 or later has date later than "20070401", but
         * 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
        if (date < 0x3230303730343031LL) {
                dev_err(&unit->device,
                        "Use firmware version 5058 or later\n");
-               err = -ENOSYS;
-               goto end;
+               return -ENXIO;
        }
 
+       cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+       if (!cues)
+               return -ENOMEM;
+
+       cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+       cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+       cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
        rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
                                   device->node_id, device->generation,
                                   device->max_speed, BEBOB_ADDR_REG_REQ,
-                                  cues, sizeof(cues));
+                                  cues, 3 * sizeof(*cues));
+       kfree(cues);
        if (rcode != RCODE_COMPLETE) {
                dev_err(&unit->device,
                        "Failed to send a cue to load firmware\n");
                err = -EIO;
        }
-end:
+
        return err;
 }
 
@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
                bebob->midi_output_ports = 2;
        }
 end:
-       if (err < 0) {
-               kfree(params);
-               bebob->maudio_special_quirk = NULL;
-       }
        mutex_unlock(&bebob->mutex);
        return err;
 }
index 1f5e1d2..ef68999 100644 (file)
@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
        fw_unit_put(dg00x->unit);
 
        mutex_destroy(&dg00x->mutex);
+       kfree(dg00x);
 }
 
 static void dg00x_card_free(struct snd_card *card)
index ad7a0a3..64c3cb0 100644 (file)
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
 {
        __le32 *reg;
        int i;
+       int err;
 
        reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
        if (reg == NULL)
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
                        reg[i] = cpu_to_le32(0x00000001);
        }
 
-       return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
-                                 FF400_FETCH_PCM_FRAMES, reg,
-                                 sizeof(__le32) * 18, 0);
+       err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+                                FF400_FETCH_PCM_FRAMES, reg,
+                                sizeof(__le32) * 18, 0);
+       kfree(reg);
+       return err;
 }
 
 static void ff400_dump_sync_status(struct snd_ff *ff,
index 71a0613..f2d0733 100644 (file)
@@ -301,6 +301,8 @@ error:
        snd_efw_transaction_remove_instance(efw);
        snd_efw_stream_destroy_duplex(efw);
        snd_card_free(efw->card);
+       kfree(efw->resp_buf);
+       efw->resp_buf = NULL;
        dev_info(&efw->unit->device,
                 "Sound card registration failed: %d\n", err);
 }
index 1e5b2c8..2ea8be6 100644 (file)
@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
 
        kfree(oxfw->spec);
        mutex_destroy(&oxfw->mutex);
+       kfree(oxfw);
 }
 
 /*
@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
 static void do_registration(struct work_struct *work)
 {
        struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+       int i;
        int err;
 
        if (oxfw->registered)
@@ -269,7 +271,15 @@ error:
        snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
        if (oxfw->has_output)
                snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+       for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+               kfree(oxfw->tx_stream_formats[i]);
+               oxfw->tx_stream_formats[i] = NULL;
+               kfree(oxfw->rx_stream_formats[i]);
+               oxfw->rx_stream_formats[i] = NULL;
+       }
        snd_card_free(oxfw->card);
+       kfree(oxfw->spec);
+       oxfw->spec = NULL;
        dev_info(&oxfw->unit->device,
                 "Sound card registration failed: %d\n", err);
 }
index 44ad41f..d3fdc46 100644 (file)
@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
        fw_unit_put(tscm->unit);
 
        mutex_destroy(&tscm->mutex);
+       kfree(tscm);
 }
 
 static void tscm_card_free(struct snd_card *card)
index 1bd2757..a835558 100644 (file)
@@ -146,7 +146,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_decouple);
  */
 void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *stream)
 {
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_RUN);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_RUN, AZX_PPLCCTL_RUN);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_stream_start);
 
@@ -171,7 +172,8 @@ void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *stream)
 
        snd_hdac_ext_link_stream_clear(stream);
 
-       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL, 0, AZX_PPLCCTL_STRST);
+       snd_hdac_updatel(stream->pplc_addr, AZX_REG_PPLCCTL,
+                        AZX_PPLCCTL_STRST, AZX_PPLCCTL_STRST);
        udelay(3);
        timeout = 50;
        do {
@@ -242,7 +244,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_link_set_stream_id);
 void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
                                 int stream)
 {
-       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, 0, (1 << stream));
+       snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV, (1 << stream), 0);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_ext_link_clear_stream_id);
 
@@ -415,7 +417,6 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
                                 bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->spbcap) {
                dev_err(bus->dev, "Address of SPB capability is NULL\n");
@@ -424,12 +425,8 @@ void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->spbcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, 0, mask);
+               snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, mask);
        else
                snd_hdac_updatel(bus->spbcap, AZX_REG_SPB_SPBFCCTL, mask, 0);
 }
@@ -503,7 +500,6 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
                                bool enable, int index)
 {
        u32 mask = 0;
-       u32 register_mask = 0;
 
        if (!bus->drsmcap) {
                dev_err(bus->dev, "Address of DRSM capability is NULL\n");
@@ -512,12 +508,8 @@ void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
 
        mask |= (1 << index);
 
-       register_mask = readl(bus->drsmcap + AZX_REG_SPB_SPBFCCTL);
-
-       mask |= register_mask;
-
        if (enable)
-               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, 0, mask);
+               snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, mask);
        else
                snd_hdac_updatel(bus->drsmcap, AZX_REG_DRSM_CTL, mask, 0);
 }
index 560ec09..74244d8 100644 (file)
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
  */
 void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
 {
+       WARN_ON_ONCE(!bus->rb.area);
+
        spin_lock_irq(&bus->reg_lock);
        /* CORB set up */
        bus->corb.addr = bus->rb.addr;
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
 EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
 
 /* reset codec link */
-static int azx_reset(struct hdac_bus *bus, bool full_reset)
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
 {
        if (!full_reset)
                goto skip_reset;
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
  skip_reset:
        /* check to see if controller is ready */
        if (!snd_hdac_chip_readb(bus, GCTL)) {
-               dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
+               dev_dbg(bus->dev, "controller not ready!\n");
                return -EBUSY;
        }
 
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
 
 /* enable interrupts */
 static void azx_int_enable(struct hdac_bus *bus)
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
                return false;
 
        /* reset controller */
-       azx_reset(bus, full_reset);
+       snd_hdac_bus_reset_link(bus, full_reset);
 
-       /* initialize interrupts */
+       /* clear interrupts */
        azx_int_clear(bus);
-       azx_int_enable(bus);
 
        /* initialize the codec command I/O */
        snd_hdac_bus_init_cmd_io(bus);
 
+       /* enable interrupts after CORB/RIRB buffers are initialized above */
+       azx_int_enable(bus);
+
        /* program the position buffer */
        if (bus->use_posbuf && bus->posbuf.addr) {
                snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
index b5282cb..617ff1a 100644 (file)
@@ -145,9 +145,11 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        if (!acomp->ops) {
                request_module("i915");
                /* 10s timeout */
-               wait_for_completion_timeout(&bind_complete, 10 * 1000);
+               wait_for_completion_timeout(&bind_complete,
+                                           msecs_to_jiffies(10 * 1000));
        }
        if (!acomp->ops) {
+               dev_info(bus->dev, "couldn't bind with audio component\n");
                snd_hdac_acomp_exit(bus);
                return -ENODEV;
        }
index 9071374..6ebe817 100644 (file)
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
                emu->support_tlv = 1;
                return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
        case SNDRV_EMU10K1_IOCTL_INFO:
-               info = kmalloc(sizeof(*info), GFP_KERNEL);
+               info = kzalloc(sizeof(*info), GFP_KERNEL);
                if (!info)
                        return -ENOMEM;
                snd_emu10k1_fx8010_info(emu, info);
index 0a50855..26d348b 100644 (file)
@@ -3935,7 +3935,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
 
        list_for_each_codec(codec, bus) {
                /* FIXME: maybe a better way needed for forced reset */
-               cancel_delayed_work_sync(&codec->jackpoll_work);
+               if (current_work() != &codec->jackpoll_work.work)
+                       cancel_delayed_work_sync(&codec->jackpoll_work);
 #ifdef CONFIG_PM
                if (hda_codec_is_power_on(codec)) {
                        hda_call_codec_suspend(codec);
index 1b2ce30..aa4c672 100644 (file)
@@ -365,8 +365,10 @@ enum {
  */
 #ifdef SUPPORT_VGA_SWITCHEROO
 #define use_vga_switcheroo(chip)       ((chip)->use_vga_switcheroo)
+#define needs_eld_notify_link(chip)    ((chip)->need_eld_notify_link)
 #else
 #define use_vga_switcheroo(chip)       0
+#define needs_eld_notify_link(chip)    false
 #endif
 
 #define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
@@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
 #endif
 
 static int azx_acquire_irq(struct azx *chip, int do_disconnect);
+static void set_default_power_save(struct azx *chip);
 
 /*
  * initialize the PCI registers
@@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev)
            azx_bus(chip)->codec_powered || !chip->running)
                return -EBUSY;
 
+       /* ELD notification gets broken when HD-audio bus is off */
+       if (needs_eld_notify_link(hda))
+               return -EBUSY;
+
        return 0;
 }
 
@@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
        return true;
 }
 
+/*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+static void setup_vga_switcheroo_runtime_pm(struct azx *chip)
+{
+       struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+       struct hda_codec *codec;
+
+       if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) {
+               list_for_each_codec(codec, &chip->bus)
+                       codec->auto_runtime_pm = 1;
+               /* reset the power save setup */
+               if (chip->running)
+                       set_default_power_save(chip);
+       }
+}
+
+static void azx_vs_gpu_bound(struct pci_dev *pci,
+                            enum vga_switcheroo_client_id client_id)
+{
+       struct snd_card *card = pci_get_drvdata(pci);
+       struct azx *chip = card->private_data;
+       struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
+       if (client_id == VGA_SWITCHEROO_DIS)
+               hda->need_eld_notify_link = 0;
+       setup_vga_switcheroo_runtime_pm(chip);
+}
+
 static void init_vga_switcheroo(struct azx *chip)
 {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip)
                dev_info(chip->card->dev,
                         "Handle vga_switcheroo audio client\n");
                hda->use_vga_switcheroo = 1;
+               hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */
                chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
                pci_dev_put(p);
        }
@@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip)
 static const struct vga_switcheroo_client_ops azx_vs_ops = {
        .set_gpu_state = azx_vs_set_state,
        .can_switch = azx_vs_can_switch,
+       .gpu_bound = azx_vs_gpu_bound,
 };
 
 static int register_vga_switcheroo(struct azx *chip)
@@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip)
 #define init_vga_switcheroo(chip)              /* NOP */
 #define register_vga_switcheroo(chip)          0
 #define check_hdmi_disabled(pci)       false
+#define setup_vga_switcheroo_runtime_pm(chip)  /* NOP */
 #endif /* SUPPORT_VGA_SWITCHER */
 
 /*
@@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip)
 
        if (azx_has_pm_runtime(chip) && chip->running)
                pm_runtime_get_noresume(&pci->dev);
+       chip->running = 0;
 
        azx_del_card_list(chip);
 
@@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = {
 };
 #endif /* CONFIG_PM */
 
+static void set_default_power_save(struct azx *chip)
+{
+       int val = power_save;
+
+#ifdef CONFIG_PM
+       if (pm_blacklist) {
+               const struct snd_pci_quirk *q;
+
+               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+               if (q && val) {
+                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+                                q->subvendor, q->subdevice);
+                       val = 0;
+               }
+       }
+#endif /* CONFIG_PM */
+       snd_hda_set_power_save(&chip->bus, val * 1000);
+}
+
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
 static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
        [AZX_DRIVER_NVIDIA] = 8,
@@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip)
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
-       struct hda_codec *codec;
        int dev = chip->dev_index;
-       int val;
        int err;
 
        hda->probe_continued = 1;
@@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip)
        if (err < 0)
                goto out_free;
 
+       setup_vga_switcheroo_runtime_pm(chip);
+
        chip->running = 1;
        azx_add_card_list(chip);
 
-       val = power_save;
-#ifdef CONFIG_PM
-       if (pm_blacklist) {
-               const struct snd_pci_quirk *q;
-
-               q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
-               if (q && val) {
-                       dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
-                                q->subvendor, q->subdevice);
-                       val = 0;
-               }
-       }
-#endif /* CONFIG_PM */
-       /*
-        * The discrete GPU cannot power down unless the HDA controller runtime
-        * suspends, so activate runtime PM on codecs even if power_save == 0.
-        */
-       if (use_vga_switcheroo(hda))
-               list_for_each_codec(codec, &chip->bus)
-                       codec->auto_runtime_pm = 1;
+       set_default_power_save(chip);
 
-       snd_hda_set_power_save(&chip->bus, val * 1000);
        if (azx_has_pm_runtime(chip))
                pm_runtime_put_autosuspend(&pci->dev);
 
index e3a3d31..f59719e 100644 (file)
@@ -37,6 +37,7 @@ struct hda_intel {
 
        /* vga_switcheroo setup */
        unsigned int use_vga_switcheroo:1;
+       unsigned int need_eld_notify_link:1;
        unsigned int vga_switcheroo_registered:1;
        unsigned int init_failed:1; /* delayed init failed */
 
index 1d117f0..3ac7ba9 100644 (file)
@@ -6409,6 +6409,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
index e359938..77b265b 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/sizes.h>
 #include <linux/pm_runtime.h>
 
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
        acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
 }
 
+static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num)
+{
+       u32 dma_ctrl;
+       int ret;
+
+       /* clear the reset bit */
+       dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+       dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
+       acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+       /* check the reset bit before programming configuration registers */
+       ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4),
+                                dma_ctrl,
+                                !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK),
+                                100, ACP_DMA_RESET_TIME);
+       if (ret < 0)
+               pr_err("Failed to clear reset of channel : %d\n", ch_num);
+}
+
 /*
  * Initialize the DMA descriptor information for transfer between
  * system memory <-> ACP SRAM
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
                config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
                                              &dmadscr[i]);
        }
+       pre_config_reset(acp_mmio, ch);
        config_acp_dma_channel(acp_mmio, ch,
                               dma_dscr_idx - 1,
                               NUM_DSCRS_PER_CHANNEL,
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
                config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
                                              &dmadscr[i]);
        }
+       pre_config_reset(acp_mmio, ch);
        /* Configure the DMA channel with the above descriptore */
        config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1,
                               NUM_DSCRS_PER_CHANNEL,
index 275677d..4075541 100644 (file)
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
        SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
                                3, 1, 0),
        SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
-       SOC_SINGLE("MMTLR Data Switch", 0,
-                               1, 1, 0),
+       SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+                               0, 1, 0),
        SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
        SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
 };
index 92b7125..1093f76 100644 (file)
@@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
        case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+       case MAX98373_R203E_AMP_PATH_GAIN:
        case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
        case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
        case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
@@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component)
        /* Software Reset */
        regmap_write(max98373->regmap,
                MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+       usleep_range(10000, 11000);
 
        /* IV default slot configuration */
        regmap_write(max98373->regmap,
@@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev)
 
        regmap_write(max98373->regmap,
                MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+       usleep_range(10000, 11000);
        regcache_cache_only(max98373->regmap, false);
        regcache_sync(max98373->regmap);
        return 0;
index dca82dd..32fe76c 100644 (file)
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
        {RT5514_ANA_CTRL_LDO10,         0x00028604},
        {RT5514_ANA_CTRL_ADCFED,        0x00000800},
        {RT5514_ASRC_IN_CTRL1,          0x00000003},
-       {RT5514_DOWNFILTER0_CTRL3,      0x10000352},
-       {RT5514_DOWNFILTER1_CTRL3,      0x10000352},
+       {RT5514_DOWNFILTER0_CTRL3,      0x10000342},
+       {RT5514_DOWNFILTER1_CTRL3,      0x10000342},
 };
 
 static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
        {RT5514_ASRC_IN_CTRL1,          0x00000003},
        {RT5514_DOWNFILTER0_CTRL1,      0x00020c2f},
        {RT5514_DOWNFILTER0_CTRL2,      0x00020c2f},
-       {RT5514_DOWNFILTER0_CTRL3,      0x10000352},
+       {RT5514_DOWNFILTER0_CTRL3,      0x10000342},
        {RT5514_DOWNFILTER1_CTRL1,      0x00020c2f},
        {RT5514_DOWNFILTER1_CTRL2,      0x00020c2f},
-       {RT5514_DOWNFILTER1_CTRL3,      0x10000352},
+       {RT5514_DOWNFILTER1_CTRL3,      0x10000342},
        {RT5514_ANA_CTRL_LDO10,         0x00028604},
        {RT5514_ANA_CTRL_LDO18_16,      0x02000345},
        {RT5514_ANA_CTRL_ADC12,         0x0000a2a8},
index 640d400..afe7d5b 100644 (file)
@@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
 }
 
 static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
 static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
 
 /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
 
        /* DAC Digital Volume */
        SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
-               RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv),
+               RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
 
        /* IN Boost Volume */
        SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
        SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
                RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
-               RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv),
+               RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
 
        /* ADC Boost Volume Control */
        SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
index d53680a..6df1586 100644 (file)
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
        struct sigmadsp_control *ctrl, void *data)
 {
        /* safeload loads up to 20 bytes in a atomic operation */
-       if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
-           sigmadsp->ops->safeload)
+       if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
                return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
                        ctrl->num_bytes);
        else
index 14999b9..0d61455 100644 (file)
@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
               TAS6424_FAULT_PVDD_UV |
               TAS6424_FAULT_VBAT_UV;
 
-       if (reg)
+       if (!reg) {
+               tas6424->last_fault1 = reg;
                goto check_global_fault2_reg;
+       }
 
        /*
         * Only flag errors once for a given occurrence. This is needed as
@@ -461,8 +463,10 @@ check_global_fault2_reg:
               TAS6424_FAULT_OTSD_CH3 |
               TAS6424_FAULT_OTSD_CH4;
 
-       if (!reg)
+       if (!reg) {
+               tas6424->last_fault2 = reg;
                goto check_warn_reg;
+       }
 
        if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
                dev_crit(dev, "experienced a global overtemp shutdown\n");
@@ -497,8 +501,10 @@ check_warn_reg:
               TAS6424_WARN_VDD_OTW_CH3 |
               TAS6424_WARN_VDD_OTW_CH4;
 
-       if (!reg)
+       if (!reg) {
+               tas6424->last_warn = reg;
                goto out;
+       }
 
        if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
                dev_warn(dev, "experienced a VDD under voltage condition\n");
index f27464c..7954196 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/i2c.h>
+#include <linux/acpi.h>
 
 #include "wm8804.h"
 
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
 
+#if defined(CONFIG_OF)
 static const struct of_device_id wm8804_of_match[] = {
        { .compatible = "wlf,wm8804", },
        { }
 };
 MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+       { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+       { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
 
 static struct i2c_driver wm8804_i2c_driver = {
        .driver = {
                .name = "wm8804",
                .pm = &wm8804_pm,
-               .of_match_table = wm8804_of_match,
+               .of_match_table = of_match_ptr(wm8804_of_match),
+               .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
        },
        .probe = wm8804_i2c_probe,
        .remove = wm8804_i2c_remove,
index 953d94d..ade34c2 100644 (file)
@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
 
 static struct platform_driver wm9712_component_driver = {
        .driver = {
-               .name = "wm9712-component",
+               .name = "wm9712-codec",
        },
 
        .probe = wm9712_probe,
index d32844f..b6dc524 100644 (file)
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_MONO_SPEAKER |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Linx Linx7 tablet */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"),
+               },
+               .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+                                       BYT_RT5640_MONO_SPEAKER |
+                                       BYT_RT5640_JD_NOT_INV |
+                                       BYT_RT5640_SSP0_AIF1 |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* MSI S100 tablet */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_SSP0_AIF1 |
                                        BYT_RT5640_MCLK_EN),
        },
+       {       /* Onda v975w */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+                       /* The above are too generic, also match BIOS info */
+                       DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"),
+                       DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"),
+               },
+               .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+                                       BYT_RT5640_JD_SRC_JD2_IN4N |
+                                       BYT_RT5640_OVCD_TH_2000UA |
+                                       BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_DIFF_MIC |
+                                       BYT_RT5640_MCLK_EN),
+       },
        {       /* Pipo W4 */
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
index dce6494..1d17be0 100644 (file)
@@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus)
                return -ENXIO;
        }
 
-       skl_init_chip(bus, true);
+       snd_hdac_bus_reset_link(bus, true);
 
        snd_hdac_bus_parse_capabilities(bus);
 
index dc94c5c..c6b5157 100644 (file)
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c)
 {
        int i;
 
-       for (i = 0; i < MAX_SESSIONS; i++)
+       for (i = 0; i < MAX_SESSIONS; i++) {
                routing_data->sessions[i].port_id = -1;
+               routing_data->sessions[i].fedai_id = -1;
+       }
 
        return 0;
 }
index 3a3064d..051f964 100644 (file)
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                goto rsnd_adg_get_clkout_end;
 
        req_size = prop->length / sizeof(u32);
+       if (req_size > REQ_SIZE) {
+               dev_err(dev,
+                       "too many clock-frequency, use top %d\n", REQ_SIZE);
+               req_size = REQ_SIZE;
+       }
 
        of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
        req_48kHz_rate = 0;
index f8425d8..d23c2bb 100644 (file)
@@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status,
                        (func_call && (mod)->ops->fn) ? #fn : "");      \
                if (func_call && (mod)->ops->fn)                        \
                        tmp = (mod)->ops->fn(mod, io, param);           \
-               if (tmp)                                                \
+               if (tmp && (tmp != -EPROBE_DEFER))                      \
                        dev_err(dev, "%s[%d] : %s error %d\n",          \
                                rsnd_mod_name(mod), rsnd_mod_id(mod),   \
                                                     #fn, tmp);         \
@@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
        rsnd_dai_stream_quit(io);
 }
 
+static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
+                               struct snd_soc_dai *dai)
+{
+       struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
+       struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+       struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+       return rsnd_dai_call(prepare, io, priv);
+}
+
 static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
        .startup        = rsnd_soc_dai_startup,
        .shutdown       = rsnd_soc_dai_shutdown,
        .trigger        = rsnd_soc_dai_trigger,
        .set_fmt        = rsnd_soc_dai_set_fmt,
        .set_tdm_slot   = rsnd_soc_set_dai_tdm_slot,
+       .prepare        = rsnd_soc_dai_prepare,
 };
 
 void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1550,6 +1561,14 @@ exit_snd_probe:
                rsnd_dai_call(remove, &rdai->capture, priv);
        }
 
+       /*
+        * adg is very special mod which can't use rsnd_dai_call(remove),
+        * and it registers ADG clock on probe.
+        * It should be unregister if probe failed.
+        * Mainly it is assuming -EPROBE_DEFER case
+        */
+       rsnd_adg_remove(priv);
+
        return ret;
 }
 
index fe63ef8..d65ea7b 100644 (file)
@@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
        /* try to get DMAEngine channel */
        chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
        if (IS_ERR_OR_NULL(chan)) {
+               /* Let's follow when -EPROBE_DEFER case */
+               if (PTR_ERR(chan) == -EPROBE_DEFER)
+                       return PTR_ERR(chan);
+
                /*
                 * DMA failed. try to PIO mode
                 * see
index 96d9333..8f7a0ab 100644 (file)
@@ -280,6 +280,9 @@ struct rsnd_mod_ops {
        int (*nolock_stop)(struct rsnd_mod *mod,
                    struct rsnd_dai_stream *io,
                    struct rsnd_priv *priv);
+       int (*prepare)(struct rsnd_mod *mod,
+                      struct rsnd_dai_stream *io,
+                      struct rsnd_priv *priv);
 };
 
 struct rsnd_dai_stream;
@@ -309,6 +312,7 @@ struct rsnd_mod {
  * H   0: fallback
  * H   0: hw_params
  * H   0: pointer
+ * H   0: prepare
  */
 #define __rsnd_mod_shift_nolock_start  0
 #define __rsnd_mod_shift_nolock_stop   0
@@ -323,6 +327,7 @@ struct rsnd_mod {
 #define __rsnd_mod_shift_fallback      28 /* always called */
 #define __rsnd_mod_shift_hw_params     28 /* always called */
 #define __rsnd_mod_shift_pointer       28 /* always called */
+#define __rsnd_mod_shift_prepare       28 /* always called */
 
 #define __rsnd_mod_add_probe           0
 #define __rsnd_mod_add_remove          0
@@ -337,6 +342,7 @@ struct rsnd_mod {
 #define __rsnd_mod_add_fallback                0
 #define __rsnd_mod_add_hw_params       0
 #define __rsnd_mod_add_pointer         0
+#define __rsnd_mod_add_prepare         0
 
 #define __rsnd_mod_call_probe          0
 #define __rsnd_mod_call_remove         0
@@ -351,6 +357,7 @@ struct rsnd_mod {
 #define __rsnd_mod_call_pointer                0
 #define __rsnd_mod_call_nolock_start   0
 #define __rsnd_mod_call_nolock_stop    1
+#define __rsnd_mod_call_prepare                0
 
 #define rsnd_mod_to_priv(mod)  ((mod)->priv)
 #define rsnd_mod_name(mod)     ((mod)->ops->name)
index 8304e4e..3f880ec 100644 (file)
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
        if (rsnd_ssi_is_multi_slave(mod, io))
                return 0;
 
-       if (ssi->usrcnt > 1) {
+       if (ssi->rate) {
                if (ssi->rate != rate) {
                        dev_err(dev, "SSI parent/child should use same rate\n");
                        return -EINVAL;
@@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
                         struct rsnd_priv *priv)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-       int ret;
 
        if (!rsnd_ssi_is_run_mods(mod, io))
                return 0;
@@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
 
        rsnd_mod_power_on(mod);
 
-       ret = rsnd_ssi_master_clk_start(mod, io);
-       if (ret < 0)
-               return ret;
-
        rsnd_ssi_config_init(mod, io);
 
        rsnd_ssi_register_setup(mod);
@@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
        return 0;
 }
 
+static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+                           struct rsnd_dai_stream *io,
+                           struct rsnd_priv *priv)
+{
+       return rsnd_ssi_master_clk_start(mod, io);
+}
+
 static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
        .name   = SSI_NAME,
        .probe  = rsnd_ssi_common_probe,
@@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
        .pointer = rsnd_ssi_pio_pointer,
        .pcm_new = rsnd_ssi_pcm_new,
        .hw_params = rsnd_ssi_hw_params,
+       .prepare = rsnd_ssi_prepare,
 };
 
 static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
        .pcm_new = rsnd_ssi_pcm_new,
        .fallback = rsnd_ssi_fallback,
        .hw_params = rsnd_ssi_hw_params,
+       .prepare = rsnd_ssi_prepare,
 };
 
 int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
index 9cfe10d..473eefe 100644 (file)
@@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
        sink = codec_dai->playback_widget;
        source = cpu_dai->capture_widget;
        if (sink && source) {
-               ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+               ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
                                           dai_link->num_params,
                                           source, sink);
                if (ret != 0) {
@@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
        sink = cpu_dai->playback_widget;
        source = codec_dai->capture_widget;
        if (sink && source) {
-               ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+               ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
                                           dai_link->num_params,
                                           source, sink);
                if (ret != 0) {
index 7e96793..461d951 100644 (file)
@@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_dapm_path *source_p, *sink_p;
        struct snd_soc_dai *source, *sink;
+       struct snd_soc_pcm_runtime *rtd = w->priv;
        const struct snd_soc_pcm_stream *config = w->params + w->params_select;
        struct snd_pcm_substream substream;
        struct snd_pcm_hw_params *params = NULL;
@@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
                goto out;
        }
        substream.runtime = runtime;
+       substream.private_data = rtd;
 
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
@@ -3895,6 +3897,7 @@ outfree_w_param:
 }
 
 int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+                        struct snd_soc_pcm_runtime *rtd,
                         const struct snd_soc_pcm_stream *params,
                         unsigned int num_params,
                         struct snd_soc_dapm_widget *source,
@@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
 
        w->params = params;
        w->num_params = num_params;
+       w->priv = rtd;
 
        ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
        if (ret)
index 16e006f..4602464 100644 (file)
@@ -27,6 +27,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 serror_pending;
+               __u8 serror_has_esr;
+               /* Align it to 8 bytes */
+               __u8 pad[6];
+               __u64 serror_esr;
+       } exception;
+       __u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT       16
index 4e76630..97c3478 100644 (file)
@@ -39,6 +39,7 @@
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE
 #define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
 struct kvm_arch_memory_slot {
 };
 
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 serror_pending;
+               __u8 serror_has_esr;
+               /* Align it to 8 bytes */
+               __u8 pad[6];
+               __u64 serror_esr;
+       } exception;
+       __u32 reserved[12];
+};
+
 /* If you need to interpret the index values, here is the key: */
 #define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
 #define KVM_REG_ARM_COPROC_SHIFT       16
index 4cdaa55..9a50f02 100644 (file)
@@ -4,7 +4,7 @@
 /*
  * KVM s390 specific structures and definitions
  *
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2018
  *
  *    Author(s): Carsten Otte <cotte@de.ibm.com>
  *               Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_FPRS   (1UL << 8)
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
+#define KVM_SYNC_ETOKEN (1UL << 11)
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
                struct {
                        __u64 reserved1[2];
                        __u64 gscb[4];
+                       __u64 etoken;
+                       __u64 etoken_extension;
                };
        };
 };
index c535c2f..86299ef 100644 (file)
@@ -378,4 +378,41 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
 #define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
 
+#define KVM_STATE_NESTED_GUEST_MODE    0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING   0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON     0x00000002
+
+struct kvm_vmx_nested_state {
+       __u64 vmxon_pa;
+       __u64 vmcs_pa;
+
+       struct {
+               __u16 flags;
+       } smm;
+};
+
+/* for KVM_CAP_NESTED_STATE */
+struct kvm_nested_state {
+       /* KVM_STATE_* flags */
+       __u16 flags;
+
+       /* 0 for VMX, 1 for SVM.  */
+       __u16 format;
+
+       /* 128 for SVM, 128 + VMCS size for VMX.  */
+       __u32 size;
+
+       union {
+               /* VMXON, VMCS */
+               struct kvm_vmx_nested_state vmx;
+
+               /* Pad the header to 128 bytes.  */
+               __u8 pad[120];
+       };
+
+       __u8 data[0];
+};
+
 #endif /* _ASM_X86_KVM_H */
index b2ec20e..b455930 100644 (file)
@@ -68,6 +68,7 @@ static const char * const map_type_name[] = {
        [BPF_MAP_TYPE_DEVMAP]           = "devmap",
        [BPF_MAP_TYPE_SOCKMAP]          = "sockmap",
        [BPF_MAP_TYPE_CPUMAP]           = "cpumap",
+       [BPF_MAP_TYPE_XSKMAP]           = "xskmap",
        [BPF_MAP_TYPE_SOCKHASH]         = "sockhash",
        [BPF_MAP_TYPE_CGROUP_STORAGE]   = "cgroup_storage",
 };
index 1832100..6d41323 100644 (file)
@@ -194,8 +194,10 @@ int do_event_pipe(int argc, char **argv)
        }
 
        while (argc) {
-               if (argc < 2)
+               if (argc < 2) {
                        BAD_ARG();
+                       goto err_close_map;
+               }
 
                if (is_prefix(*argv, "cpu")) {
                        char *endptr;
@@ -221,6 +223,7 @@ int do_event_pipe(int argc, char **argv)
                        NEXT_ARG();
                } else {
                        BAD_ARG();
+                       goto err_close_map;
                }
 
                do_all = false;
index d78aed8..8ff8cb1 100644 (file)
@@ -234,6 +234,7 @@ int main(int argc, char *argv[])
                        break;
 
                default:
+                       error = HV_E_FAIL;
                        syslog(LOG_ERR, "Unknown operation: %d",
                                buffer.hdr.operation);
 
index dbf6e8b..bbb2a8e 100644 (file)
@@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size)
                 * Found a match; just move the remaining
                 * entries up.
                 */
-               if (i == num_records) {
+               if (i == (num_records - 1)) {
                        kvp_file_info[pool].num_records--;
                        kvp_update_file(pool);
                        return 0;
index 6b0c36a..e569972 100644 (file)
@@ -30,9 +30,12 @@ struct task_struct {
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
        int pid;
+       int state;
        char comm[17];
 };
 
+#define TASK_RUNNING 0
+
 extern struct task_struct *__curr(void);
 
 #define current (__curr())
diff --git a/tools/include/linux/nmi.h b/tools/include/linux/nmi.h
new file mode 100644 (file)
index 0000000..e69de29
index 664ced8..e907ba6 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
 /* Copyright (C) 2018 Netronome Systems, Inc. */
 
 #ifndef __TOOLS_LIBC_COMPAT_H
index 4299067..df4bedb 100644 (file)
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free,     sys_pkey_free)
 __SYSCALL(__NR_statx,     sys_statx)
 #define __NR_io_pgetevents 292
 __SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
 
 #undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
 
 /*
  * 32 bit systems traditionally used different
index 9c660e1..300f336 100644 (file)
@@ -687,6 +687,15 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ASPECT_RATIO    4
 
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on client
+ * also supporting DRM_CLIENT_CAP_ATOMIC
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS    5
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
        __u64 capability;
index cf01b68..43391e2 100644 (file)
@@ -164,6 +164,8 @@ enum {
        IFLA_CARRIER_UP_COUNT,
        IFLA_CARRIER_DOWN_COUNT,
        IFLA_NEW_IFINDEX,
+       IFLA_MIN_MTU,
+       IFLA_MAX_MTU,
        __IFLA_MAX
 };
 
@@ -334,6 +336,7 @@ enum {
        IFLA_BRPORT_GROUP_FWD_MASK,
        IFLA_BRPORT_NEIGH_SUPPRESS,
        IFLA_BRPORT_ISOLATED,
+       IFLA_BRPORT_BACKUP_PORT,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
 
 #define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
 
+/* XFRM section */
+enum {
+       IFLA_XFRM_UNSPEC,
+       IFLA_XFRM_LINK,
+       IFLA_XFRM_IF_ID,
+       __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
 enum macsec_validation_type {
        MACSEC_VALIDATE_DISABLED = 0,
        MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
        XDP_ATTACHED_DRV,
        XDP_ATTACHED_SKB,
        XDP_ATTACHED_HW,
+       XDP_ATTACHED_MULTI,
 };
 
 enum {
@@ -928,6 +942,9 @@ enum {
        IFLA_XDP_ATTACHED,
        IFLA_XDP_FLAGS,
        IFLA_XDP_PROG_ID,
+       IFLA_XDP_DRV_PROG_ID,
+       IFLA_XDP_SKB_PROG_ID,
+       IFLA_XDP_HW_PROG_ID,
        __IFLA_XDP_MAX,
 };
 
index b6270a3..07548de 100644 (file)
@@ -949,6 +949,9 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_GET_MSR_FEATURES 153
 #define KVM_CAP_HYPERV_EVENTFD 154
 #define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1391,6 +1394,9 @@ struct kvm_enc_region {
 /* Available with KVM_CAP_HYPERV_EVENTFD */
 #define KVM_HYPERV_EVENTFD        _IOW(KVMIO,  0xbd, struct kvm_hyperv_eventfd)
 
+/* Available with KVM_CAP_NESTED_STATE */
+#define KVM_GET_NESTED_STATE         _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
+#define KVM_SET_NESTED_STATE         _IOW(KVMIO,  0xbf, struct kvm_nested_state)
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
index eeb787b..f35eb72 100644 (file)
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
 
        PERF_SAMPLE_MAX = 1U << 20,             /* non-ABI */
 
-       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63,
+       __PERF_SAMPLE_CALLCHAIN_EARLY           = 1ULL << 63, /* non-ABI; internal use */
 };
 
 /*
index c51f8e5..84c3de8 100644 (file)
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
 };
 
 #define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
 
 struct vhost_msg {
        int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
        };
 };
 
+struct vhost_msg_v2 {
+       __u32 type;
+       __u32 reserved;
+       union {
+               struct vhost_iotlb_msg iotlb;
+               __u8 padding[64];
+       };
+};
+
 struct vhost_memory_region {
        __u64 guest_phys_addr;
        __u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
 #define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24,      \
                                         struct vhost_vring_state)
 
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
 /* VHOST_NET specific defines */
 
 /* Attach virtio net ring to a raw socket, or tap device.
index 56c4b3f..195ba48 100755 (executable)
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider):
             if len(vms) == 0:
                 self.do_read = False
 
-            self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+            self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
 
         else:
             self.paths = []
             self.do_read = True
-        self.reset()
+
+    def _verify_paths(self):
+        """Remove invalid paths"""
+        for path in self.paths:
+            if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
+                self.paths.remove(path)
+                continue
 
     def read(self, reset=0, by_guest=0):
         """Returns a dict with format:'file name / field -> current value'.
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider):
         # If no debugfs filtering support is available, then don't read.
         if not self.do_read:
             return results
+        self._verify_paths()
 
         paths = self.paths
         if self._pid == 0:
@@ -1096,15 +1103,16 @@ class Tui(object):
             pid = self.stats.pid_filter
         self.screen.erase()
         gname = self.get_gname_from_pid(pid)
+        self._gname = gname
         if gname:
             gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
                                    if len(gname) > MAX_GUEST_NAME_LEN
                                    else gname))
         if pid > 0:
-            self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}'
-                               .format(pid, gname), curses.A_BOLD)
+            self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
         else:
-            self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+            self._headline = 'kvm statistics - summary'
+        self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
         if self.stats.fields_filter:
             regex = self.stats.fields_filter
             if len(regex) > MAX_REGEX_LEN:
@@ -1162,6 +1170,19 @@ class Tui(object):
 
             return sorted_items
 
+        if not self._is_running_guest(self.stats.pid_filter):
+            if self._gname:
+                try: # ...to identify the guest by name in case it's back
+                    pids = self.get_pid_from_gname(self._gname)
+                    if len(pids) == 1:
+                        self._refresh_header(pids[0])
+                        self._update_pid(pids[0])
+                        return
+                except:
+                    pass
+            self._display_guest_dead()
+            # leave final data on screen
+            return
         row = 3
         self.screen.move(row, 0)
         self.screen.clrtobot()
@@ -1184,6 +1205,7 @@ class Tui(object):
         # print events
         tavg = 0
         tcur = 0
+        guest_removed = False
         for key, values in get_sorted_events(self, stats):
             if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
                 break
@@ -1191,7 +1213,10 @@ class Tui(object):
                 key = self.get_gname_from_pid(key)
                 if not key:
                     continue
-            cur = int(round(values.delta / sleeptime)) if values.delta else ''
+            cur = int(round(values.delta / sleeptime)) if values.delta else 0
+            if cur < 0:
+                guest_removed = True
+                continue
             if key[0] != ' ':
                 if values.delta:
                     tcur += values.delta
@@ -1204,13 +1229,21 @@ class Tui(object):
                                values.value * 100 / float(ltotal), cur))
             row += 1
         if row == 3:
-            self.screen.addstr(4, 1, 'No matching events reported yet')
+            if guest_removed:
+                self.screen.addstr(4, 1, 'Guest removed, updating...')
+            else:
+                self.screen.addstr(4, 1, 'No matching events reported yet')
         if row > 4:
             tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
             self.screen.addstr(row, 1, '%-40s %10d        %8s' %
                                ('Total', total, tavg), curses.A_BOLD)
         self.screen.refresh()
 
+    def _display_guest_dead(self):
+        marker = '   Guest is DEAD   '
+        y = min(len(self._headline), 80 - len(marker))
+        self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)
+
     def _show_msg(self, text):
         """Display message centered text and exit on key press"""
         hint = 'Press any key to continue'
@@ -1219,10 +1252,10 @@ class Tui(object):
         (x, term_width) = self.screen.getmaxyx()
         row = 2
         for line in text:
-            start = (term_width - len(line)) / 2
+            start = (term_width - len(line)) // 2
             self.screen.addstr(row, start, line)
             row += 1
-        self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
+        self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
                            curses.A_STANDOUT)
         self.screen.getkey()
 
@@ -1292,7 +1325,7 @@ class Tui(object):
         msg = ''
         while True:
             self.screen.erase()
-            self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+            self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
                                DELAY_DEFAULT, curses.A_BOLD)
             self.screen.addstr(4, 0, msg)
             self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
@@ -1319,6 +1352,12 @@ class Tui(object):
                 msg = '"' + str(val) + '": Invalid value'
         self._refresh_header()
 
+    def _is_running_guest(self, pid):
+        """Check if pid is still a running process."""
+        if not pid:
+            return True
+        return os.path.isdir(os.path.join('/proc/', str(pid)))
+
     def _show_vm_selection_by_guest(self):
         """Draws guest selection mask.
 
@@ -1346,7 +1385,7 @@ class Tui(object):
             if not guest or guest == '0':
                 break
             if guest.isdigit():
-                if not os.path.isdir(os.path.join('/proc/', guest)):
+                if not self._is_running_guest(guest):
                     msg = '"' + guest + '": Not a running process'
                     continue
                 pid = int(guest)
index 13a8611..6eb9bac 100644 (file)
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o
index 2abd0f1..bdb9493 100644 (file)
@@ -50,6 +50,7 @@
 #include "libbpf.h"
 #include "bpf.h"
 #include "btf.h"
+#include "str_error.h"
 
 #ifndef EM_BPF
 #define EM_BPF 247
@@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
                obj->efile.fd = open(obj->path, O_RDONLY);
                if (obj->efile.fd < 0) {
                        char errmsg[STRERR_BUFSIZE];
-                       char *cp = strerror_r(errno, errmsg, sizeof(errmsg));
+                       char *cp = str_error(errno, errmsg, sizeof(errmsg));
 
                        pr_warning("failed to open %s: %s\n", obj->path, cp);
                        return -errno;
@@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
                                                      data->d_size, name, idx);
                        if (err) {
                                char errmsg[STRERR_BUFSIZE];
-                               char *cp = strerror_r(-err, errmsg,
-                                                     sizeof(errmsg));
+                               char *cp = str_error(-err, errmsg, sizeof(errmsg));
 
                                pr_warning("failed to alloc program %s (%s): %s",
                                           name, obj->path, cp);
@@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj)
 
                *pfd = bpf_create_map_xattr(&create_attr);
                if (*pfd < 0 && create_attr.btf_key_type_id) {
-                       cp = strerror_r(errno, errmsg, sizeof(errmsg));
+                       cp = str_error(errno, errmsg, sizeof(errmsg));
                        pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
                                   map->name, cp, errno);
                        create_attr.btf_fd = 0;
@@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj)
                        size_t j;
 
                        err = *pfd;
-                       cp = strerror_r(errno, errmsg, sizeof(errmsg));
+                       cp = str_error(errno, errmsg, sizeof(errmsg));
                        pr_warning("failed to create map (name: '%s'): %s\n",
                                   map->name, cp);
                        for (j = 0; j < i; j++)
@@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
        }
 
        ret = -LIBBPF_ERRNO__LOAD;
-       cp = strerror_r(errno, errmsg, sizeof(errmsg));
+       cp = str_error(errno, errmsg, sizeof(errmsg));
        pr_warning("load bpf program failed: %s\n", cp);
 
        if (log_buf && log_buf[0] != '\0') {
@@ -1654,7 +1654,7 @@ static int check_path(const char *path)
 
        dir = dirname(dname);
        if (statfs(dir, &st_fs)) {
-               cp = strerror_r(errno, errmsg, sizeof(errmsg));
+               cp = str_error(errno, errmsg, sizeof(errmsg));
                pr_warning("failed to statfs %s: %s\n", dir, cp);
                err = -errno;
        }
@@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
        }
 
        if (bpf_obj_pin(prog->instances.fds[instance], path)) {
-               cp = strerror_r(errno, errmsg, sizeof(errmsg));
+               cp = str_error(errno, errmsg, sizeof(errmsg));
                pr_warning("failed to pin program: %s\n", cp);
                return -errno;
        }
@@ -1708,7 +1708,7 @@ static int make_dir(const char *path)
                err = -errno;
 
        if (err) {
-               cp = strerror_r(-err, errmsg, sizeof(errmsg));
+               cp = str_error(-err, errmsg, sizeof(errmsg));
                pr_warning("failed to mkdir %s: %s\n", path, cp);
        }
        return err;
@@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
        }
 
        if (bpf_obj_pin(map->fd, path)) {
-               cp = strerror_r(errno, errmsg, sizeof(errmsg));
+               cp = str_error(errno, errmsg, sizeof(errmsg));
                pr_warning("failed to pin map: %s\n", cp);
                return -errno;
        }
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
new file mode 100644 (file)
index 0000000..b879811
--- /dev/null
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: LGPL-2.1
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+#include "str_error.h"
+
+/*
+ * Wrapper to allow for building in non-GNU systems such as Alpine Linux's musl
+ * libc, while checking strerror_r() return to avoid having to check this in
+ * all places calling it.
+ */
+char *str_error(int err, char *dst, int len)
+{
+       int ret = strerror_r(err, dst, len);
+       if (ret)
+               snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
+       return dst;
+}
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
new file mode 100644 (file)
index 0000000..355b1db
--- /dev/null
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
+#ifndef BPF_STR_ERROR
+#define BPF_STR_ERROR
+
+char *str_error(int err, char *dst, int len);
+#endif // BPF_STR_ERROR
index 42261a9..ac841bc 100644 (file)
@@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt
        mv $@+ $@
 
 ifdef USE_ASCIIDOCTOR
-$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt
+$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt
        $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
        $(ASCIIDOC) -b manpage -d manpage \
                $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
index b3d1b12..5224ade 100644 (file)
@@ -777,14 +777,12 @@ endif
        $(call QUIET_INSTALL, libexec) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
 ifndef NO_LIBBPF
-       $(call QUIET_INSTALL, lib) \
-               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, include/bpf) \
-               $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, lib) \
-               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
-       $(call QUIET_INSTALL, examples/bpf) \
-               $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, bpf-headers) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+               $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+       $(call QUIET_INSTALL, bpf-examples) \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
+               $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
 endif
        $(call QUIET_INSTALL, perf-archive) \
                $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
index f013b11..dbef716 100644 (file)
@@ -11,7 +11,8 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
 
 out    := $(OUTPUT)arch/arm64/include/generated/asm
 header := $(out)/syscalls.c
-sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+incpath := $(srctree)/tools
+sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h
 sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
 systbl := $(sysprf)/mksyscalltbl
 
@@ -19,7 +20,7 @@ systbl := $(sysprf)/mksyscalltbl
 _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
 
 $(header): $(sysdef) $(systbl)
-       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+       $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
 
 clean::
        $(call QUIET_CLEAN, arm64) $(RM) $(header)
index 52e1973..2dbb8ca 100755 (executable)
@@ -11,7 +11,8 @@
 
 gcc=$1
 hostcc=$2
-input=$3
+incpath=$3
+input=$4
 
 if ! test -r $input; then
        echo "Could not read input file" >&2
@@ -28,7 +29,6 @@ create_table_from_c()
 
        cat <<-_EoHEADER
                #include <stdio.h>
-               #define __ARCH_WANT_RENAMEAT
                #include "$input"
                int main(int argc, char *argv[])
                {
@@ -42,7 +42,7 @@ create_table_from_c()
        printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
        printf "}\n"
 
-       } | $hostcc -o $create_table_exe -x c -
+       } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
 
        $create_table_exe
 
index 20e7d74..10a44e9 100644 (file)
@@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
 
 #endif
 
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
 int arch__choose_best_symbol(struct symbol *syma,
                             struct symbol *symb __maybe_unused)
 {
        char *sym = syma->name;
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
        /* Skip over any initial dot */
        if (*sym == '.')
                sym++;
+#endif
 
        /* Avoid "SyS" kernel syscall aliases */
        if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
        return SYMBOL_A;
 }
 
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
 /* Allow matching against dot variants */
 int arch__compare_symbol_names(const char *namea, const char *nameb)
 {
index c1bd979..613709c 100644 (file)
@@ -9,6 +9,7 @@ struct test;
 int test__rdpmc(struct test *test __maybe_unused, int subtest);
 int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
 int test__insn_x86(struct test *test __maybe_unused, int subtest);
+int test__bp_modify(struct test *test, int subtest);
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
index 8e2c5a3..586849f 100644 (file)
@@ -5,3 +5,4 @@ libperf-y += arch-tests.o
 libperf-y += rdpmc.o
 libperf-y += perf-time-to-tsc.o
 libperf-$(CONFIG_AUXTRACE) += insn-x86.o
+libperf-$(CONFIG_X86_64) += bp-modify.o
index cc1802f..d47d3f8 100644 (file)
@@ -24,6 +24,12 @@ struct test arch_tests[] = {
                .func = test__insn_x86,
        },
 #endif
+#if defined(__x86_64__)
+       {
+               .desc = "x86 bp modify",
+               .func = test__bp_modify,
+       },
+#endif
        {
                .func = NULL,
        },
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
new file mode 100644 (file)
index 0000000..f53e440
--- /dev/null
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/user.h>
+#include <syscall.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ptrace.h>
+#include <asm/ptrace.h>
+#include <errno.h>
+#include "debug.h"
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+static noinline int bp_1(void)
+{
+       pr_debug("in %s\n", __func__);
+       return 0;
+}
+
+static noinline int bp_2(void)
+{
+       pr_debug("in %s\n", __func__);
+       return 0;
+}
+
+static int spawn_child(void)
+{
+       int child = fork();
+
+       if (child == 0) {
+               /*
+                * The child sets itself up as a tracee and
+                * waits in signal for parent to trace it,
+                * then it calls bp_1 and quits.
+                */
+               int err = ptrace(PTRACE_TRACEME, 0, NULL, NULL);
+
+               if (err) {
+                       pr_debug("failed to PTRACE_TRACEME\n");
+                       exit(1);
+               }
+
+               raise(SIGCONT);
+               bp_1();
+               exit(0);
+       }
+
+       return child;
+}
+
+/*
+ * This test creates a HW breakpoint, tries to
+ * change it and checks it was properly changed.
+ */
+static int bp_modify1(void)
+{
+       pid_t child;
+       int status;
+       unsigned long rip = 0, dr7 = 1;
+
+       child = spawn_child();
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 1\n");
+               return TEST_FAIL;
+       }
+
+       /*
+        * The parent does following steps:
+        *  - creates a new breakpoint (id 0) for bp_2 function
+        *  - changes that breakpoint to bp_1 function
+        *  - waits for the breakpoint to hit and checks
+        *    it has proper rip of bp_1 function
+        *  - detaches the child
+        */
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_2)) {
+               pr_debug("failed to set breakpoint, 1st time: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_1)) {
+               pr_debug("failed to set breakpoint, 2nd time: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[7]), dr7)) {
+               pr_debug("failed to set dr7: %s\n", strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+               goto out;
+       }
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 2\n");
+               return TEST_FAIL;
+       }
+
+       rip = ptrace(PTRACE_PEEKUSER, child,
+                    offsetof(struct user_regs_struct, rip), NULL);
+       if (rip == (unsigned long) -1) {
+               pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+       if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+               return TEST_FAIL;
+       }
+
+       return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This test creates a HW breakpoint, tries to
+ * change it to a bogus value and checks the original
+ * breakpoint is hit.
+ */
+static int bp_modify2(void)
+{
+       pid_t child;
+       int status;
+       unsigned long rip = 0, dr7 = 1;
+
+       child = spawn_child();
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 1\n");
+               return TEST_FAIL;
+       }
+
+       /*
+        * The parent does following steps:
+        *  - creates a new breakpoint (id 0) for bp_1 function
+        *  - tries to change that breakpoint to (-1) address
+        *  - waits for the breakpoint to hit and checks
+        *    it has proper rip of bp_1 function
+        *  - detaches the child
+        */
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), bp_1)) {
+               pr_debug("failed to set breakpoint: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       if (ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[7]), dr7)) {
+               pr_debug("failed to set dr7: %s\n", strerror(errno));
+               goto out;
+       }
+
+       if (!ptrace(PTRACE_POKEUSER, child,
+                  offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) {
+               pr_debug("failed, breakpoint set to bogus address\n");
+               goto out;
+       }
+
+       if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+               goto out;
+       }
+
+       waitpid(child, &status, 0);
+       if (WIFEXITED(status)) {
+               pr_debug("tracee exited prematurely 2\n");
+               return TEST_FAIL;
+       }
+
+       rip = ptrace(PTRACE_PEEKUSER, child,
+                    offsetof(struct user_regs_struct, rip), NULL);
+       if (rip == (unsigned long) -1) {
+               pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+                        strerror(errno));
+               goto out;
+       }
+
+       pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+       if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+               pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+               return TEST_FAIL;
+       }
+
+       return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+int test__bp_modify(struct test *test __maybe_unused,
+                   int subtest __maybe_unused)
+{
+       TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
+       TEST_ASSERT_VAL("modify test 2 failed\n", !bp_modify2());
+
+       return 0;
+}
index 20061cf..28cd6a1 100644 (file)
@@ -246,8 +246,14 @@ find_target:
 
 indirect_call:
        tok = strchr(endptr, '*');
-       if (tok != NULL)
-               ops->target.addr = strtoull(tok + 1, NULL, 16);
+       if (tok != NULL) {
+               endptr++;
+
+               /* Indirect call can use a non-rip register and offset: callq  *0x8(%rbx).
+                * Do not parse such instruction.  */
+               if (strstr(endptr, "(%r") == NULL)
+                       ops->target.addr = strtoull(endptr, NULL, 16);
+       }
        goto find_target;
 }
 
@@ -276,7 +282,19 @@ bool ins__is_call(const struct ins *ins)
        return ins->ops == &call_ops || ins->ops == &s390_call_ops;
 }
 
-static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms)
+/*
+ * Prevents matching commas in the comment section, e.g.:
+ * ffff200008446e70:       b.cs    ffff2000084470f4 <generic_exec_single+0x314>  // b.hs, b.nlast
+ */
+static inline const char *validate_comma(const char *c, struct ins_operands *ops)
+{
+       if (ops->raw_comment && c > ops->raw_comment)
+               return NULL;
+
+       return c;
+}
+
+static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
 {
        struct map *map = ms->map;
        struct symbol *sym = ms->sym;
@@ -285,6 +303,10 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
        };
        const char *c = strchr(ops->raw, ',');
        u64 start, end;
+
+       ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+       c = validate_comma(c, ops);
+
        /*
         * Examples of lines to parse for the _cpp_lex_token@@Base
         * function:
@@ -304,6 +326,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
                ops->target.addr = strtoull(c, NULL, 16);
                if (!ops->target.addr) {
                        c = strchr(c, ',');
+                       c = validate_comma(c, ops);
                        if (c++ != NULL)
                                ops->target.addr = strtoull(c, NULL, 16);
                }
@@ -361,9 +384,12 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
                return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
 
        c = strchr(ops->raw, ',');
+       c = validate_comma(c, ops);
+
        if (c != NULL) {
                const char *c2 = strchr(c + 1, ',');
 
+               c2 = validate_comma(c2, ops);
                /* check for 3-op insn */
                if (c2 != NULL)
                        c = c2;
index 005a5fe..5399ba2 100644 (file)
@@ -22,6 +22,7 @@ struct ins {
 
 struct ins_operands {
        char    *raw;
+       char    *raw_comment;
        struct {
                char    *raw;
                char    *name;
index c980bbf..1a61628 100644 (file)
@@ -251,8 +251,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
 {
        struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
 
-       if (evsel != NULL)
-               perf_evsel__init(evsel, attr, idx);
+       if (!evsel)
+               return NULL;
+       perf_evsel__init(evsel, attr, idx);
 
        if (perf_evsel__is_bpf_output(evsel)) {
                evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
index 36d0763..6a6929f 100644 (file)
@@ -576,6 +576,13 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
        return NULL;
 }
 
+static bool map__contains_symbol(struct map *map, struct symbol *sym)
+{
+       u64 ip = map->unmap_ip(map, sym->start);
+
+       return ip >= map->start && ip < map->end;
+}
+
 struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
                                         struct map **mapp)
 {
@@ -591,6 +598,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
 
                if (sym == NULL)
                        continue;
+               if (!map__contains_symbol(pos, sym)) {
+                       sym = NULL;
+                       continue;
+               }
                if (mapp != NULL)
                        *mapp = pos;
                goto out;
index c85d0d1..7b0ca7c 100644 (file)
@@ -377,7 +377,7 @@ out:
 
 static int record_saved_cmdline(void)
 {
-       unsigned int size;
+       unsigned long long size;
        char *path;
        struct stat st;
        int ret, err = 0;
index 920b1d5..e76214f 100644 (file)
@@ -164,16 +164,15 @@ void parse_ftrace_printk(struct tep_handle *pevent,
 void parse_saved_cmdline(struct tep_handle *pevent,
                         char *file, unsigned int size __maybe_unused)
 {
-       char *comm;
+       char comm[17]; /* Max comm length in the kernel is 16. */
        char *line;
        char *next = NULL;
        int pid;
 
        line = strtok_r(file, "\n", &next);
        while (line) {
-               sscanf(line, "%d %ms", &pid, &comm);
-               tep_register_comm(pevent, comm, pid);
-               free(comm);
+               if (sscanf(line, "%d %16s", &pid, comm) == 2)
+                       tep_register_comm(pevent, comm, pid);
                line = strtok_r(NULL, "\n", &next);
        }
 }
index 72c25a3..d9a7254 100644 (file)
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
 
 include ../lib.mk
 
-all:
+all: khdr
        @for DIR in $(SUBDIRS); do              \
                BUILD_TARGET=$(OUTPUT)/$$DIR;   \
                mkdir $$BUILD_TARGET  -p;       \
index e036952..88cfe88 100644 (file)
@@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c
 
 TEST_PROGS := ion_test.sh
 
+KSFT_KHDR_INSTALL := 1
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
index 6f54f84..9b552c0 100644 (file)
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data)
        /* Test update without programs */
        for (i = 0; i < 6; i++) {
                err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
-               if (err) {
+               if (i < 2 && !err) {
+                       printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
+                              i, sfd[i]);
+                       goto out_sockmap;
+               } else if (i >= 2 && err) {
                        printf("Failed noprog update sockmap '%i:%i'\n",
                               i, sfd[i]);
                        goto out_sockmap;
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data)
        }
 
        /* Test map update elem afterwards fd lives in fd and map_fd */
-       for (i = 0; i < 6; i++) {
+       for (i = 2; i < 6; i++) {
                err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
                if (err) {
                        printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data)
        }
 
        /* Delete the elems without programs */
-       for (i = 0; i < 6; i++) {
+       for (i = 2; i < 6; i++) {
                err = bpf_map_delete_elem(fd, &i);
                if (err) {
                        printf("Failed delete sockmap %i '%i:%i'\n",
index 1c5d2b2..14c9fe2 100644 (file)
@@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
 int cg_read_strcmp(const char *cgroup, const char *control,
                   const char *expected)
 {
-       size_t size = strlen(expected) + 1;
+       size_t size;
        char *buf;
+       int ret;
+
+       /* Handle the case of comparing against empty string */
+       if (!expected)
+               size = 32;
+       else
+               size = strlen(expected) + 1;
 
        buf = malloc(size);
        if (!buf)
                return -1;
 
-       if (cg_read(cgroup, control, buf, size))
+       if (cg_read(cgroup, control, buf, size)) {
+               free(buf);
                return -1;
+       }
 
-       return strcmp(expected, buf);
+       ret = strcmp(expected, buf);
+       free(buf);
+       return ret;
 }
 
 int cg_read_strstr(const char *cgroup, const char *control, const char *needle)
@@ -337,3 +348,24 @@ int is_swap_enabled(void)
 
        return cnt > 1;
 }
+
+int set_oom_adj_score(int pid, int score)
+{
+       char path[PATH_MAX];
+       int fd, len;
+
+       sprintf(path, "/proc/%d/oom_score_adj", pid);
+
+       fd = open(path, O_WRONLY | O_APPEND);
+       if (fd < 0)
+               return fd;
+
+       len = dprintf(fd, "%d", score);
+       if (len < 0) {
+               close(fd);
+               return len;
+       }
+
+       close(fd);
+       return 0;
+}
index 1ff6f9f..9ac8b79 100644 (file)
@@ -40,3 +40,4 @@ extern int get_temp_fd(void);
 extern int alloc_pagecache(int fd, size_t size);
 extern int alloc_anon(const char *cgroup, void *arg);
 extern int is_swap_enabled(void);
+extern int set_oom_adj_score(int pid, int score);
index cf0bddc..28d321b 100644 (file)
@@ -2,6 +2,7 @@
 #define _GNU_SOURCE
 
 #include <linux/limits.h>
+#include <linux/oom.h>
 #include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
        return 0;
 }
 
+static int alloc_anon_noexit(const char *cgroup, void *arg)
+{
+       int ppid = getppid();
+
+       if (alloc_anon(cgroup, arg))
+               return -1;
+
+       while (getppid() == ppid)
+               sleep(1);
+
+       return 0;
+}
+
+/*
+ * Wait until processes are killed asynchronously by the OOM killer
+ * If we exceed a timeout, fail.
+ */
+static int cg_test_proc_killed(const char *cgroup)
+{
+       int limit;
+
+       for (limit = 10; limit > 0; limit--) {
+               if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
+                       return 0;
+
+               usleep(100000);
+       }
+       return -1;
+}
+
 /*
  * First, this test creates the following hierarchy:
  * A       memory.min = 50M,  memory.max = 200M
@@ -964,6 +995,177 @@ cleanup:
        return ret;
 }
 
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.group.oom set. Then it checks that all
+ * processes in the leaf (but not the parent) were killed.
+ */
+static int test_memcg_oom_group_leaf_events(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *parent, *child;
+
+       parent = cg_name(root, "memcg_test_0");
+       child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+       if (!parent || !child)
+               goto cleanup;
+
+       if (cg_create(parent))
+               goto cleanup;
+
+       if (cg_create(child))
+               goto cleanup;
+
+       if (cg_write(parent, "cgroup.subtree_control", "+memory"))
+               goto cleanup;
+
+       if (cg_write(child, "memory.max", "50M"))
+               goto cleanup;
+
+       if (cg_write(child, "memory.swap.max", "0"))
+               goto cleanup;
+
+       if (cg_write(child, "memory.oom.group", "1"))
+               goto cleanup;
+
+       cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+       if (!cg_run(child, alloc_anon, (void *)MB(100)))
+               goto cleanup;
+
+       if (cg_test_proc_killed(child))
+               goto cleanup;
+
+       if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
+               goto cleanup;
+
+       if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0)
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (child)
+               cg_destroy(child);
+       if (parent)
+               cg_destroy(parent);
+       free(child);
+       free(parent);
+
+       return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.group.oom set. Then it checks that all
+ * processes in the parent and leaf were killed.
+ */
+static int test_memcg_oom_group_parent_events(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *parent, *child;
+
+       parent = cg_name(root, "memcg_test_0");
+       child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+       if (!parent || !child)
+               goto cleanup;
+
+       if (cg_create(parent))
+               goto cleanup;
+
+       if (cg_create(child))
+               goto cleanup;
+
+       if (cg_write(parent, "memory.max", "80M"))
+               goto cleanup;
+
+       if (cg_write(parent, "memory.swap.max", "0"))
+               goto cleanup;
+
+       if (cg_write(parent, "memory.oom.group", "1"))
+               goto cleanup;
+
+       cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+       cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+
+       if (!cg_run(child, alloc_anon, (void *)MB(100)))
+               goto cleanup;
+
+       if (cg_test_proc_killed(child))
+               goto cleanup;
+       if (cg_test_proc_killed(parent))
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (child)
+               cg_destroy(child);
+       if (parent)
+               cg_destroy(parent);
+       free(child);
+       free(parent);
+
+       return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.group.oom set. Then it checks that all
+ * processes were killed except those set with OOM_SCORE_ADJ_MIN
+ */
+static int test_memcg_oom_group_score_events(const char *root)
+{
+       int ret = KSFT_FAIL;
+       char *memcg;
+       int safe_pid;
+
+       memcg = cg_name(root, "memcg_test_0");
+
+       if (!memcg)
+               goto cleanup;
+
+       if (cg_create(memcg))
+               goto cleanup;
+
+       if (cg_write(memcg, "memory.max", "50M"))
+               goto cleanup;
+
+       if (cg_write(memcg, "memory.swap.max", "0"))
+               goto cleanup;
+
+       if (cg_write(memcg, "memory.oom.group", "1"))
+               goto cleanup;
+
+       safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+       if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
+               goto cleanup;
+
+       cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+       if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
+               goto cleanup;
+
+       if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
+               goto cleanup;
+
+       if (kill(safe_pid, SIGKILL))
+               goto cleanup;
+
+       ret = KSFT_PASS;
+
+cleanup:
+       if (memcg)
+               cg_destroy(memcg);
+       free(memcg);
+
+       return ret;
+}
+
+
 #define T(x) { x, #x }
 struct memcg_test {
        int (*fn)(const char *root);
@@ -978,6 +1180,9 @@ struct memcg_test {
        T(test_memcg_oom_events),
        T(test_memcg_swap_max),
        T(test_memcg_sock),
+       T(test_memcg_oom_group_leaf_events),
+       T(test_memcg_oom_group_parent_events),
+       T(test_memcg_oom_group_score_events),
 };
 #undef T
 
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644 (file)
index 0000000..4e151f1
--- /dev/null
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
index ff8feca..ad1eeb1 100644 (file)
@@ -18,6 +18,7 @@ TEST_GEN_FILES := \
 
 TEST_PROGS := run.sh
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_FILES): $(HEADERS)
index 1bbb475..4665cdb 100644 (file)
@@ -21,11 +21,8 @@ endef
 CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
 LDLIBS += -lmount -I/usr/include/libmount
 
-$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h
+$(BINARIES):| khdr
+$(BINARIES): ../../../gpio/gpio-utils.o
 
 ../../../gpio/gpio-utils.o:
        make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio
-
-../../../../usr/include/linux/gpio.h:
-       make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/
-
index 15e6b75..a3edb2c 100644 (file)
@@ -19,7 +19,6 @@
 #define KSFT_FAIL  1
 #define KSFT_XFAIL 2
 #define KSFT_XPASS 3
-/* Treat skip as pass */
 #define KSFT_SKIP  4
 
 /* counters */
index 4202139..5c34752 100644 (file)
@@ -1,4 +1,5 @@
 cr4_cpuid_sync_test
+platform_info_test
 set_sregs_test
 sync_regs_test
 vmx_tsc_adjust_test
index 03b0f55..ec32dad 100644 (file)
@@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m)
 LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
 LIBKVM_x86_64 = lib/x86.c lib/vmx.c
 
-TEST_GEN_PROGS_x86_64 = set_sregs_test
+TEST_GEN_PROGS_x86_64 = platform_info_test
+TEST_GEN_PROGS_x86_64 += set_sregs_test
 TEST_GEN_PROGS_x86_64 += sync_regs_test
 TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
@@ -20,7 +21,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
 LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
 CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
-LDFLAGS += -lpthread
+LDFLAGS += -pthread
 
 # After inclusion, $(OUTPUT) is defined and
 # $(TEST_GEN_PROGS) starts with $(OUTPUT)/
@@ -37,9 +38,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
 $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
        $(AR) crs $@ $^
 
-$(LINUX_HDR_PATH):
-       make -C $(top_srcdir) headers_install
-
-all: $(STATIC_LIBS) $(LINUX_HDR_PATH)
+all: $(STATIC_LIBS)
 $(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH)
+$(STATIC_LIBS):| khdr
index bb5a25f..3acf9a9 100644 (file)
@@ -50,6 +50,7 @@ enum vm_mem_backing_src_type {
 };
 
 int kvm_check_cap(long cap);
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
 
 struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
 void kvm_vm_free(struct kvm_vm *vmp);
@@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
                          struct kvm_vcpu_events *events);
 void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
                          struct kvm_vcpu_events *events);
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+       uint64_t msr_value);
 
 const char *exit_reason_str(unsigned int exit_reason);
 
index e9ba389..6fd8c08 100644 (file)
@@ -63,6 +63,29 @@ int kvm_check_cap(long cap)
        return ret;
 }
 
+/* VM Enable Capability
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
+ *
+ * Enables a capability (KVM_CAP_*) on the VM.
+ */
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
+{
+       int ret;
+
+       ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
+       TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
+               "  rc: %i errno: %i", ret, errno);
+
+       return ret;
+}
+
 static void vm_open(struct kvm_vm *vm, int perm)
 {
        vm->kvm_fd = open(KVM_DEV_PATH, perm);
@@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
                ret, errno);
 }
 
+/* VCPU Get MSR
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpuid - VCPU ID
+ *   msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+{
+       struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+       struct {
+               struct kvm_msrs header;
+               struct kvm_msr_entry entry;
+       } buffer = {};
+       int r;
+
+       TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+       buffer.header.nmsrs = 1;
+       buffer.entry.index = msr_index;
+       r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
+       TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+               "  rc: %i errno: %i", r, errno);
+
+       return buffer.entry.data;
+}
+
+/* VCPU Set MSR
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   vcpuid - VCPU ID
+ *   msr_index - Index of MSR
+ *   msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+       uint64_t msr_value)
+{
+       struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+       struct {
+               struct kvm_msrs header;
+               struct kvm_msr_entry entry;
+       } buffer = {};
+       int r;
+
+       TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+       memset(&buffer, 0, sizeof(buffer));
+       buffer.header.nmsrs = 1;
+       buffer.entry.index = msr_index;
+       buffer.entry.data = msr_value;
+       r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
+       TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
+               "  rc: %i errno: %i", r, errno);
+}
+
 /* VM VCPU Args Set
  *
  * Input Args:
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
new file mode 100644 (file)
index 0000000..3764e71
--- /dev/null
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for x86 KVM_CAP_MSR_PLATFORM_INFO
+ *
+ * Copyright (C) 2018, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Verifies expected behavior of controlling guest access to
+ * MSR_PLATFORM_INFO.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "x86.h"
+
+#define VCPU_ID 0
+#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
+
+static void guest_code(void)
+{
+       uint64_t msr_platform_info;
+
+       for (;;) {
+               msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
+               GUEST_SYNC(msr_platform_info);
+               asm volatile ("inc %r11");
+       }
+}
+
+static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
+{
+       struct kvm_enable_cap cap = {};
+
+       cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
+       cap.flags = 0;
+       cap.args[0] = (int)enable;
+       vm_enable_cap(vm, &cap);
+}
+
+static void test_msr_platform_info_enabled(struct kvm_vm *vm)
+{
+       struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+       struct guest_args args;
+
+       set_msr_platform_info_enabled(vm, true);
+       vcpu_run(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                       "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
+                       run->exit_reason,
+                       exit_reason_str(run->exit_reason));
+       guest_args_read(vm, VCPU_ID, &args);
+       TEST_ASSERT(args.port == GUEST_PORT_SYNC,
+                       "Received IO from port other than PORT_HOST_SYNC: %u\n",
+                       run->io.port);
+       TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
+               MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
+               "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
+               MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+}
+
+static void test_msr_platform_info_disabled(struct kvm_vm *vm)
+{
+       struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+       set_msr_platform_info_enabled(vm, false);
+       vcpu_run(vm, VCPU_ID);
+       TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+                       "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+                       run->exit_reason,
+                       exit_reason_str(run->exit_reason));
+}
+
+int main(int argc, char *argv[])
+{
+       struct kvm_vm *vm;
+       struct kvm_run *state;
+       int rv;
+       uint64_t msr_platform_info;
+
+       /* Tell stdout not to buffer its content */
+       setbuf(stdout, NULL);
+
+       rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
+       if (!rv) {
+               fprintf(stderr,
+                       "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n");
+               exit(KSFT_SKIP);
+       }
+
+       vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+       msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
+       vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
+               msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+       test_msr_platform_info_disabled(vm);
+       test_msr_platform_info_enabled(vm);
+       vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
+
+       kvm_vm_free(vm);
+
+       return 0;
+}
index 17ab366..0a8e758 100644 (file)
@@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
 TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
 TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
 
+top_srcdir ?= ../../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH           ?= $(SUBARCH)
+
 all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
 
+.PHONY: khdr
+khdr:
+       make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+
+ifdef KSFT_KHDR_INSTALL
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+endif
+
 .ONESHELL:
 define RUN_TEST_PRINT_RESULT
        TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST";  \
index 2fde301..a7e8cd5 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTPLUG_SPARSE=y
 CONFIG_NOTIFIER_ERROR_INJECTION=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_MEMORY_HOTREMOVE=y
index 9cca68e..919aa2a 100644 (file)
@@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
 TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
 TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
 
+KSFT_KHDR_INSTALL := 1
 include ../lib.mk
 
 $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
index f8cc38a..0ab9423 100755 (executable)
@@ -46,6 +46,9 @@
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
+# Some systems don't have a ping6 binary anymore
+which ping6 > /dev/null 2>&1 && ping6=$(which ping6) || ping6=$(which ping)
+
 tests="
        pmtu_vti6_exception             vti6: PMTU exceptions
        pmtu_vti4_exception             vti4: PMTU exceptions
@@ -175,8 +178,8 @@ setup() {
 
 cleanup() {
        [ ${cleanup_done} -eq 1 ] && return
-       ip netns del ${NS_A} 2 > /dev/null
-       ip netns del ${NS_B} 2 > /dev/null
+       ip netns del ${NS_A} 2> /dev/null
+       ip netns del ${NS_B} 2> /dev/null
        cleanup_done=1
 }
 
@@ -274,7 +277,7 @@ test_pmtu_vti6_exception() {
        mtu "${ns_b}" veth_b 4000
        mtu "${ns_a}" vti6_a 5000
        mtu "${ns_b}" vti6_b 5000
-       ${ns_a} ping6 -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
+       ${ns_a} ${ping6} -q -i 0.1 -w 2 -s 60000 ${vti6_b_addr} > /dev/null
 
        # Check that exception was created
        if [ "$(route_get_dst_pmtu_from_exception "${ns_a}" ${vti6_b_addr})" = "" ]; then
@@ -334,7 +337,7 @@ test_pmtu_vti4_link_add_mtu() {
        fail=0
 
        min=68
-       max=$((65528 - 20))
+       max=$((65535 - 20))
        # Check invalid values first
        for v in $((min - 1)) $((max + 1)); do
                ${ns_a} ip link add vti4_a mtu ${v} type vti local ${veth4_a_addr} remote ${veth4_b_addr} key 10 2>/dev/null
index b3ebf26..8fdfeaf 100644 (file)
@@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple)
        EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
 }
 
+TEST_F(tls, recv_peek_multiple_records)
+{
+       char const *test_str = "test_read_peek_mult_recs";
+       char const *test_str_first = "test_read_peek";
+       char const *test_str_second = "_mult_recs";
+       int len;
+       char buf[64];
+
+       len = strlen(test_str_first);
+       EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);
+
+       len = strlen(test_str_second) + 1;
+       EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+       len = sizeof(buf);
+       memset(buf, 0, len);
+       EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+       /* MSG_PEEK can only peek into the current record. */
+       len = strlen(test_str_first) + 1;
+       EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
+
+       len = sizeof(buf);
+       memset(buf, 0, len);
+       EXPECT_NE(recv(self->cfd, buf, len, 0), -1);
+
+       /* Non-MSG_PEEK will advance strparser (and therefore record)
+        * however.
+        */
+       len = strlen(test_str) + 1;
+       EXPECT_EQ(memcmp(test_str, buf, len), 0);
+
+       /* MSG_MORE will hold current record open, so later MSG_PEEK
+        * will see everything.
+        */
+       len = strlen(test_str_first);
+       EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len);
+
+       len = strlen(test_str_second) + 1;
+       EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+       len = sizeof(buf);
+       memset(buf, 0, len);
+       EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+       len = strlen(test_str) + 1;
+       EXPECT_EQ(memcmp(test_str, buf, len), 0);
+}
+
 TEST_F(tls, pollin)
 {
        char const *test_str = "test_poll";
index a728040..14cfcf0 100644 (file)
@@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
 
 all: $(TEST_PROGS)
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 93baaca..d056486 100644 (file)
@@ -1,5 +1,6 @@
 TEST_GEN_PROGS := copy_first_unaligned alignment_handler
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index b4d7432..d40300a 100644 (file)
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
 
 CFLAGS += -O2
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 1be5474..ede4d3d 100644 (file)
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
 
 $(TEST_PROGS): ../harness.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 1cf89a3..44574f3 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
 
 EXTRA_SOURCES := validate.c ../harness.c stubs.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/copyuser_64_t%:      copyuser_64.S $(EXTRA_SOURCES)
index 55d7db7..5df4763 100644 (file)
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test   \
              dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test  \
              dscr_sysfs_thread_test
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
index 0dd3a01..11a10d7 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 8ebbe96..33ced6e 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
 TEST_GEN_FILES := tempfile
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 6e1629b..19046db 100644 (file)
@@ -5,6 +5,7 @@ noarg:
 TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
 EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_GEN_PROGS) ebb
index c4e64bc..bd5dfa5 100644 (file)
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test   \
         lost_exception_test no_handler_test                    \
         cycles_with_mmcr2_test
 
+top_srcdir = ../../../../../..
 include ../../../lib.mk
 
 $(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
index 175366d..ea2b7bd 100644 (file)
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
 
 TEST_GEN_PROGS := load_unaligned_zeropad
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 28f5b78..923d531 100644 (file)
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
               ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
               perf-hwbreak
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 all: $(TEST_PROGS)
index a7cbd50..1fca25c 100644 (file)
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
 CFLAGS += -maltivec
 signal_tm: CFLAGS += -mhtm
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 clean:
index 10b35c8..7fc0623 100644 (file)
@@ -29,6 +29,7 @@ endif
 
 ASFLAGS = $(CFLAGS)
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): $(EXTRA_SOURCES)
index 30b8ff8..fcd2dcb 100644 (file)
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
 
 EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
index da22ca7..161b884 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
 
 CFLAGS += -I../../../../../usr/include
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index c0e45d2..9fc2cf6 100644 (file)
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
        tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
        $(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
index f8ced26..fb82068 100644 (file)
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
 
 CFLAGS += -m64
 
+top_srcdir = ../../../../..
 include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c
index 642d4e1..eec2663 100644 (file)
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
                        printf(fmt, ## __VA_ARGS__);    \
        } while (0)
 
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
 
 #define INJECT_ASM_REG "eax"
 
 #define RSEQ_INJECT_CLOBBER \
        , INJECT_ASM_REG
 
-#ifdef __i386__
-
 #define RSEQ_INJECT_ASM(n) \
        "mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
 
 #elif defined(__x86_64__)
 
+#define INJECT_ASM_REG_P       "rax"
+#define INJECT_ASM_REG         "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+       , INJECT_ASM_REG_P \
+       , INJECT_ASM_REG
+
 #define RSEQ_INJECT_ASM(n) \
-       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
-       "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+       "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+       "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
        "test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
        "jz 333f\n\t" \
        "222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
        "jnz 222b\n\t" \
        "333:\n\t"
 
-#else
-#error "Unsupported architecture"
-#endif
-
 #elif defined(__s390__)
 
 #define RSEQ_INJECT_INPUT \
index f03763d..30f9b54 100644 (file)
         ]
     },
     {
+        "id": "6aaf",
+        "name": "Add police actions with conform-exceed control pass/pipe [with numeric values]",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 0/3 index 1",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions get action police index 1",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action pass/pipe",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
+    {
+        "id": "29b1",
+        "name": "Add police actions with conform-exceed control <invalid>/drop",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action police rate 3mbit burst 250k conform-exceed 10/drop index 1",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action police",
+        "matchPattern": "action order [0-9]*:  police 0x1 rate 3Mbit burst 250Kb mtu 2Kb action ",
+        "matchCount": "0",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
+    },
+    {
         "id": "c26f",
         "name": "Add police action with invalid peakrate value",
         "category": [
index 9881876..e94b7b1 100644 (file)
@@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests
 
 include ../lib.mk
 
-$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h
 $(OUTPUT)/userfaultfd: LDLIBS += -lpthread
 
 $(OUTPUT)/mlock-random-test: LDLIBS += -lcap
-
-../../../../usr/include/linux/kernel.h:
-       make -C ../../../.. headers_install
index 2352590..35edd61 100644 (file)
@@ -17,6 +17,7 @@
 #include <errno.h>
 #include <sched.h>
 #include <stdbool.h>
+#include <limits.h>
 
 #ifndef SYS_getcpu
 # ifdef __x86_64__
 
 int nerrs = 0;
 
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
+
+vgtod_t vdso_gettimeofday;
+
 typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
 
 getcpu_t vgetcpu;
@@ -95,6 +104,15 @@ static void fill_function_pointers()
                printf("Warning: failed to find getcpu in vDSO\n");
 
        vgetcpu = (getcpu_t) vsyscall_getcpu();
+
+       vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+       if (!vdso_clock_gettime)
+               printf("Warning: failed to find clock_gettime in vDSO\n");
+
+       vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
+       if (!vdso_gettimeofday)
+               printf("Warning: failed to find gettimeofday in vDSO\n");
+
 }
 
 static long sys_getcpu(unsigned * cpu, unsigned * node,
@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
        return syscall(__NR_getcpu, cpu, node, cache);
 }
 
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+       return syscall(__NR_clock_gettime, id, ts);
+}
+
+static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       return syscall(__NR_gettimeofday, tv, tz);
+}
+
 static void test_getcpu(void)
 {
        printf("[RUN]\tTesting getcpu...\n");
@@ -155,10 +183,154 @@ static void test_getcpu(void)
        }
 }
 
+static bool ts_leq(const struct timespec *a, const struct timespec *b)
+{
+       if (a->tv_sec != b->tv_sec)
+               return a->tv_sec < b->tv_sec;
+       else
+               return a->tv_nsec <= b->tv_nsec;
+}
+
+static bool tv_leq(const struct timeval *a, const struct timeval *b)
+{
+       if (a->tv_sec != b->tv_sec)
+               return a->tv_sec < b->tv_sec;
+       else
+               return a->tv_usec <= b->tv_usec;
+}
+
+static char const * const clocknames[] = {
+       [0] = "CLOCK_REALTIME",
+       [1] = "CLOCK_MONOTONIC",
+       [2] = "CLOCK_PROCESS_CPUTIME_ID",
+       [3] = "CLOCK_THREAD_CPUTIME_ID",
+       [4] = "CLOCK_MONOTONIC_RAW",
+       [5] = "CLOCK_REALTIME_COARSE",
+       [6] = "CLOCK_MONOTONIC_COARSE",
+       [7] = "CLOCK_BOOTTIME",
+       [8] = "CLOCK_REALTIME_ALARM",
+       [9] = "CLOCK_BOOTTIME_ALARM",
+       [10] = "CLOCK_SGI_CYCLE",
+       [11] = "CLOCK_TAI",
+};
+
+static void test_one_clock_gettime(int clock, const char *name)
+{
+       struct timespec start, vdso, end;
+       int vdso_ret, end_ret;
+
+       printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
+
+       if (sys_clock_gettime(clock, &start) < 0) {
+               if (errno == EINVAL) {
+                       vdso_ret = vdso_clock_gettime(clock, &vdso);
+                       if (vdso_ret == -EINVAL) {
+                               printf("[OK]\tNo such clock.\n");
+                       } else {
+                               printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
+                               nerrs++;
+                       }
+               } else {
+                       printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
+               }
+               return;
+       }
+
+       vdso_ret = vdso_clock_gettime(clock, &vdso);
+       end_ret = sys_clock_gettime(clock, &end);
+
+       if (vdso_ret != 0 || end_ret != 0) {
+               printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+                      vdso_ret, errno);
+               nerrs++;
+               return;
+       }
+
+       printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+              (unsigned long long)start.tv_sec, start.tv_nsec,
+              (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
+              (unsigned long long)end.tv_sec, end.tv_nsec);
+
+       if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
+               printf("[FAIL]\tTimes are out of sequence\n");
+               nerrs++;
+       }
+}
+
+static void test_clock_gettime(void)
+{
+       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
+            clock++) {
+               test_one_clock_gettime(clock, clocknames[clock]);
+       }
+
+       /* Also test some invalid clock ids */
+       test_one_clock_gettime(-1, "invalid");
+       test_one_clock_gettime(INT_MIN, "invalid");
+       test_one_clock_gettime(INT_MAX, "invalid");
+}
+
+static void test_gettimeofday(void)
+{
+       struct timeval start, vdso, end;
+       struct timezone sys_tz, vdso_tz;
+       int vdso_ret, end_ret;
+
+       if (!vdso_gettimeofday)
+               return;
+
+       printf("[RUN]\tTesting gettimeofday...\n");
+
+       if (sys_gettimeofday(&start, &sys_tz) < 0) {
+               printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
+               nerrs++;
+               return;
+       }
+
+       vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
+       end_ret = sys_gettimeofday(&end, NULL);
+
+       if (vdso_ret != 0 || end_ret != 0) {
+               printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+                      vdso_ret, errno);
+               nerrs++;
+               return;
+       }
+
+       printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
+              (unsigned long long)start.tv_sec, start.tv_usec,
+              (unsigned long long)vdso.tv_sec, vdso.tv_usec,
+              (unsigned long long)end.tv_sec, end.tv_usec);
+
+       if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
+               printf("[FAIL]\tTimes are out of sequence\n");
+               nerrs++;
+       }
+
+       if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
+           sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
+               printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
+                      sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
+       } else {
+               printf("[FAIL]\ttimezones do not match\n");
+               nerrs++;
+       }
+
+       /* And make sure that passing NULL for tz doesn't crash. */
+       vdso_gettimeofday(&vdso, NULL);
+}
+
 int main(int argc, char **argv)
 {
        fill_function_pointers();
 
+       test_clock_gettime();
+       test_gettimeofday();
+
+       /*
+        * Test getcpu() last so that, if something goes wrong setting affinity,
+        * we still run the other tests.
+        */
        test_getcpu();
 
        return nerrs ? 1 : 0;
index 30cb0a0..37908a8 100644 (file)
@@ -159,12 +159,6 @@ static const char * const page_flag_names[] = {
 };
 
 
-static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug",
-       "/debug",
-       0,
-};
-
 /*
  * data structures
  */
index f82c2ea..334b16d 100644 (file)
@@ -30,8 +30,8 @@ struct slabinfo {
        int alias;
        int refs;
        int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
-       int hwcache_align, object_size, objs_per_slab;
-       int sanity_checks, slab_size, store_user, trace;
+       unsigned int hwcache_align, object_size, objs_per_slab;
+       unsigned int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
        unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;
index 91aaf73..ed162a6 100644 (file)
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
        return 0;
 }
 
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
-       unsigned long end = hva + PAGE_SIZE;
-
-       if (!kvm->arch.pgd)
-               return 0;
-
-       trace_kvm_unmap_hva(hva);
-       handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-       return 0;
-}
-
 int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
 {
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
 {
        unsigned long end = hva + PAGE_SIZE;
+       kvm_pfn_t pfn = pte_pfn(pte);
        pte_t stage2_pte;
 
        if (!kvm->arch.pgd)
                return;
 
        trace_kvm_set_spte_hva(hva);
-       stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+       /*
+        * We've moved a page around, probably through CoW, so let's treat it
+        * just like a translation fault and clean the cache to the PoC.
+        */
+       clean_dcache_guest_page(pfn, PAGE_SIZE);
+       stage2_pte = pfn_pte(pfn, PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
 }
 
index e53b596..57b3ede 100644 (file)
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
                  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
-TRACE_EVENT(kvm_unmap_hva,
-       TP_PROTO(unsigned long hva),
-       TP_ARGS(hva),
-
-       TP_STRUCT__entry(
-               __field(        unsigned long,  hva             )
-       ),
-
-       TP_fast_assign(
-               __entry->hva            = hva;
-       ),
-
-       TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_unmap_hva_range,
        TP_PROTO(unsigned long start, unsigned long end),
        TP_ARGS(start, end),