Merge tag 'kbuild-fixes-v5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 7 Feb 2021 17:37:37 +0000 (09:37 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sun, 7 Feb 2021 17:37:37 +0000 (09:37 -0800)
Pull Kbuild fixes from Masahiro Yamada:

 - Use the 'python3' command to invoke Python scripts, because some
   distributions no longer provide the 'python' command.

 - Clean up and update documentation

 - Use pkg-config to locate libcrypto (see the sketch after this list)

 - Fix duplicated debug flags

 - Ignore some more stubs in scripts/kallsyms.c
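
   For the libcrypto item above, the usual pattern for preferring pkg-config
   over a hard-coded '-lcrypto' when linking host tools looks roughly like the
   sketch below. This is an illustrative example of the general technique, not
   the literal hunk from this pull; the 'sign-file' target and the CRYPTO_*
   variable names are placeholders.

       # Query libcrypto flags via pkg-config, falling back to -lcrypto
       # when pkg-config (or the OpenSSL .pc file) is not installed.
       CRYPTO_CFLAGS := $(shell pkg-config --cflags libcrypto 2>/dev/null)
       CRYPTO_LIBS   := $(shell pkg-config --libs libcrypto 2>/dev/null || echo -lcrypto)

       HOSTCFLAGS_sign-file.o += $(CRYPTO_CFLAGS)
       HOSTLDLIBS_sign-file   += $(CRYPTO_LIBS)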

* tag 'kbuild-fixes-v5.11-2' of git://git.kernel.org/pub/scm/linux/kernel/git/masahiroy/linux-kbuild:
  kallsyms: fix nonconverging kallsyms table with lld
  kbuild: fix duplicated flags in DEBUG_CFLAGS
  scripts/clang-tools: switch explicitly to Python 3
  kbuild: remove PYTHON variable
  Documentation/llvm: Add a section about supported architectures
  Revert "checkpatch: add check for keyword 'boolean' in Kconfig definitions"
  scripts: use pkg-config to locate libcrypto
  kconfig: mconf: fix HOSTCC call
  doc: gcc-plugins: update gcc-plugins.rst
  kbuild: simplify GCC_PLUGINS enablement in dummy-tools/gcc
  Documentation/Kbuild: Remove references to gcc-plugin.sh
  scripts: switch explicitly to Python 3

857 files changed:
.clang-format
.mailmap
Documentation/ABI/testing/sysfs-class-devlink
Documentation/ABI/testing/sysfs-devices-consumer
Documentation/ABI/testing/sysfs-devices-supplier
Documentation/ABI/testing/sysfs-driver-ufs
Documentation/admin-guide/device-mapper/dm-integrity.rst
Documentation/admin-guide/media/rkisp1.rst
Documentation/asm-annotations.rst
Documentation/dev-tools/kasan.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/arm/cpus.yaml
Documentation/devicetree/bindings/display/bridge/sii902x.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/extcon/wlf,arizona.yaml
Documentation/devicetree/bindings/hwmon/adi,ltc2947.yaml
Documentation/devicetree/bindings/hwmon/baikal,bt1-pvt.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp513.yaml
Documentation/devicetree/bindings/i2c/i2c-gpio.yaml
Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
Documentation/devicetree/bindings/iio/accel/bosch,bma255.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
Documentation/devicetree/bindings/iio/adc/maxim,max9611.yaml
Documentation/devicetree/bindings/iio/adc/st,stm32-adc.yaml
Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml
Documentation/devicetree/bindings/iio/dac/adi,ad5758.yaml
Documentation/devicetree/bindings/iio/health/maxim,max30100.yaml
Documentation/devicetree/bindings/input/adc-keys.txt
Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
Documentation/devicetree/bindings/input/touchscreen/touchscreen.yaml
Documentation/devicetree/bindings/leds/richtek,rt8515.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/mediatek-jpeg-decoder.txt
Documentation/devicetree/bindings/media/mediatek-jpeg-encoder.txt
Documentation/devicetree/bindings/media/mediatek-mdp.txt
Documentation/devicetree/bindings/mmc/mmc-controller.yaml
Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/power/supply/battery.yaml
Documentation/devicetree/bindings/power/supply/bq2515x.yaml
Documentation/devicetree/bindings/regulator/dlg,da9121.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/rtc/rtc.yaml
Documentation/devicetree/bindings/serial/pl011.yaml
Documentation/devicetree/bindings/sound/mt8192-mt6359-rt1015-rt5682.yaml
Documentation/devicetree/bindings/sound/sgtl5000.yaml
Documentation/devicetree/bindings/usb/ti,j721e-usb.yaml
Documentation/devicetree/bindings/watchdog/watchdog.yaml
Documentation/filesystems/overlayfs.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/tls-offload.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/nested-vmx.rst
Documentation/virt/kvm/running-nested-guests.rst
MAINTAINERS
Makefile
arch/arm/boot/compressed/atags_to_fdt.c
arch/arm/boot/dts/imx6q-tbs2910.dts
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
arch/arm/boot/dts/imx6qdl-sr-som.dtsi
arch/arm/boot/dts/imx7d-flex-concentrator.dts
arch/arm/boot/dts/lpc32xx.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/ste-db8500.dtsi
arch/arm/boot/dts/ste-db8520.dtsi
arch/arm/boot/dts/ste-db9500.dtsi [new file with mode: 0644]
arch/arm/boot/dts/ste-snowball.dts
arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
arch/arm/boot/dts/sun7i-a20-bananapro.dts
arch/arm/include/asm/kexec-internal.h [new file with mode: 0644]
arch/arm/include/debug/tegra.S
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/relocate_kernel.S
arch/arm/kernel/signal.c
arch/arm/mach-footbridge/dc21285.c
arch/arm/mach-imx/suspend-imx6.S
arch/arm/mach-omap1/board-osk.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/cpuidle44xx.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm64/boot/dts/amlogic/meson-axg.dtsi
arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
arch/arm64/boot/dts/amlogic/meson-gx.dtsi
arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
arch/arm64/boot/dts/broadcom/stingray/stingray-usb.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/qcom/sdm845-db845c.dts
arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
arch/arm64/boot/dts/rockchip/px30.dtsi
arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/memory.h
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/hyp-init.S
arch/arm64/kvm/hyp/nvhe/psci-relay.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/fault.c
arch/arm64/mm/physaddr.c
arch/ia64/include/asm/sparsemem.h
arch/ia64/include/uapi/asm/cmpxchg.h
arch/ia64/kernel/time.c
arch/mips/include/asm/highmem.h
arch/openrisc/include/asm/io.h
arch/openrisc/mm/ioremap.c
arch/parisc/Kconfig
arch/parisc/include/asm/irq.h
arch/parisc/kernel/entry.S
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/feature-fixups.h
arch/powerpc/include/asm/highmem.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/vdso32/Makefile
arch/powerpc/kernel/vdso32/vdso32_wrapper.S [deleted file]
arch/powerpc/kernel/vdso32_wrapper.S [new file with mode: 0644]
arch/powerpc/kernel/vdso64/Makefile
arch/powerpc/kernel/vdso64/sigtramp.S
arch/powerpc/kernel/vdso64/vdso64.lds.S
arch/powerpc/kernel/vdso64/vdso64_wrapper.S [deleted file]
arch/powerpc/kernel/vdso64_wrapper.S [new file with mode: 0644]
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/lib/feature-fixups.c
arch/powerpc/lib/sstep.c
arch/riscv/Kconfig
arch/riscv/include/asm/page.h
arch/riscv/include/asm/set_memory.h
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
arch/s390/boot/uv.c
arch/s390/include/asm/uv.h
arch/s390/kernel/uv.c
arch/sh/Kconfig
arch/sh/boards/mach-sh03/rtc.c
arch/sh/configs/landisk_defconfig
arch/sh/configs/microdev_defconfig
arch/sh/configs/sdk7780_defconfig
arch/sh/configs/sdk7786_defconfig
arch/sh/configs/se7750_defconfig
arch/sh/configs/sh03_defconfig
arch/sh/drivers/dma/Kconfig
arch/sh/include/asm/gpio.h
arch/sh/kernel/cpu/sh3/entry.S
arch/sh/mm/Kconfig
arch/sh/mm/asids-debugfs.c
arch/sh/mm/cache-debugfs.c
arch/sh/mm/pmb.c
arch/sparc/include/asm/highmem.h
arch/um/Kconfig
arch/um/drivers/ubd_kern.c
arch/um/drivers/virtio_uml.c
arch/um/include/asm/io.h
arch/um/include/asm/pgtable.h
arch/um/include/asm/set_memory.h [deleted file]
arch/um/include/shared/kern_util.h
arch/um/kernel/kmsg_dump.c
arch/um/kernel/process.c
arch/um/kernel/time.c
arch/um/kernel/tlb.c
arch/um/kernel/um_arch.c
arch/um/os-Linux/helper.c
arch/um/os-Linux/time.c
arch/x86/entry/common.c
arch/x86/entry/thunk_64.S
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/topology.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/sev-es.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/emulate.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lib/mmx_32.c
arch/x86/mm/mem_encrypt.c
arch/x86/xen/enlighten_pv.c
arch/x86/xen/smp_hvm.c
arch/x86/xen/xen-asm.S
block/bfq-iosched.c
block/blk-cgroup.c
block/blk-mq.h
block/genhd.c
block/partitions/core.c
crypto/asymmetric_keys/public_key.c
crypto/xor.c
drivers/acpi/arm64/iort.c
drivers/acpi/device_sysfs.c
drivers/acpi/scan.c
drivers/acpi/thermal.c
drivers/base/core.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/nbd.c
drivers/block/null_blk/zoned.c
drivers/block/xen-blkfront.c
drivers/bus/arm-integrator-lm.c
drivers/bus/simple-pm-bus.c
drivers/clk/imx/Kconfig
drivers/clk/mmp/clk-audio.c
drivers/clk/qcom/gcc-sc7180.c
drivers/clk/qcom/gcc-sm8250.c
drivers/counter/ti-eqep.c
drivers/crypto/Kconfig
drivers/crypto/marvell/cesa/cesa.h
drivers/firmware/efi/apple-properties.c
drivers/firmware/imx/Kconfig
drivers/gpio/Kconfig
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpiolib-cdev.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/Makefile
drivers/gpu/drm/amd/display/dc/dcn301/Makefile
drivers/gpu/drm/amd/display/dc/dcn302/Makefile
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/bridge/lontium-lt9611uxc.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_gem_vram_helper.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_dp_link_training.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_overlay.c
drivers/gpu/drm/i915/display/intel_sprite.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gt/gen7_renderclear.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/nouveau/dispnv50/base507c.c
drivers/gpu/drm/nouveau/dispnv50/base827c.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/dispnv50/head917d.c
drivers/gpu/drm/nouveau/dispnv50/wndw.c
drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h
drivers/gpu/drm/nouveau/include/nvif/push.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/ttm/ttm_pool.c
drivers/gpu/drm/vc4/vc4_hdmi.c
drivers/gpu/drm/vc4/vc4_hvs.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/hid/hid-multitouch.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.h
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/heartbeat.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-octeon-core.c
drivers/i2c/busses/i2c-tegra-bpmp.c
drivers/i2c/busses/i2c-tegra.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/ad5504.c
drivers/iio/proximity/sx9310.c
drivers/iio/temperature/mlx90632.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/input/joystick/xpad.c
drivers/input/misc/ariel-pwrbutton.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/ili210x.c
drivers/input/touchscreen/st1232.c
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-bcm2836.c
drivers/irqchip/irq-loongson-liointc.c
drivers/irqchip/irq-mips-cpu.c
drivers/irqchip/irq-sl28cpld.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/flash/Kconfig [new file with mode: 0644]
drivers/leds/flash/Makefile [new file with mode: 0644]
drivers/leds/flash/leds-rt8515.c [new file with mode: 0644]
drivers/leds/led-triggers.c
drivers/leds/leds-ariel.c
drivers/leds/leds-lm3533.c
drivers/lightnvm/core.c
drivers/md/bcache/features.h
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-table.c
drivers/md/md.c
drivers/media/cec/platform/Makefile
drivers/media/common/videobuf2/videobuf2-v4l2.c
drivers/media/i2c/ccs-pll.c
drivers/media/i2c/ccs/ccs-data.c
drivers/media/pci/intel/ipu3/ipu3-cio2.c
drivers/media/platform/qcom/venus/core.c
drivers/media/platform/rcar-vin/rcar-core.c
drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
drivers/media/rc/ir-mce_kbd-decoder.c
drivers/media/rc/ite-cir.c
drivers/media/rc/rc-main.c
drivers/media/rc/serial_ir.c
drivers/media/v4l2-core/v4l2-common.c
drivers/misc/cardreader/rtsx_pcr.c
drivers/misc/habanalabs/common/device.c
drivers/misc/habanalabs/common/firmware_if.c
drivers/misc/habanalabs/common/habanalabs.h
drivers/misc/habanalabs/common/habanalabs_ioctl.c
drivers/misc/habanalabs/common/memory.c
drivers/misc/habanalabs/common/mmu.c
drivers/misc/habanalabs/common/mmu_v1.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/goya/goya.c
drivers/mmc/core/queue.c
drivers/mmc/core/sdio_cis.c
drivers/mmc/host/sdhci-brcmstb.c
drivers/mmc/host/sdhci-of-dwcmshc.c
drivers/mmc/host/sdhci-pltfm.h
drivers/mmc/host/sdhci-xenon.c
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/raw/intel-nand-controller.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/omap2.c
drivers/mtd/nand/spi/core.c
drivers/net/arcnet/arc-rimi.c
drivers/net/arcnet/arcdevice.h
drivers/net/arcnet/arcnet.c
drivers/net/arcnet/com20020-isa.c
drivers/net/arcnet/com20020-pci.c
drivers/net/arcnet/com20020_cs.c
drivers/net/arcnet/com90io.c
drivers/net/arcnet/com90xx.c
drivers/net/can/dev.c
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
drivers/net/can/vxcan.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global1_vtu.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_i225.c
drivers/net/ethernet/intel/igc/igc_mac.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_net.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
drivers/net/ipa/gsi.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_mem.c
drivers/net/mdio/mdio-bitbang.c
drivers/net/team/team.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-io.c
drivers/net/wireless/intel/iwlwifi/iwl-io.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intel/iwlwifi/queue/tx.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt7601u/dma.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/tcp.c
drivers/of/device.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aspm.c
drivers/phy/ingenic/Makefile
drivers/phy/mediatek/Kconfig
drivers/phy/motorola/phy-cpcap-usb.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-msm.h
drivers/platform/surface/Kconfig
drivers/platform/surface/surface_gpe.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/dell-wmi-sysman/sysman.c
drivers/platform/x86/hp-wmi.c
drivers/platform/x86/i2c-multi-instantiate.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/regulator/core.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-mc146818-lib.c
drivers/s390/block/dasd_devmap.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_int.h
drivers/s390/crypto/vfio_ap_drv.c
drivers/s390/crypto/vfio_ap_ops.c
drivers/s390/crypto/vfio_ap_private.h
drivers/scsi/fnic/vnic_dev.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_transport_srp.c
drivers/scsi/ufs/Kconfig
drivers/scsi/ufs/ufshcd.c
drivers/sh/intc/core.c
drivers/sh/intc/virq-debugfs.c
drivers/soc/atmel/soc.c
drivers/soc/imx/Kconfig
drivers/soc/litex/Kconfig
drivers/soc/litex/litex_soc_ctrl.c
drivers/soc/sunxi/sunxi_mbus.c
drivers/soc/ti/omap_prm.c
drivers/spi/spi-altera.c
drivers/spi/spi-cadence.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spidev.c
drivers/staging/media/hantro/hantro_v4l2.c
drivers/staging/media/sunxi/cedrus/cedrus_h264.c
drivers/staging/rtl8723bs/include/rtw_wifi_regd.h
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/staging/rtl8723bs/os_dep/sdio_intf.c
drivers/staging/rtl8723bs/os_dep/wifi_regd.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_user.c
drivers/tee/optee/call.c
drivers/thunderbolt/acpi.c
drivers/thunderbolt/icm.c
drivers/tty/n_tty.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/tty_io.c
drivers/usb/cdns3/cdns3-imx.c
drivers/usb/class/usblp.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/legacy/ether.c
drivers/usb/gadget/udc/aspeed-vhub/epn.c
drivers/usb/gadget/udc/aspeed-vhub/hub.c
drivers/usb/gadget/udc/bdc/Kconfig
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/xhci-mtk-sch.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-mtk.h
drivers/usb/host/xhci-mvebu.c
drivers/usb/host/xhci-mvebu.h
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-plat.h
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/xen/xenbus/xenbus_probe.c
fs/afs/main.c
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/block-group.c
fs/btrfs/ctree.h
fs/btrfs/extent-tree.c
fs/btrfs/free-space-tree.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/cachefiles/rdwr.c
fs/ceph/mds_client.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/dir.c
fs/cifs/fs_context.c
fs/cifs/smb2pdu.h
fs/cifs/transport.c
fs/ecryptfs/inode.c
fs/fs-writeback.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/kernfs/file.c
fs/nfs/pnfs.c
fs/nfsd/nfs3xdr.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/pipe.c
fs/proc/proc_sysctl.c
fs/udf/super.c
include/drm/drm_dp_mst_helper.h
include/dt-bindings/sound/apq8016-lpass.h
include/dt-bindings/sound/qcom,lpass.h [new file with mode: 0644]
include/dt-bindings/sound/sc7180-lpass.h
include/linux/device.h
include/linux/hugetlb.h
include/linux/iommu.h
include/linux/kasan.h
include/linux/kprobes.h
include/linux/kthread.h
include/linux/ktime.h
include/linux/linkage.h
include/linux/mdio-bitbang.h
include/linux/mlx5/driver.h
include/linux/nvme.h
include/linux/regulator/consumer.h
include/linux/sunrpc/xdr.h
include/linux/timekeeping32.h [deleted file]
include/linux/tracepoint.h
include/linux/tty.h
include/linux/usb/usbnet.h
include/linux/vmalloc.h
include/media/v4l2-common.h
include/net/cfg80211.h
include/net/inet_connection_sock.h
include/net/lapb.h
include/net/mac80211.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/sound/pcm.h
include/trace/events/sched.h
include/trace/events/sunrpc.h
include/uapi/linux/mrp_bridge.h
include/uapi/linux/rkisp1-config.h
include/uapi/linux/rpl.h
include/uapi/linux/v4l2-subdev.h
include/uapi/rdma/vmw_pvrdma-abi.h
init/Kconfig
init/init_task.c
init/main.c
kernel/bpf/bpf_inode_storage.c
kernel/bpf/bpf_lsm.c
kernel/bpf/bpf_task_storage.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/helpers.c
kernel/bpf/preload/Makefile
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/dma/map_benchmark.c
kernel/entry/common.c
kernel/fork.c
kernel/futex.c
kernel/gcov/Kconfig
kernel/irq/manage.c
kernel/irq/msi.c
kernel/kexec_core.c
kernel/kprobes.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/rtmutex.c
kernel/locking/rtmutex_common.h
kernel/power/swap.c
kernel/printk/printk.c
kernel/printk/printk_ringbuffer.c
kernel/sched/core.c
kernel/sched/sched.h
kernel/signal.c
kernel/smpboot.c
kernel/time/ntp.c
kernel/time/timekeeping.c
kernel/trace/fgraph.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_kprobe.c
kernel/workqueue.c
lib/Kconfig.ubsan
lib/ubsan.c
lib/ubsan.h
mm/compaction.c
mm/filemap.c
mm/highmem.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/hw_tags.c
mm/kasan/init.c
mm/kasan/kasan.h
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/migrate.c
mm/page_alloc.c
mm/slub.c
net/bpf/test_run.c
net/bridge/br_private_mrp.h
net/ceph/auth_x.c
net/ceph/crypto.c
net/ceph/messenger_v1.c
net/ceph/messenger_v2.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/core/dev.c
net/core/devlink.c
net/core/gen_estimator.c
net/core/neighbour.c
net/core/skbuff.c
net/decnet/dn_route.c
net/hsr/hsr_main.h
net/ipv4/inet_connection_sock.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/ipt_rpfilter.c
net/ipv4/tcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/udp_offload.c
net/key/af_key.c
net/lapb/lapb_iface.c
net/lapb/lapb_out.c
net/lapb/lapb_timer.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/spectmgmt.c
net/mac80211/tx.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_dynset.c
net/nfc/nci/core.c
net/nfc/netlink.c
net/nfc/rawsock.c
net/rds/rdma.c
net/rxrpc/af_rxrpc.c
net/rxrpc/call_accept.c
net/sched/cls_flower.c
net/sched/cls_tcindex.c
net/sched/sch_api.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/auth_gss_internal.h [new file with mode: 0644]
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/switchdev/switchdev.c
net/vmw_vsock/af_vsock.c
net/wireless/reg.c
net/wireless/wext-core.c
net/xdp/xsk.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
security/commoncap.c
sound/core/pcm_native.c
sound/core/seq/oss/seq_oss_synth.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/amd/renoir/rn-pci-acp3x.c
sound/soc/codecs/ak4458.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/wm_adsp.c
sound/soc/fsl/imx-hdmi.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c
sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c
sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass-ipq806x.c
sound/soc/qcom/lpass-lpaif-reg.h
sound/soc/qcom/lpass-platform.c
sound/soc/qcom/lpass-sc7180.c
sound/soc/qcom/lpass.h
sound/soc/soc-topology.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-codec.c
sound/soc/sof/intel/hda-dsp.c
sound/soc/sof/intel/hda.h
sound/soc/sof/sof-acpi-dev.c
sound/soc/sof/sof-pci-dev.c
sound/usb/clock.c
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/implicit.c
sound/usb/pcm.c
sound/usb/quirks.c
tools/gpio/gpio-event-mon.c
tools/gpio/gpio-watch.c
tools/lib/bpf/btf.c
tools/lib/perf/evlist.c
tools/objtool/check.c
tools/objtool/elf.c
tools/perf/builtin-script.c
tools/perf/util/metricgroup.c
tools/power/x86/intel-speed-select/isst-config.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_config.py
tools/testing/kunit/kunit_json.py
tools/testing/kunit/kunit_kernel.py
tools/testing/kunit/kunit_parser.py
tools/testing/selftests/bpf/prog_tests/test_local_storage.c
tools/testing/selftests/bpf/progs/local_storage.c
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/bpf/verifier/spill_fill.c
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/forwarding/router_mpath_nh.sh
tools/testing/selftests/net/forwarding/router_multipath.sh
tools/testing/selftests/net/xfrm_policy.sh
tools/testing/selftests/powerpc/alignment/alignment_handler.c
tools/testing/selftests/powerpc/mm/pkey_exec_prot.c
tools/testing/selftests/powerpc/mm/pkey_siginfo.c
virt/kvm/kvm_main.c

index 10dc5a9a61b3e33ba3c82e2059db2275e34efa63..01a341ceec6c04935529bb3bc4254a59ec651e93 100644 (file)
@@ -122,6 +122,7 @@ ForEachMacros:
   - 'drm_for_each_bridge_in_chain'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
+  - 'drm_for_each_crtc_reverse'
   - 'drm_for_each_encoder'
   - 'drm_for_each_encoder_mask'
   - 'drm_for_each_fb'
@@ -203,14 +204,13 @@ ForEachMacros:
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
   - 'for_each_member'
-  - 'for_each_mem_region'
-  - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
   - 'for_each_mem_pfn_range'
   - '__for_each_mem_range'
   - 'for_each_mem_range'
   - '__for_each_mem_range_rev'
   - 'for_each_mem_range_rev'
+  - 'for_each_mem_region'
   - 'for_each_migratetype_order'
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
@@ -276,10 +276,8 @@ ForEachMacros:
   - 'for_each_reserved_mem_range'
   - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dais'
-  - 'for_each_rtd_codec_dais_rollback'
   - 'for_each_rtd_components'
   - 'for_each_rtd_cpu_dais'
-  - 'for_each_rtd_cpu_dais_rollback'
   - 'for_each_rtd_dais'
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
@@ -298,6 +296,7 @@ ForEachMacros:
   - '__for_each_thread'
   - 'for_each_thread'
   - 'for_each_unicast_dest_pgid'
+  - 'for_each_vsi'
   - 'for_each_wakeup_source'
   - 'for_each_zone'
   - 'for_each_zone_zonelist'
@@ -330,6 +329,7 @@ ForEachMacros:
   - 'hlist_for_each_entry_rcu_bh'
   - 'hlist_for_each_entry_rcu_notrace'
   - 'hlist_for_each_entry_safe'
+  - 'hlist_for_each_entry_srcu'
   - '__hlist_for_each_rcu'
   - 'hlist_for_each_safe'
   - 'hlist_nulls_for_each_entry'
@@ -378,6 +378,7 @@ ForEachMacros:
   - 'list_for_each_entry_safe_continue'
   - 'list_for_each_entry_safe_from'
   - 'list_for_each_entry_safe_reverse'
+  - 'list_for_each_entry_srcu'
   - 'list_for_each_prev'
   - 'list_for_each_prev_safe'
   - 'list_for_each_safe'
@@ -411,6 +412,8 @@ ForEachMacros:
   - 'of_property_for_each_string'
   - 'of_property_for_each_u32'
   - 'pci_bus_for_each_resource'
+  - 'pcl_for_each_chunk'
+  - 'pcl_for_each_segment'
   - 'pcm_for_each_format'
   - 'ping_portaddr_for_each_entry'
   - 'plist_for_each'
index 632700cee55cdaeaea8d6765037150f493271c8f..d674968df008b6b242d84bed04a7ddbdac0ba278 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -9,9 +9,6 @@
 #
 # Please keep this list dictionary sorted.
 #
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
 Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
@@ -55,6 +52,8 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
 Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
@@ -180,6 +179,8 @@ Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
 Kees Cook <keescook@chromium.org> <keescook@google.com>
 Kees Cook <keescook@chromium.org> <kees@outflux.net>
 Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
+Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -200,6 +201,8 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
+Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
 Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
 Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
 Mark Brown <broonie@sirena.org.uk>
@@ -245,6 +248,7 @@ Morten Welinder <welinder@anemone.rentec.com>
 Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
 Mythri P K <mythripk@ti.com>
+Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
 Nguyen Anh Quynh <aquynh@gmail.com>
 Nicolas Ferre <nicolas.ferre@microchip.com> <nicolas.ferre@atmel.com>
 Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
@@ -335,6 +339,8 @@ Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar2@arm.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
+Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
 Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
index b662f747c83ebd99eac633e92a034ca407ed5354..8a21ce515f61fb2cf6680508c15445fcb4c1991e 100644 (file)
@@ -5,8 +5,8 @@ Description:
                Provide a place in sysfs for the device link objects in the
                kernel at any given time.  The name of a device link directory,
                denoted as ... above, is of the form <supplier>--<consumer>
-               where <supplier> is the supplier device name and <consumer> is
-               the consumer device name.
+               where <supplier> is the supplier bus:device name and <consumer>
+               is the consumer bus:device name.
 
 What:          /sys/class/devlink/.../auto_remove_on
 Date:          May 2020
index 1f06d74d1c3ccca76cb67e48c456d269ca3b2435..0809fda092e668e81535100630d9b653869fa4e8 100644 (file)
@@ -4,5 +4,6 @@ Contact:        Saravana Kannan <saravanak@google.com>
 Description:
                The /sys/devices/.../consumer:<consumer> are symlinks to device
                links where this device is the supplier. <consumer> denotes the
-               name of the consumer in that device link. There can be zero or
-               more of these symlinks for a given device.
+               name of the consumer in that device link and is of the form
+               bus:device name. There can be zero or more of these symlinks
+               for a given device.
index a919e0db5e902ca572ddde6560663a2ef05ad15d..207f5972e98d8c1cd847fddbf91e88759a4a581c 100644 (file)
@@ -4,5 +4,6 @@ Contact:        Saravana Kannan <saravanak@google.com>
 Description:
                The /sys/devices/.../supplier:<supplier> are symlinks to device
                links where this device is the consumer. <supplier> denotes the
-               name of the supplier in that device link. There can be zero or
-               more of these symlinks for a given device.
+               name of the supplier in that device link and is of the form
+               bus:device name. There can be zero or more of these symlinks
+               for a given device.
index adc0d0e916078001cd573c896cda7402e7b5e10b..75ccc5c62b3c498a940dafb12014b56cdfe156c2 100644 (file)
@@ -916,21 +916,25 @@ Date:             September 2014
 Contact:       Subhash Jadavani <subhashj@codeaurora.org>
 Description:   This entry could be used to set or show the UFS device
                runtime power management level. The current driver
-               implementation supports 6 levels with next target states:
+               implementation supports 7 levels with next target states:
 
                ==  ====================================================
-               0   an UFS device will stay active, an UIC link will
+               0   UFS device will stay active, UIC link will
                    stay active
-               1   an UFS device will stay active, an UIC link will
+               1   UFS device will stay active, UIC link will
                    hibernate
-               2   an UFS device will moved to sleep, an UIC link will
+               2   UFS device will be moved to sleep, UIC link will
                    stay active
-               3   an UFS device will moved to sleep, an UIC link will
+               3   UFS device will be moved to sleep, UIC link will
                    hibernate
-               4   an UFS device will be powered off, an UIC link will
+               4   UFS device will be powered off, UIC link will
                    hibernate
-               5   an UFS device will be powered off, an UIC link will
+               5   UFS device will be powered off, UIC link will
                    be powered off
+               6   UFS device will be moved to deep sleep, UIC link
+                   will be powered off. Note, deep sleep might not be
+                   supported in which case this value will not be
+                   accepted
                ==  ====================================================
 
 What:          /sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date:             September 2014
 Contact:       Subhash Jadavani <subhashj@codeaurora.org>
 Description:   This entry could be used to set or show the UFS device
                system power management level. The current driver
-               implementation supports 6 levels with next target states:
+               implementation supports 7 levels with next target states:
 
                ==  ====================================================
-               0   an UFS device will stay active, an UIC link will
+               0   UFS device will stay active, UIC link will
                    stay active
-               1   an UFS device will stay active, an UIC link will
+               1   UFS device will stay active, UIC link will
                    hibernate
-               2   an UFS device will moved to sleep, an UIC link will
+               2   UFS device will be moved to sleep, UIC link will
                    stay active
-               3   an UFS device will moved to sleep, an UIC link will
+               3   UFS device will be moved to sleep, UIC link will
                    hibernate
-               4   an UFS device will be powered off, an UIC link will
+               4   UFS device will be powered off, UIC link will
                    hibernate
-               5   an UFS device will be powered off, an UIC link will
+               5   UFS device will be powered off, UIC link will
                    be powered off
+               6   UFS device will be moved to deep sleep, UIC link
+                   will be powered off. Note, deep sleep might not be
+                   supported in which case this value will not be
+                   accepted
                ==  ====================================================
 
 What:          /sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state
index 4e6f504474ac2d48bab8e3df7260fc441e033ba4..2cc5488acbd97a1efda5152a4c6e80299246d2a2 100644 (file)
@@ -177,14 +177,20 @@ bitmap_flush_interval:number
        The bitmap flush interval in milliseconds. The metadata buffers
        are synchronized when this interval expires.
 
+allow_discards
+       Allow block discard requests (a.k.a. TRIM) for the integrity device.
+       Discards are only allowed to devices using internal hash.
+
 fix_padding
        Use a smaller padding of the tag area that is more
        space-efficient. If this option is not present, large padding is
        used - that is for compatibility with older kernels.
 
-allow_discards
-       Allow block discard requests (a.k.a. TRIM) for the integrity device.
-       Discards are only allowed to devices using internal hash.
+legacy_recalculate
+       Allow recalculating of volumes with HMAC keys. This is disabled by
+       default for security reasons - an attacker could modify the volume,
+       set recalc_sector to zero, and the kernel would not detect the
+       modification.
 
 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
 allow_discards can be changed when reloading the target (load an inactive
index 2267e4fb475e58921b8fd60a6495a492e0570388..ccf418713623b676935cb422956c7418a15bd23c 100644 (file)
@@ -13,6 +13,22 @@ This file documents the driver for the Rockchip ISP1 that is part of RK3288
 and RK3399 SoCs. The driver is located under drivers/staging/media/rkisp1
 and uses the Media-Controller API.
 
+Revisions
+=========
+
+There exist multiple smaller revisions to this ISP that got introduced in
+later SoCs. Revisions can be found in the enum :c:type:`rkisp1_cif_isp_version`
+in the UAPI and the revision of the ISP inside the running SoC can be read
+in the field hw_revision of struct media_device_info as returned by
+ioctl MEDIA_IOC_DEVICE_INFO.
+
+Versions in use are:
+
+- RKISP1_V10: used at least in rk3288 and rk3399
+- RKISP1_V11: declared in the original vendor code, but not used
+- RKISP1_V12: used at least in rk3326 and px30
+- RKISP1_V13: used at least in rk1808
+
 Topology
 ========
 .. _rkisp1_topology_graph:
index 32ea57483378ddeea5edc84278fe3d412920fcc5..76424e0431f4b5903b303774520c83d0cffc8004 100644 (file)
@@ -100,6 +100,11 @@ Instruction Macros
 ~~~~~~~~~~~~~~~~~~
 This section covers ``SYM_FUNC_*`` and ``SYM_CODE_*`` enumerated above.
 
+``objtool`` requires that all code must be contained in an ELF symbol. Symbol
+names that have a ``.L`` prefix do not emit symbol table entries. ``.L``
+prefixed symbols can be used within a code region, but should be avoided for
+denoting a range of code via ``SYM_*_START/END`` annotations.
+
 * ``SYM_FUNC_START`` and ``SYM_FUNC_START_LOCAL`` are supposed to be **the
   most frequent markings**. They are used for functions with standard calling
   conventions -- global and local. Like in C, they both align the functions to
index 0fc3fb1860c4a082f896dc343f8e244e02b12755..1651d961f06a67ab1d3fc5858023c761658c346c 100644 (file)
@@ -160,29 +160,14 @@ intended for use in production as a security mitigation. Therefore it supports
 boot parameters that allow to disable KASAN competely or otherwise control
 particular KASAN features.
 
-The things that can be controlled are:
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
+  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
+  ``off``).
 
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
-                                       (default: ``on`` for ``mode=full``,
-                                        otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
-                                        (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+  report or also panic the kernel (default: ``report``).
 
 For developers
 ~~~~~~~~~~~~~~
index d9fdc14f0677b889857a08c92d2db331aae83ef7..650f99590df57d0b3593455c993987cdfcdb400d 100644 (file)
@@ -522,6 +522,63 @@ There's more boilerplate involved, but it can:
   * E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
     field and reuse ``cases``.
 
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+       // This is copy-pasted from above.
+       struct sha1_test_case {
+               const char *str;
+               const char *sha1;
+       };
+       struct sha1_test_case cases[] = {
+               {
+                       .str = "hello world",
+                       .sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+               },
+               {
+                       .str = "hello world!",
+                       .sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+               },
+       };
+
+       // Need a helper function to generate a name for each test case.
+       static void case_to_desc(const struct sha1_test_case *t, char *desc)
+       {
+               strcpy(desc, t->str);
+       }
+       // Creates `sha1_gen_params()` to iterate over `cases`.
+       KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+       // Looks no different from a normal test.
+       static void sha1_test(struct kunit *test)
+       {
+               // This function can just contain the body of the for-loop.
+               // The former `cases[i]` is accessible under test->param_value.
+               char out[40];
+               struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+               sha1sum(test_param->str, out);
+               KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+                                     "sha1sum(%s)", test_param->str);
+       }
+
+       // Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+       // function declared by KUNIT_ARRAY_PARAM.
+       static struct kunit_case sha1_test_cases[] = {
+               KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+               {}
+       };
+
 .. _kunit-on-non-uml:
 
 KUnit on non-UML architectures
index 14cd727d3c4b75c12afed857664d4dc5822a7f51..f02fd10de604b90907e5b9336183457a8fcc33f3 100644 (file)
@@ -232,7 +232,6 @@ properties:
       by this cpu (see ./idle-states.yaml).
 
   capacity-dmips-mhz:
-    $ref: '/schemas/types.yaml#/definitions/uint32'
     description:
       u32 value representing CPU capacity (see ./cpu-capacity.txt) in
       DMIPS/MHz, relative to highest capacity-dmips-mhz
index 02c21b58474189fe956e83189f622c5359c1faa7..3bc760cc31cbbeee3487230db7a4cd40fe0ff12b 100644 (file)
@@ -40,7 +40,7 @@ Optional properties:
        documents on how to describe the way the sii902x device is
        connected to the rest of the audio system:
        Documentation/devicetree/bindings/sound/simple-card.yaml
-       Documentation/devicetree/bindings/sound/audio-graph-card.txt
+       Documentation/devicetree/bindings/sound/audio-graph-card.yaml
        Note: In case of the audio-graph-card binding the used port
        index should be 3.
 
index 33977e15bebdf2f0005b00d51e21fb10f65b8cbc..ed76332ec01e830dd0607a000afb6c4c34e5dedd 100644 (file)
@@ -23,7 +23,7 @@ connected to.
 
 For a description of the display interface sink function blocks, see
 Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt and
-Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt.
+Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.yaml.
 
 Required properties (all function blocks):
 - compatible: "mediatek,<chip>-disp-<function>", one of
@@ -61,7 +61,7 @@ Required properties (DMA function blocks):
        "mediatek,<chip>-disp-wdma"
   the supported chips are mt2701, mt8167 and mt8173.
 - larb: Should contain a phandle pointing to the local arbiter device as defined
-  in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+  in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
 - iommus: Should point to the respective IOMMU block with master port as
   argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
   for details.
index 5fe784f487c5ee17651d68ff5fc7e9e1bc5b5bf4..efdf59abb2e18b454f194c4e2bb4401ee725d187 100644 (file)
@@ -85,7 +85,6 @@ properties:
   wlf,micd-timeout-ms:
     description:
       Timeout for microphone detection, specified in milliseconds.
-    $ref: "/schemas/types.yaml#/definitions/uint32"
 
   wlf,micd-force-micbias:
     description:
index eef614962b1039e91bccdbdf23abfbf44be9894a..bf04151b63d2b4e474b2bc8c6fcca6e192eb09fe 100644 (file)
@@ -49,7 +49,6 @@ properties:
     description:
       This property controls the Accumulation Dead band which allows to set the
       level of current below which no accumulation takes place.
-    $ref: /schemas/types.yaml#/definitions/uint32
     maximum: 255
     default: 0
 
index 00a6511354e6c911eeef8006ae662d537eb9486d..5d3ce641fcdeb431bb9678c7e8ecf7945da00bea 100644 (file)
@@ -73,11 +73,9 @@ properties:
     description: |
       Temperature sensor trimming factor. It can be used to manually adjust the
       temperature measurements within 7.130 degrees Celsius.
-    maxItems: 1
-    items:
-      default: 0
-      minimum: 0
-      maximum: 7130
+    default: 0
+    minimum: 0
+    maximum: 7130
 
 additionalProperties: false
 
index 8020d739a078e53d23692ce76acdf668fb805713..1502b22c77ccb6711912de5ab967345b4c267bd5 100644 (file)
@@ -52,7 +52,6 @@ properties:
   ti,bus-range-microvolt:
     description: |
       This is the operating range of the bus voltage in microvolt
-    $ref: /schemas/types.yaml#/definitions/uint32
     enum: [16000000, 32000000]
     default: 32000000
 
index cc3aa2a5e70bf707c0dd521f7aac7228acecaf44..ff99344788ab84374e797b6375fa05d41f45b152 100644 (file)
@@ -39,11 +39,9 @@ properties:
 
   i2c-gpio,delay-us:
     description: delay between GPIO operations (may depend on each platform)
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   i2c-gpio,timeout-ms:
     description: timeout to get data
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   # Deprecated properties, do not use in new device tree sources:
   gpios:
index c22b66b6219eaa37fa630ed4b6f5118690ff70c7..d9293c57f573c0534adb777f88efad95e5da6c85 100644 (file)
@@ -66,21 +66,18 @@ properties:
     default: 400000
 
   i2c-sda-hold-time-ns:
-    maxItems: 1
     description: |
       The property should contain the SDA hold time in nanoseconds. This option
       is only supported in hardware blocks version 1.11a or newer or on
       Microsemi SoCs.
 
   i2c-scl-falling-time-ns:
-    maxItems: 1
     description: |
       The property should contain the SCL falling time in nanoseconds.
       This value is used to compute the tLOW period.
     default: 300
 
   i2c-sda-falling-time-ns:
-    maxItems: 1
     description: |
       The property should contain the SDA falling time in nanoseconds.
       This value is used to compute the tHIGH period.
index 6eef3480ea8fc0730f6b5e1277b2e7e38b062a84..c2efbb813ca275562044aa154699e03e87e7ca9c 100644 (file)
@@ -16,8 +16,8 @@ description:
 properties:
   compatible:
     enum:
-      - bosch,bmc150
-      - bosch,bmi055
+      - bosch,bmc150_accel
+      - bosch,bmi055_accel
       - bosch,bma255
       - bosch,bma250e
       - bosch,bma222
index e0cc3b2e895749b1d74d915d27aa8a321645562a..22b7ed3723f6735360f7515b722607979e13e3aa 100644 (file)
@@ -80,7 +80,7 @@ properties:
     type: boolean
 
   bipolar:
-    description: see Documentation/devicetree/bindings/iio/adc/adc.txt
+    description: see Documentation/devicetree/bindings/iio/adc/adc.yaml
     type: boolean
 
 required:
index 9475a9e6e92075a748f2cc270f4c70488cc76aec..95774a55629df1d59717e5ff656c535691f686a9 100644 (file)
@@ -23,7 +23,6 @@ properties:
     maxItems: 1
 
   shunt-resistor-micro-ohms:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description: |
       Value in micro Ohms of the shunt resistor connected between the RS+ and
       RS- inputs, across which the current is measured.  Value needed to compute
index 28417b31b5589b2bcb27e648643387a9a1500069..517e32976c30426a49e6e49a9f927c6547daecce 100644 (file)
@@ -246,7 +246,6 @@ patternProperties:
           Resolution (bits) to use for conversions:
             - can be 6, 8, 10 or 12 on stm32f4
             - can be 8, 10, 12, 14 or 16 on stm32h7 and stm32mp1
-        $ref: /schemas/types.yaml#/definitions/uint32
 
       st,adc-channels:
         description: |
index 692dacd0fee5fa9a87f4ebff610cb168dc7bf7be..7b895784e00853a13abc6630b453549be6b9d744 100644 (file)
@@ -42,7 +42,6 @@ properties:
     const: 1
 
   ti,channel0-current-microamp:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description: Channel 0 current in uA.
     enum:
       - 0
@@ -51,7 +50,6 @@ properties:
       - 20
 
   ti,channel3-current-microamp:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description: Channel 3 current in uA.
     enum:
       - 0
index 626ccb6fe21e136a0b493ef56f9ec3f3d75bad77..fd4edca34a2851bc5d4f5df944e5c403dc04fad3 100644 (file)
@@ -46,31 +46,42 @@ properties:
       two properties must be present:
 
   adi,range-microvolt:
-    $ref: /schemas/types.yaml#/definitions/int32-array
     description: |
       Voltage output range specified as <minimum, maximum>
-    enum:
-      - [[0, 5000000]]
-      - [[0, 10000000]]
-      - [[-5000000, 5000000]]
-      - [[-10000000, 10000000]]
+    oneOf:
+      - items:
+          - const: 0
+          - enum: [5000000, 10000000]
+      - items:
+          - const: -5000000
+          - const: 5000000
+      - items:
+          - const: -10000000
+          - const: 10000000
 
   adi,range-microamp:
-    $ref: /schemas/types.yaml#/definitions/int32-array
     description: |
       Current output range specified as <minimum, maximum>
-    enum:
-      - [[0, 20000]]
-      - [[0, 24000]]
-      - [[4, 24000]]
-      - [[-20000, 20000]]
-      - [[-24000, 24000]]
-      - [[-1000, 22000]]
+    oneOf:
+      - items:
+          - const: 0
+          - enum: [20000, 24000]
+      - items:
+          - const: 4
+          - const: 24000
+      - items:
+          - const: -20000
+          - const: 20000
+      - items:
+          - const: -24000
+          - const: 24000
+      - items:
+          - const: -1000
+          - const: 22000
 
   reset-gpios: true
 
   adi,dc-dc-ilim-microamp:
-    $ref: /schemas/types.yaml#/definitions/uint32
     enum: [150000, 200000, 250000, 300000, 350000, 400000]
     description: |
       The dc-to-dc converter current limit.
index 64b8626370393662d2dfaaf27d0291452d96bd76..967778fb0ce89f6129f857226d6581b0459722ea 100644 (file)
@@ -21,7 +21,6 @@ properties:
     description: Connected to ADC_RDY pin.
 
   maxim,led-current-microamp:
-    $ref: /schemas/types.yaml#/definitions/uint32-array
     minItems: 2
     maxItems: 2
     description: |
index e551814629b48e3a0ece53bf226864a701d1ed61..6c8be6a9ace286c359c1ed50fca83879b01e18ed 100644 (file)
@@ -5,7 +5,8 @@ Required properties:
  - compatible: "adc-keys"
  - io-channels: Phandle to an ADC channel
  - io-channel-names = "buttons";
- - keyup-threshold-microvolt: Voltage at which all the keys are considered up.
+ - keyup-threshold-microvolt: Voltage above or equal to which all the keys are
+                             considered up.
 
 Optional properties:
        - poll-interval: Poll interval time in milliseconds
@@ -17,7 +18,12 @@ Each button (key) is represented as a sub-node of "adc-keys":
 Required subnode-properties:
        - label: Descriptive name of the key.
        - linux,code: Keycode to emit.
-       - press-threshold-microvolt: Voltage ADC input when this key is pressed.
+       - press-threshold-microvolt: voltage above or equal to which this key is
+                                    considered pressed.
+
+No two values of press-threshold-microvolt may be the same.
+All values of press-threshold-microvolt must be less than
+keyup-threshold-microvolt.
 
 Example:
 
@@ -47,3 +53,15 @@ Example:
                        press-threshold-microvolt = <500000>;
                };
        };
+
++--------------------------------+------------------------+
+| 2.000.000 <= value             | no key pressed         |
++--------------------------------+------------------------+
+| 1.500.000 <= value < 2.000.000 | KEY_VOLUMEUP pressed   |
++--------------------------------+------------------------+
+| 1.000.000 <= value < 1.500.000 | KEY_VOLUMEDOWN pressed |
++--------------------------------+------------------------+
+|   500.000 <= value < 1.000.000 | KEY_ENTER pressed      |
++--------------------------------+------------------------+
+|              value <   500.000 | no key pressed         |
++--------------------------------+------------------------+
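
The lookup rule the table above illustrates can be sketched as follows. This
is an editor's illustration of the threshold semantics only, not the in-tree
adc-keys driver:

    /*
     * value_uv: measured ADC voltage in microvolts.
     * press_uv[]: press-threshold-microvolt values, sorted ascending.
     * Returns the matching keycode, or -1 when no key is pressed.
     */
    static int adc_keys_lookup(unsigned int value_uv, unsigned int keyup_uv,
                               const unsigned int *press_uv,
                               const int *keycode, int nkeys)
    {
            int i, code = -1;

            if (value_uv >= keyup_uv)
                    return -1;              /* at or above keyup threshold */
            for (i = 0; i < nkeys; i++)
                    if (value_uv >= press_uv[i])
                            code = keycode[i]; /* highest satisfied threshold wins */
            return code;
    }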
index da5b0d87e16d08aefec0c59826ff2253b6852c2e..93f2ce3130ae7e9c55806162d994a0414c3e94b4 100644 (file)
@@ -26,6 +26,7 @@ properties:
       - goodix,gt927
       - goodix,gt9271
       - goodix,gt928
+      - goodix,gt9286
       - goodix,gt967
 
   reg:
index a771a15f053fa2c582cb9f7156a225a9ed1715bb..046ace461cc9d4d0450c94241e74ca66ef3dbb1e 100644 (file)
@@ -70,11 +70,9 @@ properties:
 
   touchscreen-x-mm:
     description: horizontal length in mm of the touchscreen
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   touchscreen-y-mm:
     description: vertical length in mm of the touchscreen
-    $ref: /schemas/types.yaml#/definitions/uint32
 
 dependencies:
   touchscreen-size-x: [ touchscreen-size-y ]
diff --git a/Documentation/devicetree/bindings/leds/richtek,rt8515.yaml b/Documentation/devicetree/bindings/leds/richtek,rt8515.yaml
new file mode 100644 (file)
index 0000000..68c328e
--- /dev/null
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/richtek,rt8515.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT8515 1.5A dual channel LED driver
+
+maintainers:
+  - Linus Walleij <linus.walleij@linaro.org>
+
+description: |
+  The Richtek RT8515 is a dual channel (two mode) LED driver that
+  supports driving a white LED in flash or torch mode. The maximum
+  current for each mode is defined in hardware using two resistors
+  RFS and RTS.
+
+properties:
+  compatible:
+    const: richtek,rt8515
+
+  enf-gpios:
+    maxItems: 1
+    description: A connection to the 'ENF' (enable flash) pin.
+
+  ent-gpios:
+    maxItems: 1
+    description: A connection to the 'ENT' (enable torch) pin.
+
+  richtek,rfs-ohms:
+    minimum: 7680
+    maximum: 367000
+    description: The resistance value of the RFS resistor. This
+      resistor limits the maximum flash current. This property must be
+      set for flash-max-microamp to work; the RFS resistor defines the
+      range of the dimmer setting (brightness) of the flash LED.
+
+  richtek,rts-ohms:
+    minimum: 7680
+    maximum: 367000
+    description: The resistance value of the RTS resistor. This
+      resistor limits the maximum torch current. This property must be
+      set for led-max-microamp to work; the RTS resistor defines the
+      range of the dimmer setting (brightness) of the torch LED.
+
+  led:
+    type: object
+    $ref: common.yaml#
+    properties:
+      function: true
+      color: true
+      flash-max-timeout-us: true
+
+      flash-max-microamp:
+        maximum: 700000
+        description: The maximum current for flash mode
+          is hardwired to the component using the RFS resistor to
+          ground. The maximum hardware current setting is calculated
+          according to the formula Imax = 5500 / RFS. The lowest
+          allowed resistance value is 7.86 kOhm giving an absolute
+          maximum current of 700mA. By setting this attribute in
+          the device tree, you can further restrict the maximum
+          current below the hardware limit. This requires the RFS
+          to be defined as it defines the maximum range.
+
+      led-max-microamp:
+        maximum: 700000
+        description: The maximum current for torch mode
+          is hardwired to the component using the RTS resistor to
+          ground. The maximum hardware current setting is calculated
+          according to the formula Imax = 5500 / RTS. The lowest
+          allowed resistance value is 7.86 kOhm giving an absolute
+          maximum current of 700mA. By setting this attribute in
+          the device tree, you can further restrict the maximum
+          current below the hardware limit. This requires the RTS
+          to be defined as it defines the maximum range.
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - ent-gpios
+  - enf-gpios
+  - led
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/leds/common.h>
+
+    led-controller {
+        compatible = "richtek,rt8515";
+        enf-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+        ent-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
+        richtek,rfs-ohms = <16000>;
+        richtek,rts-ohms = <100000>;
+
+        led {
+            function = LED_FUNCTION_FLASH;
+            color = <LED_COLOR_ID_WHITE>;
+            flash-max-timeout-us = <250000>;
+            flash-max-microamp = <150000>;
+            led-max-microamp = <25000>;
+        };
+    };
+
+...
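
As a rough cross-check of the Imax = 5500 / RFS formula in the description
(the arithmetic below is the editor's, not part of the binding): the example
RFS of 16 kOhm limits the flash current to roughly 344 mA, so the example
flash-max-microamp of 150000 sits well inside the hardware limit.

    /* Sketch: hardware flash-current limit in microamps for a given RFS. */
    unsigned long long rfs_ohms = 16000;                    /* from the example */
    unsigned long long imax_ua = 5500000000ULL / rfs_ohms;  /* ~343750 uA */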
index 044b11913c49b8fa5ef2843b6bb7d42d0583f6d6..cf60c5acc0e4e0844ff77719123cc7357fd58751 100644 (file)
@@ -16,7 +16,7 @@ Required properties:
 - power-domains: a phandle to the power domain, see
   Documentation/devicetree/bindings/power/power_domain.txt for details.
 - mediatek,larb: must contain the local arbiters in the current Socs, see
-  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
   for details.
 - iommus: should point to the respective IOMMU block with master port as
   argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
index 736be7cad3857b893f11fb39943b208ead4d317a..acfb50375b8acdced638980d9525a64802a8d445 100644 (file)
@@ -14,7 +14,7 @@ Required properties:
 - power-domains: a phandle to the power domain, see
   Documentation/devicetree/bindings/power/power_domain.txt for details.
 - mediatek,larb: must contain the local arbiters in the current SoCs, see
-  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
   for details.
 - iommus: should point to the respective IOMMU block with master port as
   argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
index 0d03e3ae2be2f23a294a27fbc63425d3f540eb05..f4798d04e92523552d85874796637d8b48fc99b9 100644 (file)
@@ -28,7 +28,7 @@ Required properties (DMA function blocks, child node):
   argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
   for details.
 - mediatek,larb: must contain the local arbiters in the current Socs, see
-  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
+  Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.yaml
   for details.
 
 Example:
index 186f04ba935796758e20fa94376ce7057a4a0886..e674bba52ee97a1202754aa4b93151e56a0034a4 100644 (file)
@@ -259,7 +259,6 @@ properties:
       waiting for I/O signalling and card power supply to be stable,
       regardless of whether pwrseq-simple is used. Default to 10ms if
       no available.
-    $ref: /schemas/types.yaml#/definitions/uint32
     default: 10
 
   supports-cqe:
index 6cd57863c1dbdbdc669ef2174fc7f344cd84a0a9..226fb191913d2a5dd7b757509a2e10b3edb7a9a9 100644 (file)
@@ -41,13 +41,11 @@ properties:
     description:
       Delay in ms after powering the card and de-asserting the
       reset-gpios (if any).
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   power-off-delay-us:
     description:
       Delay in us after asserting the reset-gpios (if any)
       during power off of the card.
-    $ref: /schemas/types.yaml#/definitions/uint32
 
 required:
   - compatible
index 0965f6515f9ec1d70a0eca3ee5e00b41df12577c..dac4aadb6e2e70ba6c147446481afb47a98d6479 100644 (file)
@@ -122,7 +122,6 @@ properties:
       such as flow control thresholds.
 
   rx-internal-delay-ps:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description: |
       RGMII Receive Clock Delay defined in pico seconds.
       This is used for controllers that have configurable RX internal delays.
@@ -140,7 +139,6 @@ properties:
       is used for components that can have configurable fifo sizes.
 
   tx-internal-delay-ps:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description: |
       RGMII Transmit Clock Delay defined in pico seconds.
       This is used for controllers that have configurable TX internal delays.
index dfbf5fe4547ab611a7004ee2baeb4914d93ce61e..0642b0f59491036d649af5f7d1513b2047c05619 100644 (file)
@@ -212,7 +212,6 @@ properties:
       Triplet of delays. The 1st cell is reset pre-delay in micro
       seconds. The 2nd cell is reset pulse in micro seconds. The 3rd
       cell is reset post-delay in micro seconds.
-    $ref: /schemas/types.yaml#/definitions/uint32-array
     minItems: 3
     maxItems: 3
 
index 0c7e2e44793baa5cd5c0260d6ed6233420d53f51..c3b4b754359186ab8dae6c1bc685926b34592ec8 100644 (file)
@@ -83,21 +83,18 @@ properties:
       for each of the battery capacity lookup table.
 
   operating-range-celsius:
-    $ref: /schemas/types.yaml#/definitions/uint32-array
     description: operating temperature range of a battery
     items:
       - description: minimum temperature at which battery can operate
       - description: maximum temperature at which battery can operate
 
   ambient-celsius:
-    $ref: /schemas/types.yaml#/definitions/uint32-array
     description: safe range of ambient temperature
     items:
       - description: alert when ambient temperature is lower than this value
       - description: alert when ambient temperature is higher than this value
 
   alert-celsius:
-    $ref: /schemas/types.yaml#/definitions/uint32-array
     description: safe range of battery temperature
     items:
       - description: alert when battery temperature is lower than this value
index 75a56773be4a0a364909feff196b2e81dc6288e6..813d6afde6069239458cb0506a436ee8f3a9daa4 100644 (file)
@@ -50,7 +50,6 @@ properties:
     maxItems: 1
 
   input-current-limit-microamp:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description: Maximum input current in micro Amps.
     minimum: 50000
     maximum: 500000
index 6f2164f7bc577fc6dbe04b47ce4b9b521c40acba..228018c87bea891a0958ef35e7bef5d3447bf553 100644 (file)
@@ -62,7 +62,6 @@ properties:
     description: IRQ line information.
 
   dlg,irq-polling-delay-passive-ms:
-    $ref: "/schemas/types.yaml#/definitions/uint32"
     minimum: 1000
     maximum: 10000
     description: |
index d3d0dc13dd8b80192f4860d930e89319da27ca2d..8850c01bd47060c2aad7cefa74d37fa303a286a8 100644 (file)
@@ -72,11 +72,9 @@ properties:
 
   startup-delay-us:
     description: startup time in microseconds
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   off-on-delay-us:
     description: off delay time in microseconds
-    $ref: /schemas/types.yaml#/definitions/uint32
 
   enable-active-high:
     description:
index d30dc045aac6489ba18e4b2c246b77d3d636088b..0ec3551f12dd306fd06c6caa288eb78ca479ed6f 100644 (file)
@@ -27,7 +27,6 @@ properties:
       1: chargeable
 
   quartz-load-femtofarads:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description:
       The capacitive load of the quartz(x-tal), expressed in femto
       Farad (fF). The default value shall be listed (if optional),
@@ -47,7 +46,6 @@ properties:
     deprecated: true
 
   trickle-resistor-ohms:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description:
       Selected resistor for trickle charger. Should be given
       if trickle charger should be enabled.
index c23c93b400f062f82501a09d3363cc14faab1dec..07fa6d26f2b43c8091f0a4d61701930bee44364c 100644 (file)
@@ -88,14 +88,12 @@ properties:
     description:
       Rate at which poll occurs when auto-poll is set.
       default 100ms.
-    $ref: /schemas/types.yaml#/definitions/uint32
     default: 100
 
   poll-timeout-ms:
     description:
       Poll timeout when auto-poll is set, default
       3000ms.
-    $ref: /schemas/types.yaml#/definitions/uint32
     default: 3000
 
 required:
index bf8c8ba25009dcc90c26f9049492c29a1e6e46c4..54650823b29a41de10061ad664ac2080eefdab66 100644 (file)
@@ -7,8 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Mediatek MT8192 with MT6359, RT1015 and RT5682 ASoC sound card driver
 
 maintainers:
-   - Jiaxin Yu <jiaxin.yu@mediatek.com>
-   - Shane Chien <shane.chien@mediatek.com>
+  - Jiaxin Yu <jiaxin.yu@mediatek.com>
+  - Shane Chien <shane.chien@mediatek.com>
 
 description:
   This binding describes the MT8192 sound card.
index d116c174b545f412556143b5c775b392f0cde1d7..70b4a8831073233a4c45828395226ce542225bed 100644 (file)
@@ -41,14 +41,12 @@ properties:
       values of 2k, 4k or 8k. If set to 0 it will be off. If this node is not
       mentioned or if the value is unknown, then micbias resistor is set to
       4k.
-    $ref: "/schemas/types.yaml#/definitions/uint32"
     enum: [ 0, 2, 4, 8 ]
 
   micbias-voltage-m-volts:
     description: The bias voltage to be used in mVolts. The voltage can take
       values from 1.25V to 3V by 250mV steps. If this node is not mentioned
       or the value is unknown, then the value is set to 1.25V.
-    $ref: "/schemas/types.yaml#/definitions/uint32"
     enum: [ 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000 ]
 
   lrclk-strength:
index 148b3fb4ceaf85f69a57883efc0962f434dce008..c80a835719196b6ec1ca1eef6d1bbaa61e704872 100644 (file)
@@ -21,6 +21,8 @@ properties:
   reg:
     description: module registers
 
+  ranges: true
+
   power-domains:
     description:
       PM domain provider node and an args specifier containing
@@ -62,6 +64,8 @@ properties:
   '#size-cells':
     const: 2
 
+  dma-coherent: true
+
 patternProperties:
   "^usb@":
     type: object
index 4e2c26cd981d90653c22fb7390c8638692404d7c..e3dfb02f0ca52467ea3276dee2b2b8ce34bf5779 100644 (file)
@@ -19,7 +19,6 @@ properties:
     pattern: "^watchdog(@.*|-[0-9a-f])?$"
 
   timeout-sec:
-    $ref: /schemas/types.yaml#/definitions/uint32
     description:
       Contains the watchdog timeout in seconds.
 
index 587a939739290628f7c236443493f570c6df7040..78240e29b0bb676963ffe1dfb023d5e501fd1e51 100644 (file)
@@ -586,6 +586,14 @@ without significant effort.
 The advantage of mounting with the "volatile" option is that all forms of
 sync calls to the upper filesystem are omitted.
 
+In order to avoid giving a false sense of safety, the syncfs (and fsync)
+semantics of volatile mounts are slightly different from those of the rest of
+VFS.  If any writeback error occurs on the upperdir's filesystem after a
+volatile mount takes place, all sync functions will return an error.  Once this
+condition is reached, the filesystem will not recover, and every subsequent sync
+call will return an error, even if the upperdir has not experienced a new error
+since the last sync call.
+
 When overlay is mounted with "volatile" option, the directory
 "$workdir/work/incompat/volatile" is created.  During next mount, overlay
 checks for this directory and refuses to mount if present. This is a strong
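
A minimal userspace sketch of the sticky-error behaviour described above,
assuming an overlay mounted with the "volatile" option at /mnt/ovl (the path
and error handling are illustrative only):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    static int ovl_sync(void)
    {
            int fd = open("/mnt/ovl", O_RDONLY | O_DIRECTORY);
            int ret;

            if (fd < 0)
                    return -1;
            /*
             * Once a writeback error has hit the upperdir, this keeps
             * returning an error until the volatile overlay is remounted.
             */
            ret = syncfs(fd);
            close(fd);
            return ret;
    }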
index dd2b12a32b73903b1fe6a422214c7d832bb30d05..fa544e9037b995f9cb0041184643166ed92f086d 100644 (file)
@@ -1196,7 +1196,7 @@ icmp_errors_use_inbound_ifaddr - BOOLEAN
 
        If non-zero, the message will be sent with the primary address of
        the interface that received the packet that caused the icmp error.
-       This is the behaviour network many administrators will expect from
+       This is the behaviour many network administrators will expect from
        a router. And it can make debugging complicated network layouts
        much easier.
 
@@ -1807,12 +1807,24 @@ seg6_flowlabel - INTEGER
 ``conf/default/*``:
        Change the interface-specific default settings.
 
+       These settings are used when creating new interfaces.
+
 
 ``conf/all/*``:
        Change all the interface-specific settings.
 
        [XXX:  Other special features than forwarding?]
 
+conf/all/disable_ipv6 - BOOLEAN
+       Changing this value is the same as changing the ``conf/default/disable_ipv6``
+       setting and also all per-interface ``disable_ipv6`` settings to the same
+       value.
+
+       Reading this value does not have any particular meaning. It does not say
+       whether IPv6 support is enabled or disabled. The returned value can be 1
+       even when some interface still has ``disable_ipv6`` set to 0 and
+       has configured IPv6 addresses.
+
 conf/all/forwarding - BOOLEAN
        Enable global IPv6 forwarding between all interfaces.
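
A hedged illustration of the conf/all/disable_ipv6 knob documented above,
using the standard procfs path (error handling trimmed for brevity):

    #include <stdio.h>

    int main(void)
    {
            /*
             * Writing to the "all" knob also rewrites the default and every
             * per-interface disable_ipv6 setting, as described above.
             */
            FILE *f = fopen("/proc/sys/net/ipv6/conf/all/disable_ipv6", "w");

            if (!f)
                    return 1;
            fputs("1\n", f);        /* 1 = disable IPv6, 0 = re-enable */
            return fclose(f) ? 1 : 0;
    }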
 
index 9af3334d9ad080c1aaef9dca67f709a2fa8ef343..5f0dea3d571e359a2655defdcf9b7383b9801ba8 100644 (file)
@@ -534,3 +534,6 @@ offload. Hence, TLS TX device feature flag requires TX csum offload being set.
 Disabling the latter implies clearing the former. Disabling TX checksum offload
 should not affect old connections, and drivers should make sure checksum
 calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX csum offload, TLS RX device feature is disabled
+as well.
index c136e254b4960270863e86ecc01325771ab5ab6a..99ceb978c8b0804b63dc9bb74d144f28dba17f49 100644 (file)
@@ -360,10 +360,9 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.
 
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field specify
+the address space for which you want to return the dirty bitmap.  See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
 
 The bits in the dirty bitmap are cleared before the ioctl returns, unless
 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled.  For more information,
@@ -1281,6 +1280,9 @@ field userspace_addr, which must point at user addressable memory for
 the entire memory slot size.  Any object may back this memory, including
 anonymous memory, ordinary files, and hugetlbfs.
 
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
 It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
 be identical.  This allows large pages in the guest to be backed by large
 pages in the host.
@@ -1333,7 +1335,7 @@ documentation when it pops into existence).
 
 :Capability: KVM_CAP_ENABLE_CAP_VM
 :Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
 :Parameters: struct kvm_enable_cap (in)
 :Returns: 0 on success; -1 on error
 
@@ -4432,7 +4434,7 @@ to I/O ports.
 :Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 :Architectures: x86, arm, arm64, mips
 :Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
 :Returns: 0 on success, -1 on error
 
 ::
@@ -4459,10 +4461,9 @@ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
 (for example via write-protection, or by clearing the dirty bit in
 a page table entry).
 
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of the slot field specify
+the address space for which you want to clear the dirty status.  See
+KVM_SET_USER_MEMORY_REGION for details on the usage of the slot field.
 
 This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 is enabled; for more information, see the description of the capability.
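
A hedged userspace sketch of the slot-field encoding referred to in both
hunks above; the helper name is the editor's, while struct kvm_dirty_log and
KVM_GET_DIRTY_LOG come from <linux/kvm.h>. The address-space id must be below
the value KVM_CHECK_EXTENSION reports for KVM_CAP_MULTI_ADDRESS_SPACE, and the
bitmap must be sized for the memslot being queried:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int get_dirty_log(int vm_fd, unsigned int as_id,
                             unsigned int slot_id, void *bitmap)
    {
            struct kvm_dirty_log log;

            memset(&log, 0, sizeof(log));           /* clear padding */
            log.slot = (as_id << 16) | slot_id;     /* bits 16-31: address space */
            log.dirty_bitmap = bitmap;
            return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
    }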
index 6ab4e35cee233c408338d3d7efbff4d6c9f065a9..ac2095d41f020c9095918bede118a72b34b118a1 100644 (file)
@@ -37,8 +37,10 @@ call L2.
 Running nested VMX
 ------------------
 
-The nested VMX feature is disabled by default. It can be enabled by giving
-the "nested=1" option to the kvm-intel module.
+The nested VMX feature has been enabled by default since Linux kernel v4.20. For
+older kernels, it can be enabled by giving the "nested=1" option to the
+kvm-intel module.
+
 
 No modifications are required to user space (qemu). However, qemu's default
 emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
index d0a1fc754c84f4addc328e359d4d4f41319ec45b..bd70c69468aebb236937232d41adce56654e3149 100644 (file)
@@ -74,7 +74,7 @@ few:
 Enabling "nested" (x86)
 -----------------------
 
-From Linux kernel v4.19 onwards, the ``nested`` KVM parameter is enabled
+From Linux kernel v4.20 onwards, the ``nested`` KVM parameter is enabled
 by default for Intel and AMD.  (Though your Linux distribution might
 override this default.)
 
index 00836f6452f044319e0334c6229cc9db87892ed8..667d03852191fbbcb6827eaa7bdbd9bbfc8c0a4f 100644 (file)
@@ -2616,8 +2616,8 @@ S:        Maintained
 F:     drivers/power/reset/keystone-reset.c
 
 ARM/TEXAS INSTRUMENTS K3 ARCHITECTURE
-M:     Tero Kristo <t-kristo@ti.com>
 M:     Nishanth Menon <nm@ti.com>
+M:     Tero Kristo <kristo@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     Documentation/devicetree/bindings/arm/ti/k3.yaml
@@ -3239,6 +3239,7 @@ L:        netdev@vger.kernel.org
 S:     Supported
 W:     http://sourceforge.net/projects/bonding/
 F:     drivers/net/bonding/
+F:     include/net/bonding.h
 F:     include/uapi/linux/if_bonding.h
 
 BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
@@ -3334,7 +3335,7 @@ F:        arch/riscv/net/
 X:     arch/riscv/net/bpf_jit_comp64.c
 
 BPF JIT for RISC-V (64-bit)
-M:     Björn Töpel <bjorn.topel@gmail.com>
+M:     Björn Töpel <bjorn@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
@@ -3411,7 +3412,7 @@ F:        Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
 F:     drivers/pci/controller/pcie-brcmstb.c
 F:     drivers/staging/vc04_services
 N:     bcm2711
-N:     bcm2835
+N:     bcm283*
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
@@ -3879,7 +3880,7 @@ F:        Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F:     drivers/mtd/nand/raw/cadence-nand-controller.c
 
 CADENCE USB3 DRD IP DRIVER
-M:     Peter Chen <peter.chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 M:     Pawel Laszczak <pawell@cadence.com>
 R:     Roger Quadros <rogerq@kernel.org>
 R:     Aswath Govindraju <a-govindraju@ti.com>
@@ -4161,7 +4162,7 @@ S:        Maintained
 F:     Documentation/translations/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4303,7 +4304,7 @@ S:        Maintained
 F:     .clang-format
 
 CLANG/LLVM BUILD SUPPORT
-M:     Nathan Chancellor <natechancellor@gmail.com>
+M:     Nathan Chancellor <nathan@kernel.org>
 M:     Nick Desaulniers <ndesaulniers@google.com>
 L:     clang-built-linux@googlegroups.com
 S:     Supported
@@ -4311,7 +4312,9 @@ W:        https://clangbuiltlinux.github.io/
 B:     https://github.com/ClangBuiltLinux/linux/issues
 C:     irc://chat.freenode.net/clangbuiltlinux
 F:     Documentation/kbuild/llvm.rst
+F:     include/linux/compiler-clang.h
 F:     scripts/clang-tools/
+F:     scripts/clang-version.sh
 F:     scripts/lld-version.sh
 K:     \b(?i:clang|llvm)\b
 
@@ -6471,9 +6474,9 @@ S:        Maintained
 F:     drivers/edac/skx_*.[ch]
 
 EDAC-TI
-M:     Tero Kristo <t-kristo@ti.com>
+M:     Tero Kristo <kristo@kernel.org>
 L:     linux-edac@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     drivers/edac/ti_edac.c
 
 EDIROL UA-101/UA-1000 DRIVER
@@ -8431,11 +8434,8 @@ F:       drivers/i3c/
 F:     include/linux/i3c/
 
 IA64 (Itanium) PLATFORM
-M:     Tony Luck <tony.luck@intel.com>
-M:     Fenghua Yu <fenghua.yu@intel.com>
 L:     linux-ia64@vger.kernel.org
-S:     Odd Fixes
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
+S:     Orphan
 F:     Documentation/ia64/
 F:     arch/ia64/
 
@@ -12413,6 +12413,7 @@ F:      tools/testing/selftests/net/ipsec.c
 NETWORKING [IPv4/IPv6]
 M:     "David S. Miller" <davem@davemloft.net>
 M:     Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+M:     David Ahern <dsahern@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@ -14504,10 +14505,18 @@ S:    Supported
 F:     drivers/crypto/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
-M:     Patrick Lai <plai@codeaurora.org>
+M:     Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M:     Banajit Goswami <bgoswami@codeaurora.org>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
+F:     sound/soc/codecs/lpass-va-macro.c
+F:     sound/soc/codecs/lpass-wsa-macro.*
+F:     sound/soc/codecs/msm8916-wcd-analog.c
+F:     sound/soc/codecs/msm8916-wcd-digital.c
+F:     sound/soc/codecs/wcd9335.*
+F:     sound/soc/codecs/wcd934x.c
+F:     sound/soc/codecs/wcd-clsh-v2.*
+F:     sound/soc/codecs/wsa881x.c
 F:     sound/soc/qcom/
 
 QCOM IPA DRIVER
@@ -16959,7 +16968,7 @@ M:      Olivier Moysan <olivier.moysan@st.com>
 M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
-F:     Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -17563,7 +17572,7 @@ F:      include/linux/dma/k3-psil.h
 
 TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
 M:     Nishanth Menon <nm@ti.com>
-M:     Tero Kristo <t-kristo@ti.com>
+M:     Tero Kristo <kristo@kernel.org>
 M:     Santosh Shilimkar <ssantosh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
@@ -17707,9 +17716,9 @@ S:      Maintained
 F:     drivers/clk/clk-cdce706.c
 
 TI CLOCK DRIVER
-M:     Tero Kristo <t-kristo@ti.com>
+M:     Tero Kristo <kristo@kernel.org>
 L:     linux-omap@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     drivers/clk/ti/
 F:     include/linux/clk/ti.h
 
@@ -18412,7 +18421,7 @@ F:      Documentation/usb/ohci.rst
 F:     drivers/usb/host/ohci*
 
 USB OTG FSM (Finite State Machine)
-M:     Peter Chen <Peter.Chen@nxp.com>
+M:     Peter Chen <peter.chen@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -19416,7 +19425,7 @@ F:      drivers/net/ethernet/*/*/*xdp*
 K:     (?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
-M:     Björn Töpel <bjorn.topel@intel.com>
+M:     Björn Töpel <bjorn@kernel.org>
 M:     Magnus Karlsson <magnus.karlsson@intel.com>
 R:     Jonathan Lemon <jonathan.lemon@gmail.com>
 L:     netdev@vger.kernel.org
index 97c781cf042bc020db51bf88b5d95ad3d12e53db..01c36d209ab434525404cc1b7d8fbeb62718133b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
index 8452753efebe5621e2569db95df2b4c30b26df99..31927d2fe297281a47f810e96b06e541043e90d4 100644 (file)
@@ -15,7 +15,8 @@ static int node_offset(void *fdt, const char *node_path)
 {
        int offset = fdt_path_offset(fdt, node_path);
        if (offset == -FDT_ERR_NOTFOUND)
-               offset = fdt_add_subnode(fdt, 0, node_path);
+               /* Add the node to root if not found, dropping the leading '/' */
+               offset = fdt_add_subnode(fdt, 0, node_path + 1);
        return offset;
 }
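
For context on the fix above: libfdt's fdt_path_offset() takes a full path,
while fdt_add_subnode() takes a bare child name relative to a parent offset,
hence "node_path + 1" to drop the leading '/' when creating the node directly
under the root (offset 0). A hedged stand-alone sketch of the same pattern
("/chosen" is only an illustration):

    #include <libfdt.h>

    /* Ensure /chosen exists, mirroring node_offset() above. */
    static int ensure_chosen(void *fdt)
    {
            int off = fdt_path_offset(fdt, "/chosen");

            if (off == -FDT_ERR_NOTFOUND)
                    off = fdt_add_subnode(fdt, 0, "chosen"); /* name, no '/' */
            return off;
    }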
 
index 861e05d53157e94a3203db81bbed827953cc2a53..343364d3e4f7d3d59d3592cabca17bafb3b7b0cb 100644 (file)
                stdout-path = &uart1;
        };
 
+       aliases {
+               mmc0 = &usdhc2;
+               mmc1 = &usdhc3;
+               mmc2 = &usdhc4;
+               /delete-property/ mmc3;
+       };
+
        memory@10000000 {
                device_type = "memory";
                reg = <0x10000000 0x80000000>;
index 736074f1c3ef940c101160dcd5f0e79c56baebaa..959d8ac2e393bfa9afca68f71c4d7c1d8c360edb 100644 (file)
 
                        /* VDD_AUD_1P8: Audio codec */
                        reg_aud_1p8v: ldo3 {
-                               regulator-name = "vdd1p8";
+                               regulator-name = "vdd1p8a";
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
                                regulator-boot-on;
index d6df598bd1c29b728ca8ffa1e194df95f2826c14..b167b33bd108dc0f197a493a6c8f6c5207910338 100644 (file)
 
        lcd_backlight: lcd-backlight {
                compatible = "pwm-backlight";
-               pwms = <&pwm4 0 5000000>;
+               pwms = <&pwm4 0 5000000 0>;
                pwm-names = "LCD_BKLT_PWM";
 
                brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
                i2c-gpio,delay-us = <2>; /* ~100 kHz */
                #address-cells = <1>;
                #size-cells = <0>;
-               status = "disabld";
+               status = "disabled";
        };
 
        i2c_cam: i2c-gpio-cam {
                i2c-gpio,delay-us = <2>; /* ~100 kHz */
                #address-cells = <1>;
                #size-cells = <0>;
-               status = "disabld";
+               status = "disabled";
        };
 };
 
index b06577808ff4eb13b644a489af529b2df7ee7ea7..7e4e5fd0143a1272b9f83067f7b2f02339193533 100644 (file)
@@ -53,7 +53,6 @@
 &fec {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_microsom_enet_ar8035>;
-       phy-handle = <&phy>;
        phy-mode = "rgmii-id";
        phy-reset-duration = <2>;
        phy-reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
                #address-cells = <1>;
                #size-cells = <0>;
 
-               phy: ethernet-phy@0 {
+               /*
+                * The PHY can appear at either address 0 or 4 due to the
+                * configuration (LED) pin not being pulled sufficiently.
+                */
+               ethernet-phy@0 {
                        reg = <0>;
                        qca,clk-out-frequency = <125000000>;
                };
+
+               ethernet-phy@4 {
+                       reg = <4>;
+                       qca,clk-out-frequency = <125000000>;
+               };
        };
 };
 
index 84b095279e656397445ad92a4099f8eb64459dbe..bd6b5285aa8d27b0debd4e73e70251c21159c46d 100644 (file)
                compatible = "nxp,pcf2127";
                reg = <0>;
                spi-max-frequency = <2000000>;
+               reset-source;
        };
 };
 
index 3a5cfb0ddb20ae691b38ceb1b2a3d1ac44a45a22..c87066d6c9950c51fce95b6681379eb446bfcf2f 100644 (file)
 
                                        clocks = <&xtal_32k>, <&xtal>;
                                        clock-names = "xtal_32k", "xtal";
-
-                                       assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
-                                       assigned-clock-rates = <208000000>;
                                };
                        };
 
index c8745bc800f717530fccdf2da9b42fd9b8816ff9..7b8c18e6605e406339776d88b6e3b1b35213fac4 100644 (file)
                gpio-sck = <&gpio1 12 GPIO_ACTIVE_HIGH>;
                gpio-miso = <&gpio1 18 GPIO_ACTIVE_HIGH>;
                gpio-mosi = <&gpio1 20 GPIO_ACTIVE_HIGH>;
-               cs-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+               cs-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
                num-chipselects = <1>;
 
                /* lcd panel */
                        spi-max-frequency = <100000>;
                        spi-cpol;
                        spi-cpha;
-                       spi-cs-high;
 
                        backlight= <&backlight>;
                        label = "lcd";
index 3ea4c5b9fd31961d1037d40924652e9dc311d4ed..e833c21f1c01491956bc1949d385ad673edf0011 100644 (file)
                        debounce-interval = <10>;
                };
 
+               /*
+                * We use pad 0x4a100116 abe_dmic_din3.gpio_122 as the irq instead
+                * of the gpio interrupt to avoid lost events in deeper idle states.
+               */
                slider {
                        label = "Keypad Slide";
+                       interrupts-extended = <&omap4_pmx_core 0xd6>;
                        gpios = <&gpio4 26 GPIO_ACTIVE_HIGH>; /* gpio122 */
                        linux,input-type = <EV_SW>;
                        linux,code = <SW_KEYPAD_SLIDE>;
index d309fad32229d9adc3c88ff7c789d04aa7b6b3cd..344d29853bf76f1300872822fd4ba6d0297f903d 100644 (file)
                                            200000 0>;
                };
        };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* Modem trace memory */
+               ram@06000000 {
+                       reg = <0x06000000 0x00f00000>;
+                       no-map;
+               };
+
+               /* Modem shared memory */
+               ram@06f00000 {
+                       reg = <0x06f00000 0x00100000>;
+                       no-map;
+               };
+
+               /* Modem private memory */
+               ram@07000000 {
+                       reg = <0x07000000 0x01000000>;
+                       no-map;
+               };
+
+               /*
+                * Initial Secure Software ISSW memory
+                *
+                * This is probably only used if the kernel tries
+                * to actually call into trustzone to run secure
+                * applications, which the mainline kernel probably
+                * will not do on this old chipset. But you can never
+                * be too careful, so reserve this memory anyway.
+                */
+               ram@17f00000 {
+                       reg = <0x17f00000 0x00100000>;
+                       no-map;
+               };
+       };
 };
index 48bd8728ae27fd377931c2240883a050b56de517..287804e9e1836775364650c402bf01fdf4fcf742 100644 (file)
                                            200000 0>;
                };
        };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* Modem trace memory */
+               ram@06000000 {
+                       reg = <0x06000000 0x00f00000>;
+                       no-map;
+               };
+
+               /* Modem shared memory */
+               ram@06f00000 {
+                       reg = <0x06f00000 0x00100000>;
+                       no-map;
+               };
+
+               /* Modem private memory */
+               ram@07000000 {
+                       reg = <0x07000000 0x01000000>;
+                       no-map;
+               };
+
+               /*
+                * Initial Secure Software ISSW memory
+                *
+                * This is probably only used if the kernel tries
+                * to actually call into trustzone to run secure
+                * applications, which the mainline kernel probably
+                * will not do on this old chipset. But you can never
+                * be too careful, so reserve this memory anyway.
+                */
+               ram@17f00000 {
+                       reg = <0x17f00000 0x00100000>;
+                       no-map;
+               };
+       };
 };
diff --git a/arch/arm/boot/dts/ste-db9500.dtsi b/arch/arm/boot/dts/ste-db9500.dtsi
new file mode 100644 (file)
index 0000000..0afff70
--- /dev/null
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "ste-dbx5x0.dtsi"
+
+/ {
+       cpus {
+               cpu@300 {
+                       /* cpufreq controls */
+                       operating-points = <1152000 0
+                                           800000 0
+                                           400000 0
+                                           200000 0>;
+               };
+       };
+
+       reserved-memory {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /*
+                * Initial Secure Software ISSW memory
+                *
+                * This is probably only used if the kernel tries
+                * to actually call into trustzone to run secure
+                * applications, which the mainline kernel probably
+                * will not do on this old chipset. But you can never
+                * be too careful, so reserve this memory anyway.
+                */
+               ram@17f00000 {
+                       reg = <0x17f00000 0x00100000>;
+                       no-map;
+               };
+       };
+};
index be90e73c923ec255e156996f1b0990f5d37886c4..27d8a07718a001c31b9f38ef21c6c2f33b12425f 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-#include "ste-db8500.dtsi"
+#include "ste-db9500.dtsi"
 #include "ste-href-ab8500.dtsi"
 #include "ste-href-family-pinctrl.dtsi"
 
index 62ab23824a3e747e1525a5f1309f32fce51f9d2f..5088dd3a301b7d18110a3ed11716721003434860 100644 (file)
@@ -33,9 +33,9 @@
         * during TX anyway and that it only controls drive enable DE
         * line. Hence, the RX is always enabled here.
         */
-       rs485-rx-en {
+       rs485-rx-en-hog {
                gpio-hog;
-               gpios = <8 GPIO_ACTIVE_HIGH>;
+               gpios = <8 0>;
                output-low;
                line-name = "rs485-rx-en";
        };
@@ -61,9 +61,9 @@
         * order to reset the Hub when USB bus is powered down, but
         * so far there is no such functionality.
         */
-       usb-hub {
+       usb-hub-hog {
                gpio-hog;
-               gpios = <2 GPIO_ACTIVE_HIGH>;
+               gpios = <2 0>;
                output-high;
                line-name = "usb-hub-reset";
        };
        };
 };
 
+&i2c4 {
+       touchscreen@49 {
+               status = "disabled";
+       };
+};
+
 &i2c5 {        /* TP7/TP8 */
        pinctrl-names = "default";
        pinctrl-0 = <&i2c5_pins_a>;
         * are used for on-board microSD slot instead.
         */
        /delete-property/broken-cd;
-       cd-gpios = <&gpioi 10 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+       cd-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
        disable-wp;
 };
 
index 356150d28c420952d9941998092346616629f708..32700cca24c8207e3ae52a2a21694218ef877585 100644 (file)
@@ -43,9 +43,9 @@
         * in order to turn on port power when USB bus is powered up, but so
         * far there is no such functionality.
         */
-       usb-port-power {
+       usb-port-power-hog {
                gpio-hog;
-               gpios = <13 GPIO_ACTIVE_LOW>;
+               gpios = <13 0>;
                output-low;
                line-name = "usb-port-power";
        };
index ac46ab363e1b49807764b73441b96d164dd17304..daff5318f301ec0a3dfe7fbd2bb15a97dd0d3a8c 100644 (file)
        pinctrl-0 = <&sdmmc1_b4_pins_a &sdmmc1_dir_pins_a>;
        pinctrl-1 = <&sdmmc1_b4_od_pins_a &sdmmc1_dir_pins_a>;
        pinctrl-2 = <&sdmmc1_b4_sleep_pins_a &sdmmc1_dir_sleep_pins_a>;
-       broken-cd;
+       cd-gpios = <&gpiog 1 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+       disable-wp;
        st,sig-dir;
        st,neg-edge;
        st,use-ckin;
index 01ccff756996d83ac7d843f6fe61d7dcab5bad37..5740f9442705c47acd39883e57a0dca6efb1acd6 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&gmac_rgmii_pins>;
        phy-handle = <&phy1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-id";
        phy-supply = <&reg_gmac_3v3>;
        status = "okay";
 };
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644 (file)
index 0000000..ecc2322
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+       unsigned long kexec_start_address;
+       unsigned long kexec_indirection_page;
+       unsigned long kexec_mach_type;
+       unsigned long kexec_r2;
+};
+
+#endif
index 98daa7f483148bf5a972743dc4d569446fb3e023..7454480d084b2ec63a9ee0547eb94fa1aec56bd7 100644 (file)
 
                .align
 99:            .word   .
+#if defined(ZIMAGE)
+               .word   . + 4
+/*
+ * Storage for the state maintained by the macro.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the storage right here, since common.c
+ * isn't included in the decompressor build. This storage data gets put in
+ * .text even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+               /* Debug UART initialization required */
+               .word   1
+               /* Debug UART physical address */
+               .word   0
+               /* Debug UART virtual address */
+               .word   0
+#else
                .word   tegra_uart_config
+#endif
                .ltorg
 
                /* Load previously selected UART address */
 
                .macro  waituarttxrdy,rd,rx
                .endm
-
-/*
- * Storage for the state maintained by the macros above.
- *
- * In the kernel proper, this data is located in arch/arm/mach-tegra/tegra.c.
- * That's because this header is included from multiple files, and we only
- * want a single copy of the data. In particular, the UART probing code above
- * assumes it's running using physical addresses. This is true when this file
- * is included from head.o, but not when included from debug.o. So we need
- * to share the probe results between the two copies, rather than having
- * to re-run the probing again later.
- *
- * In the decompressor, we put the symbol/storage right here, since common.c
- * isn't included in the decompressor build. This symbol gets put in .text
- * even though it's really data, since .data is discarded from the
- * decompressor. Luckily, .text is writeable in the decompressor, unless
- * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
- */
-#if defined(ZIMAGE)
-tegra_uart_config:
-       /* Debug UART initialization required */
-       .word 1
-       /* Debug UART physical address */
-       .word 0
-       /* Debug UART virtual address */
-       .word 0
-#endif
index a1570c8bab25acd9bdbfbc7f26db705fa923f873..be8050b0c3dfbfb3e2d9e70705e014a0ec7ff915 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/glue-df.h>
 #include <asm/glue-pf.h>
 #include <asm/mach/arch.h>
@@ -170,5 +171,9 @@ int main(void)
   DEFINE(MPU_RGN_PRBAR,        offsetof(struct mpu_rgn, prbar));
   DEFINE(MPU_RGN_PRLAR,        offsetof(struct mpu_rgn, prlar));
 #endif
+  DEFINE(KEXEC_START_ADDR,     offsetof(struct kexec_relocate_data, kexec_start_address));
+  DEFINE(KEXEC_INDIR_PAGE,     offsetof(struct kexec_relocate_data, kexec_indirection_page));
+  DEFINE(KEXEC_MACH_TYPE,      offsetof(struct kexec_relocate_data, kexec_mach_type));
+  DEFINE(KEXEC_R2,             offsetof(struct kexec_relocate_data, kexec_r2));
   return 0; 
 }
index 5d84ad333f050aa6cd85446cedc3613a873d5e82..2b09dad7935eb4ed3496f77be46b5748bfdd7cc4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of_fdt.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
 #include <asm/fncpy.h>
 #include <asm/mach-types.h>
 #include <asm/smp_plat.h>
 extern void relocate_new_kernel(void);
 extern const unsigned int relocate_new_kernel_size;
 
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
 static atomic_t waiting_for_crash_ipi;
 
 /*
@@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
 void machine_kexec(struct kimage *image)
 {
        unsigned long page_list, reboot_entry_phys;
+       struct kexec_relocate_data *data;
        void (*reboot_entry)(void);
        void *reboot_code_buffer;
 
@@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
 
        reboot_code_buffer = page_address(image->control_code_page);
 
-       /* Prepare parameters for reboot_code_buffer*/
-       set_kernel_text_rw();
-       kexec_start_address = image->start;
-       kexec_indirection_page = page_list;
-       kexec_mach_type = machine_arch_type;
-       kexec_boot_atags = image->arch.kernel_r2;
-
        /* copy our kernel relocation code to the control code page */
        reboot_entry = fncpy(reboot_code_buffer,
                             &relocate_new_kernel,
                             relocate_new_kernel_size);
 
+       data = reboot_code_buffer + relocate_new_kernel_size;
+       data->kexec_start_address = image->start;
+       data->kexec_indirection_page = page_list;
+       data->kexec_mach_type = machine_arch_type;
+       data->kexec_r2 = image->arch.kernel_r2;
+
        /* get the identity mapping physical address for the reboot code */
        reboot_entry_phys = virt_to_idmap(reboot_entry);
 
index 72a08786e16eb0bbc172e144e9300c75a6b6f988..218d524360fcd3f3a752fd3168665d20e4507a2f 100644 (file)
@@ -5,14 +5,16 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 #include <asm/kexec.h>
 
        .align  3       /* not needed for this code, but keeps fncpy() happy */
 
 ENTRY(relocate_new_kernel)
 
-       ldr     r0,kexec_indirection_page
-       ldr     r1,kexec_start_address
+       adr     r7, relocate_new_kernel_end
+       ldr     r0, [r7, #KEXEC_INDIR_PAGE]
+       ldr     r1, [r7, #KEXEC_START_ADDR]
 
        /*
         * If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
 
 2:
        /* Jump to relocated kernel */
-       mov lr,r1
-       mov r0,#0
-       ldr r1,kexec_mach_type
-       ldr r2,kexec_boot_atags
- ARM(  ret lr  )
- THUMB(        bx lr           )
-
-       .align
-
-       .globl kexec_start_address
-kexec_start_address:
-       .long   0x0
-
-       .globl kexec_indirection_page
-kexec_indirection_page:
-       .long   0x0
-
-       .globl kexec_mach_type
-kexec_mach_type:
-       .long   0x0
-
-       /* phy addr of the atags for the new kernel */
-       .globl kexec_boot_atags
-kexec_boot_atags:
-       .long   0x0
+       mov     lr, r1
+       mov     r0, #0
+       ldr     r1, [r7, #KEXEC_MACH_TYPE]
+       ldr     r2, [r7, #KEXEC_R2]
+ ARM(  ret     lr      )
+ THUMB(        bx      lr      )
 
 ENDPROC(relocate_new_kernel)
 
+       .align  3
 relocate_new_kernel_end:
 
        .globl relocate_new_kernel_size
index 9d2e916121be4ebfc331b9771a553c66a6aa2e28..a3a38d0a4c853332102aefd01c02b0967e0ccc3f 100644 (file)
@@ -693,18 +693,20 @@ struct page *get_signal_page(void)
 
        addr = page_address(page);
 
+       /* Poison the entire page */
+       memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+                PAGE_SIZE / sizeof(u32));
+
        /* Give the signal return code some randomness */
        offset = 0x200 + (get_random_int() & 0x7fc);
        signal_return_offset = offset;
 
-       /*
-        * Copy signal return handlers into the vector page, and
-        * set sigreturn to be a pointer to these.
-        */
+       /* Copy signal return handlers into the page */
        memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
 
-       ptr = (unsigned long)addr + offset;
-       flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+       /* Flush out all instructions in this page */
+       ptr = (unsigned long)addr;
+       flush_icache_range(ptr, ptr + PAGE_SIZE);
 
        return page;
 }
index 416462e3f5d63676fe8f00abca7aaa94c17f3e90..f9713dc561cf77447dc1ad65cce249b3ea3ca0dd 100644 (file)
@@ -65,15 +65,15 @@ dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where,
        if (addr)
                switch (size) {
                case 1:
-                       asm("ldrb       %0, [%1, %2]"
+                       asm volatile("ldrb      %0, [%1, %2]"
                                : "=r" (v) : "r" (addr), "r" (where) : "cc");
                        break;
                case 2:
-                       asm("ldrh       %0, [%1, %2]"
+                       asm volatile("ldrh      %0, [%1, %2]"
                                : "=r" (v) : "r" (addr), "r" (where) : "cc");
                        break;
                case 4:
-                       asm("ldr        %0, [%1, %2]"
+                       asm volatile("ldr       %0, [%1, %2]"
                                : "=r" (v) : "r" (addr), "r" (where) : "cc");
                        break;
                }
@@ -99,17 +99,17 @@ dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where,
        if (addr)
                switch (size) {
                case 1:
-                       asm("strb       %0, [%1, %2]"
+                       asm volatile("strb      %0, [%1, %2]"
                                : : "r" (value), "r" (addr), "r" (where)
                                : "cc");
                        break;
                case 2:
-                       asm("strh       %0, [%1, %2]"
+                       asm volatile("strh      %0, [%1, %2]"
                                : : "r" (value), "r" (addr), "r" (where)
                                : "cc");
                        break;
                case 4:
-                       asm("str        %0, [%1, %2]"
+                       asm volatile("str       %0, [%1, %2]"
                                : : "r" (value), "r" (addr), "r" (where)
                                : "cc");
                        break;
index 1eabf2d2834be79c6a6a695262ca4f1af2edcfc2..e06f946b75b96a9455d34facbf019b16121edb82 100644 (file)
@@ -67,6 +67,7 @@
 #define MX6Q_CCM_CCR   0x0
 
        .align 3
+       .arm
 
        .macro  sync_l2_cache
 
index a720259099edfb4bd5c226b104a761dae341dfca..0a4c9b0b13b0cfbaa57c78388447746344253733 100644 (file)
@@ -203,6 +203,8 @@ static int osk_tps_setup(struct i2c_client *client, void *context)
         */
        gpio_request(OSK_TPS_GPIO_USB_PWR_EN, "n_vbus_en");
        gpio_direction_output(OSK_TPS_GPIO_USB_PWR_EN, 1);
+       /* Free the GPIO again as the driver will request it */
+       gpio_free(OSK_TPS_GPIO_USB_PWR_EN);
 
        /* Set GPIO 2 high so LED D3 is off by default */
        tps65010_set_gpio_out_value(GPIO2, HIGH);
index 4a59c169a11394a57bd4c52bed23b07ded5ef0cc..4178c0ee46eba5a63ee33f8923ea431268a76ced 100644 (file)
@@ -17,11 +17,10 @@ config ARCH_OMAP3
        bool "TI OMAP3"
        depends on ARCH_MULTI_V7
        select ARCH_OMAP2PLUS
-       select ARM_CPU_SUSPEND if PM
+       select ARM_CPU_SUSPEND
        select OMAP_HWMOD
        select OMAP_INTERCONNECT
-       select PM_OPP if PM
-       select PM if CPU_IDLE
+       select PM_OPP
        select SOC_HAS_OMAP2_SDRC
        select ARM_ERRATA_430973
 
@@ -30,7 +29,7 @@ config ARCH_OMAP4
        depends on ARCH_MULTI_V7
        select ARCH_OMAP2PLUS
        select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
-       select ARM_CPU_SUSPEND if PM
+       select ARM_CPU_SUSPEND
        select ARM_ERRATA_720789
        select ARM_GIC
        select HAVE_ARM_SCU if SMP
@@ -40,7 +39,7 @@ config ARCH_OMAP4
        select OMAP_INTERCONNECT_BARRIER
        select PL310_ERRATA_588369 if CACHE_L2X0
        select PL310_ERRATA_727915 if CACHE_L2X0
-       select PM_OPP if PM
+       select PM_OPP
        select PM if CPU_IDLE
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
@@ -50,7 +49,7 @@ config SOC_OMAP5
        bool "TI OMAP5"
        depends on ARCH_MULTI_V7
        select ARCH_OMAP2PLUS
-       select ARM_CPU_SUSPEND if PM
+       select ARM_CPU_SUSPEND
        select ARM_GIC
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_ARCH_TIMER
@@ -58,14 +57,14 @@ config SOC_OMAP5
        select OMAP_HWMOD
        select OMAP_INTERCONNECT
        select OMAP_INTERCONNECT_BARRIER
-       select PM_OPP if PM
+       select PM_OPP
        select ZONE_DMA if ARM_LPAE
 
 config SOC_AM33XX
        bool "TI AM33XX"
        depends on ARCH_MULTI_V7
        select ARCH_OMAP2PLUS
-       select ARM_CPU_SUSPEND if PM
+       select ARM_CPU_SUSPEND
 
 config SOC_AM43XX
        bool "TI AM43x"
@@ -79,13 +78,13 @@ config SOC_AM43XX
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
        select OMAP_INTERCONNECT
-       select ARM_CPU_SUSPEND if PM
+       select ARM_CPU_SUSPEND
 
 config SOC_DRA7XX
        bool "TI DRA7XX"
        depends on ARCH_MULTI_V7
        select ARCH_OMAP2PLUS
-       select ARM_CPU_SUSPEND if PM
+       select ARM_CPU_SUSPEND
        select ARM_GIC
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_ARCH_TIMER
@@ -94,7 +93,7 @@ config SOC_DRA7XX
        select OMAP_HWMOD
        select OMAP_INTERCONNECT
        select OMAP_INTERCONNECT_BARRIER
-       select PM_OPP if PM
+       select PM_OPP
        select ZONE_DMA if ARM_LPAE
        select PINCTRL_TI_IODELAY if OF && PINCTRL
 
@@ -112,9 +111,11 @@ config ARCH_OMAP2PLUS
        select OMAP_DM_TIMER
        select OMAP_GPMC
        select PINCTRL
-       select PM_GENERIC_DOMAINS if PM
-       select PM_GENERIC_DOMAINS_OF if PM
+       select PM
+       select PM_GENERIC_DOMAINS
+       select PM_GENERIC_DOMAINS_OF
        select RESET_CONTROLLER
+       select SIMPLE_PM_BUS
        select SOC_BUS
        select TI_SYSC
        select OMAP_IRQCHIP
@@ -140,7 +141,6 @@ config ARCH_OMAP2PLUS_TYPICAL
        select I2C_OMAP
        select MENELAUS if ARCH_OMAP2
        select NEON if CPU_V7
-       select PM
        select REGULATOR
        select REGULATOR_FIXED_VOLTAGE
        select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
index c8d317fafe2eaf21cd0f1926ddf48604f301d2e8..de37027ad75870dd1d0ffebca6901dca635a094c 100644 (file)
@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
                                 (cx->mpu_logic_state == PWRDM_POWER_OFF);
 
        /* Enter broadcast mode for periodic timers */
-       tick_broadcast_enable();
+       RCU_NONIDLE(tick_broadcast_enable());
 
        /* Enter broadcast mode for one-shot timers */
-       tick_broadcast_enter();
+       RCU_NONIDLE(tick_broadcast_enter());
 
        /*
         * Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
 
        if (dev->cpu == 0) {
                pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-               omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+               RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
 
                /*
                 * Call idle CPU cluster PM enter notifier chain
@@ -178,7 +178,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
                                index = 0;
                                cx = state_ptr + index;
                                pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-                               omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+                               RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
                                mpuss_can_lose_context = 0;
                        }
                }
@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
                    mpuss_can_lose_context)
                        gic_dist_disable();
 
-               clkdm_deny_idle(cpu_clkdm[1]);
-               omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
-               clkdm_allow_idle(cpu_clkdm[1]);
+               RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+               RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+               RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
 
                if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
                    mpuss_can_lose_context) {
@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
        cpu_pm_exit();
 
 cpu_pm_out:
-       tick_broadcast_exit();
+       RCU_NONIDLE(tick_broadcast_exit());
 
 fail:
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
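For context only, outside the patch itself: RCU_NONIDLE() brackets the wrapped statement so that RCU treats the CPU as momentarily non-idle, which keeps tracepoints and other RCU users inside the clockdomain/powerdomain helpers legal on the cpuidle entry path. A rough sketch of the idea, close to but not guaranteed identical to the kernel's definition in include/linux/rcupdate.h:

	/* Illustrative sketch of RCU_NONIDLE(a) */
	#define RCU_NONIDLE(a)				\
		do {					\
			rcu_irq_enter_irqson();		\
			do { a; } while (0);		\
			rcu_irq_exit_irqson();		\
		} while (0)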
index cd38bf07c094d568613a8503e37500dc03703f8d..2e3a10914c40aaa6cb47d6745c607e78f4dcade4 100644 (file)
@@ -522,6 +522,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
                       &dra7_ipu1_dsp_iommu_pdata),
 #endif
        /* Common auxdata */
+       OF_DEV_AUXDATA("simple-pm-bus", 0, NULL, omap_auxdata_lookup),
        OF_DEV_AUXDATA("ti,sysc", 0, NULL, &ti_sysc_pdata),
        OF_DEV_AUXDATA("pinctrl-single", 0, NULL, &pcs_pdata),
        OF_DEV_AUXDATA("ti,omap-prm-inst", 0, NULL, &ti_prm_pdata),
index ba1c6dfdc4b6b7d2f585556779f9eb6f676a6cb7..d945c84ab697ae944df92923d3e6ca18b3ba57aa 100644 (file)
                                      "timing-adjustment";
                        rx-fifo-depth = <4096>;
                        tx-fifo-depth = <2048>;
-                       resets = <&reset RESET_ETHERNET>;
-                       reset-names = "stmmaceth";
                        power-domains = <&pwrc PWRC_AXG_ETHERNET_MEM_ID>;
                        status = "disabled";
                };
index 9c90d562ada1a31e82756224bf6fc1b53af5078a..b858c5e43cc88fb1e593b28fe5a93a908331b196 100644 (file)
                                      "timing-adjustment";
                        rx-fifo-depth = <4096>;
                        tx-fifo-depth = <2048>;
-                       resets = <&reset RESET_ETHERNET>;
-                       reset-names = "stmmaceth";
                        status = "disabled";
 
                        mdio0: mdio {
                                interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                                dr_mode = "host";
                                snps,dis_u2_susphy_quirk;
-                               snps,quirk-frame-length-adjustment;
+                               snps,quirk-frame-length-adjustment = <0x20>;
                                snps,parkmode-disable-ss-quirk;
                        };
                };
index 726b91d3a905a5cfd9a9322654570a2074ff58d0..0edd137151f89eb6ba63695d28abdfaf18cf6434 100644 (file)
@@ -13,7 +13,6 @@
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/power/meson-gxbb-power.h>
-#include <dt-bindings/reset/amlogic,meson-gxbb-reset.h>
 #include <dt-bindings/thermal/thermal.h>
 
 / {
                        interrupt-names = "macirq";
                        rx-fifo-depth = <4096>;
                        tx-fifo-depth = <2048>;
-                       resets = <&reset RESET_ETHERNET>;
-                       reset-names = "stmmaceth";
                        power-domains = <&pwrc PWRC_GXBB_ETHERNET_MEM_ID>;
                        status = "disabled";
                };
index cf5a98f0e47c8e4bb1a12ce142a76cf0d889fe79..a712273c905afc72aff9a7e663ed64a81f234031 100644 (file)
@@ -52,7 +52,7 @@
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
 
-               gpio = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+               gpio = <&gpio_ao GPIOAO_3 GPIO_OPEN_DRAIN>;
                enable-active-high;
                regulator-always-on;
        };
index aef8f2b00778d718e360344f68ae9f0c71c639c5..5401a646c8406f2a4ef4d6d438920607b6eb5241 100644 (file)
@@ -4,11 +4,16 @@
  */
        usb {
                compatible = "simple-bus";
-               dma-ranges;
                #address-cells = <2>;
                #size-cells = <2>;
                ranges = <0x0 0x0 0x0 0x68500000 0x0 0x00400000>;
 
+               /*
+                * Internally, USB bus to the interconnect can only address up
+                * to 40-bit
+                */
+               dma-ranges = <0 0 0 0 0x100 0x0>;
+
                usbphy0: usb-phy@0 {
                        compatible = "brcm,sr-usb-combo-phy";
                        reg = <0x0 0x00000000 0x0 0x100>;
index 60ff19fa53b40fb0eab33454886f5a8a203949a5..6c8a61c2cc740406c7bbd29a38c6c61a115b91ff 100644 (file)
        reboot {
                compatible ="syscon-reboot";
                regmap = <&rst>;
-               offset = <0xb0>;
+               offset = <0>;
                mask = <0x02>;
        };
 
index 025e1f5876627eb84e2b676ca29232edca88cfbc..565934cbfa280feb07a3b91cd0bb5397e2181759 100644 (file)
 
                dcfg: dcfg@1ee0000 {
                        compatible = "fsl,ls1046a-dcfg", "syscon";
-                       reg = <0x0 0x1ee0000 0x0 0x10000>;
+                       reg = <0x0 0x1ee0000 0x0 0x1000>;
                        big-endian;
                };
 
index ee1790230490d418d603aaa19da047b231631000..2a79e89f821ee317cf775bc221d4b2fd2c3fc726 100644 (file)
                        #size-cells = <1>;
                        ranges;
 
-                       spba: bus@30000000 {
+                       spba: spba-bus@30000000 {
                                compatible = "fsl,spba-bus", "simple-bus";
                                #address-cells = <1>;
                                #size-cells = <1>;
index ecccfbb4f5ad6831c5d00af1f56201ad99fe5f9d..23f5a5e37167bfd431ae496e6be768f21747cc5e 100644 (file)
                                #gpio-cells = <2>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
-                               gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 0 144 4>;
+                               gpio-ranges = <&iomuxc 0 56 26>, <&iomuxc 26 144 4>;
                        };
 
                        gpio4: gpio@30230000 {
index 7cc236575ee203e58622f622fd4d85dc9b39a21b..c0b93813ea9acd42f66f9fa173b99ae9ae8b722d 100644 (file)
 &gcc {
        protected-clocks = <GCC_QSPI_CORE_CLK>,
                           <GCC_QSPI_CORE_CLK_SRC>,
-                          <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+                          <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+                          <GCC_LPASS_Q6_AXI_CLK>,
+                          <GCC_LPASS_SWAY_CLK>;
 };
 
 &gpu {
index 13fdd02cffe6320428486539059b89805229aeff..8b40f96e978025168752bcea8a1000e8944be647 100644 (file)
 &gcc {
        protected-clocks = <GCC_QSPI_CORE_CLK>,
                           <GCC_QSPI_CORE_CLK_SRC>,
-                          <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+                          <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+                          <GCC_LPASS_Q6_AXI_CLK>,
+                          <GCC_LPASS_SWAY_CLK>;
 };
 
 &gpu {
 &i2c3 {
        status = "okay";
        clock-frequency = <400000>;
+       /* Overwrite pinctrl-0 from sdm845.dtsi */
+       pinctrl-0 = <&qup_i2c3_default &i2c3_hid_active>;
 
        tsel: hid@15 {
                compatible = "hid-over-i2c";
                hid-descr-addr = <0x1>;
 
                interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
-               pinctrl-names = "default";
-               pinctrl-0 = <&i2c3_hid_active>;
        };
 
        tsc2: hid@2c {
                hid-descr-addr = <0x20>;
 
                interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
-
-               pinctrl-names = "default";
-               pinctrl-0 = <&i2c3_hid_active>;
-
-               status = "disabled";
        };
 };
 
index 2695ea8cda142c8600d21ebd93720ee519b1903c..64193292d26c32f8dbff6957634a9eb4124b7df3 100644 (file)
        vopl_mmu: iommu@ff470f00 {
                compatible = "rockchip,iommu";
                reg = <0x0 0xff470f00 0x0 0x100>;
-               interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "vopl_mmu";
                clocks = <&cru ACLK_VOPL>, <&cru HCLK_VOPL>;
                clock-names = "aclk", "iface";
index 2ee07d15a6e375e0f07cf8ed60354bcacb4c4d0a..1eecad724f04c74cea2e01369b528e9c7632ecd8 100644 (file)
        cpu-supply = <&vdd_arm>;
 };
 
+&display_subsystem {
+       status = "disabled";
+};
+
 &gmac2io {
        assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
        assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
index 06d48338c83622f5e3da4a938dde256bf56f7eb4..219b7507a10fb7732f1a1cbc56cc52c07ef0893b 100644 (file)
 &pcie0 {
        bus-scan-delay-ms = <1000>;
        ep-gpios = <&gpio2 RK_PD4 GPIO_ACTIVE_HIGH>;
-       max-link-speed = <2>;
        num-lanes = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&pcie_clkreqn_cpm>;
index f5dee5f447bbacb5b4f69fcf04f18e020ef86813..2551b238b97c6ab708ae5f2bd20701d10591cf2a 100644 (file)
                reg = <0x0 0xf8000000 0x0 0x2000000>,
                      <0x0 0xfd000000 0x0 0x1000000>;
                reg-names = "axi-base", "apb-base";
+               device_type = "pci";
                #address-cells = <3>;
                #size-cells = <2>;
                #interrupt-cells = <1>;
                                <0 0 0 2 &pcie0_intc 1>,
                                <0 0 0 3 &pcie0_intc 2>,
                                <0 0 0 4 &pcie0_intc 3>;
-               linux,pci-domain = <0>;
                max-link-speed = <1>;
                msi-map = <0x0 &its 0x0 0x1000>;
                phys = <&pcie_phy 0>, <&pcie_phy 1>,
                compatible = "rockchip,rk3399-vdec";
                reg = <0x0 0xff660000 0x0 0x400>;
                interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
-               interrupt-names = "vdpu";
                clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
                         <&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
                clock-names = "axi", "ahb", "cabac", "core";
index 838301650a794681264b5e3a75b46f7f759d14f9..01aa3eee90e8e75ee9b19e21cb9d9ea8a534a6da 100644 (file)
@@ -991,8 +991,6 @@ CONFIG_ARCH_TEGRA_210_SOC=y
 CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_ARCH_TEGRA_194_SOC=y
 CONFIG_ARCH_TEGRA_234_SOC=y
-CONFIG_ARCH_K3_AM6_SOC=y
-CONFIG_ARCH_K3_J721E_SOC=y
 CONFIG_TI_SCI_PM_DOMAINS=y
 CONFIG_EXTCON_PTN5150=m
 CONFIG_EXTCON_USB_GPIO=y
@@ -1078,7 +1076,7 @@ CONFIG_INTERCONNECT=y
 CONFIG_INTERCONNECT_QCOM=y
 CONFIG_INTERCONNECT_QCOM_MSM8916=m
 CONFIG_INTERCONNECT_QCOM_OSM_L3=m
-CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_INTERCONNECT_QCOM_SDM845=y
 CONFIG_INTERCONNECT_QCOM_SM8150=m
 CONFIG_INTERCONNECT_QCOM_SM8250=m
 CONFIG_EXT2_FS=y
index 18fce223b67b2746d08b1d5f64ee3c54e456989d..ff4732785c32f9650f381e9b6338e49df3ca64b4 100644 (file)
@@ -247,11 +247,13 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 
 
 /*
- * The linear kernel range starts at the bottom of the virtual address space.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
  */
-#define __is_lm_address(addr)  (((u64)(addr) & ~PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
+#define __is_lm_address(addr)  (((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))
 
-#define __lm_to_phys(addr)     (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
+#define __lm_to_phys(addr)     (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
 #define __kimg_to_phys(addr)   ((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({                                   \
@@ -330,7 +332,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
 
 #define virt_addr_valid(addr)  ({                                      \
-       __typeof__(addr) __addr = addr;                                 \
+       __typeof__(addr) __addr = __tag_reset(addr);                    \
        __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr));      \
 })
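For context only, not part of the patch: after __tag_reset() strips the pointer tag, __is_lm_address() is the usual unsigned range check, accepting only addresses that genuinely fall in [PAGE_OFFSET, PAGE_END). A hypothetical standalone restatement of that idiom:

	#include <stdbool.h>
	#include <stdint.h>

	/* An address lies in [base, base + size) iff the unsigned difference is
	 * below size; addresses below base wrap to a huge value and fail. */
	static inline bool in_linear_range(uint64_t addr, uint64_t base, uint64_t size)
	{
		return (addr - base) < size;
	}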
 
index 89c64ada8732413bc28ae5669651b0adf5efe702..66aac2881ba84ea06b2318b45f42919982c31c98 100644 (file)
@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
        unsigned long addr = instruction_pointer(regs);
        struct kprobe *cur = kprobe_running();
 
-       if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
-           && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+       if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+           ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
                kprobes_restore_local_irqflag(kcb, regs);
                post_kprobe_handler(cur, kcb, regs);
 
index 04c44853b103b1681d600d853f5d10160147552c..fe60d25c000e4f7148fd36e762932f4e923745f4 100644 (file)
@@ -1396,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
         * Calculate the raw per-cpu offset without a translation from the
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
+        * Also drop the KASAN tag which gets in the way...
         */
-       params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+       params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
                            (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
        params->mair_el2 = read_sysreg(mair_el1);
index 31b060a4404524720cf5ae05f1e21014a35de0c6..b17bf19217f1100b962bb5794c05a9214262cb54 100644 (file)
@@ -47,6 +47,8 @@ __invalid:
        b       .
 
        /*
+        * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
+        *
         * x0: SMCCC function ID
         * x1: struct kvm_nvhe_init_params PA
         */
@@ -70,9 +72,9 @@ __do_hyp_init:
        eret
 
 1:     mov     x0, x1
-       mov     x4, lr
-       bl      ___kvm_hyp_init
-       mov     lr, x4
+       mov     x3, lr
+       bl      ___kvm_hyp_init                 // Clobbers x0..x2
+       mov     lr, x3
 
        /* Hello, World! */
        mov     x0, #SMCCC_RET_SUCCESS
@@ -82,8 +84,8 @@ SYM_CODE_END(__kvm_hyp_init)
 /*
  * Initialize the hypervisor in EL2.
  *
- * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers
- * and leave x4 for the caller.
+ * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
+ * and leave x3 for the caller.
  *
  * x0: struct kvm_nvhe_init_params PA
  */
@@ -112,9 +114,9 @@ alternative_else_nop_endif
        /*
         * Set the PS bits in TCR_EL2.
         */
-       ldr     x1, [x0, #NVHE_INIT_TCR_EL2]
-       tcr_compute_pa_size x1, #TCR_EL2_PS_SHIFT, x2, x3
-       msr     tcr_el2, x1
+       ldr     x0, [x0, #NVHE_INIT_TCR_EL2]
+       tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
+       msr     tcr_el2, x0
 
        isb
 
@@ -193,7 +195,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
 
        /* Enable MMU, set vectors and stack. */
        mov     x0, x28
-       bl      ___kvm_hyp_init                 // Clobbers x0..x3
+       bl      ___kvm_hyp_init                 // Clobbers x0..x2
 
        /* Leave idmap. */
        mov     x0, x29
index e3947846ffcb9acd8d4528190741fdb48325e711..8e7128cb76678f8697abc0bfe4a9e7c57a899afd 100644 (file)
@@ -77,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
                         cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
 }
 
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
-       psci_forward(host_ctxt);
-       hyp_panic(); /* unreachable */
-}
-
 static unsigned int find_cpu_id(u64 mpidr)
 {
        unsigned int i;
@@ -251,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
                return psci_forward(host_ctxt);
+       /*
+        * SYSTEM_OFF/RESET should not return according to the spec.
+        * Allow it so as to stay robust to broken firmware.
+        */
        case PSCI_0_2_FN_SYSTEM_OFF:
        case PSCI_0_2_FN_SYSTEM_RESET:
-               psci_forward_noreturn(host_ctxt);
-               unreachable();
+               return psci_forward(host_ctxt);
        case PSCI_0_2_FN64_CPU_SUSPEND:
                return psci_cpu_suspend(func_id, host_ctxt);
        case PSCI_0_2_FN64_CPU_ON:
index 4ad66a532e38b37007fd75610e15a426b7123bd8..247422ac78a9e5c01e543146076428d81db061fc 100644 (file)
@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
-       int base, i;
+       int base, i, nr_events;
 
        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
@@ -801,13 +801,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        if (!bmap)
                return val;
 
+       nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
        for (i = 0; i < 32; i += 8) {
                u64 byte;
 
                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
-               byte = bitmap_get_value8(bmap, 0x4000 + base + i);
-               mask |= byte << (32 + i);
+               if (nr_events >= (0x4000 + base + 32)) {
+                       byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+                       mask |= byte << (32 + i);
+               }
        }
 
        return val & mask;
index 42ccc27fb684cad8bce5c5587fa5291468ee0b92..7c4f79532406b5ca865e4fdaf37bcc8e5cea9bd2 100644 (file)
  * 64bit interface.
  */
 
+#define reg_to_encoding(x)                                             \
+       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
+               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
@@ -273,8 +277,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
                          const struct sys_reg_desc *r)
 {
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-       u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 sr = reg_to_encoding(r);
 
        if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
                kvm_inject_undefined(vcpu);
@@ -590,6 +593,15 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }
 
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+                                  const struct sys_reg_desc *r)
+{
+       if (kvm_vcpu_has_pmu(vcpu))
+               return 0;
+
+       return REG_HIDDEN;
+}
+
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 pmcr, val;
@@ -613,9 +625,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-       bool enabled = kvm_vcpu_has_pmu(vcpu);
+       bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
-       enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
        if (!enabled)
                kvm_inject_undefined(vcpu);
 
@@ -900,11 +911,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
 {
-       if (!kvm_vcpu_has_pmu(vcpu)) {
-               kvm_inject_undefined(vcpu);
-               return false;
-       }
-
        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu)) {
                        kvm_inject_undefined(vcpu);
@@ -921,10 +927,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        return true;
 }
 
-#define reg_to_encoding(x)                                             \
-       sys_reg((u32)(x)->Op0, (u32)(x)->Op1,                           \
-               (u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
@@ -936,15 +938,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
          trap_wcr, reset_wcr, 0, 0,  get_wcr, set_wcr }
 
+#define PMU_SYS_REG(r)                                         \
+       SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)                                            \
-       { SYS_DESC(SYS_PMEVCNTRn_EL0(n)),                                       \
-         access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+       { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
+         .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)                                           \
-       { SYS_DESC(SYS_PMEVTYPERn_EL0(n)),                                      \
-         access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+       { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                           \
+         .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
 
 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
@@ -1020,8 +1025,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
                struct sys_reg_desc const *r, bool raz)
 {
-       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 id = reg_to_encoding(r);
        u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
        if (id == SYS_ID_AA64PFR0_EL1) {
@@ -1062,8 +1066,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
 {
-       u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-                        (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+       u32 id = reg_to_encoding(r);
 
        switch (id) {
        case SYS_ID_AA64ZFR0_EL1:
@@ -1486,8 +1489,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
 
-       { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-       { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+       { PMU_SYS_REG(SYS_PMINTENSET_EL1),
+         .access = access_pminten, .reg = PMINTENSET_EL1 },
+       { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+         .access = access_pminten, .reg = PMINTENSET_EL1 },
 
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1526,23 +1531,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
        { SYS_DESC(SYS_CTR_EL0), access_ctr },
 
-       { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
-       { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
-       { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
-       { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
-       { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
-       { SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
-       { SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
-       { SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
-       { SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+       { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+         .reset = reset_pmcr, .reg = PMCR_EL0 },
+       { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+         .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+       { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+         .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+       { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+         .access = access_pmovs, .reg = PMOVSSET_EL0 },
+       { PMU_SYS_REG(SYS_PMSWINC_EL0),
+         .access = access_pmswinc, .reg = PMSWINC_EL0 },
+       { PMU_SYS_REG(SYS_PMSELR_EL0),
+         .access = access_pmselr, .reg = PMSELR_EL0 },
+       { PMU_SYS_REG(SYS_PMCEID0_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCEID1_EL0),
+         .access = access_pmceid, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMCCNTR_EL0),
+         .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+       { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+         .access = access_pmu_evtyper, .reset = NULL },
+       { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+         .access = access_pmu_evcntr, .reset = NULL },
        /*
         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
-       { SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
-       { SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+       { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+         .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+       { PMU_SYS_REG(SYS_PMOVSSET_EL0),
+         .access = access_pmovs, .reg = PMOVSSET_EL0 },
 
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -1694,7 +1712,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
-       { SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+       { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+         .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
 
        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
index 3c40da479899dbb667f427ce8206c52d35e0c16e..35d75c60e2b8df0ed065e6090082c498d25897f7 100644 (file)
@@ -709,10 +709,11 @@ static int do_tag_check_fault(unsigned long far, unsigned int esr,
                              struct pt_regs *regs)
 {
        /*
-        * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
-        * check faults. Mask them out now so that userspace doesn't see them.
+        * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+        * for tag check faults. Set them to corresponding bits in the untagged
+        * address.
         */
-       far &= (1UL << 60) - 1;
+       far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
        do_bad_area(far, esr, regs);
        return 0;
 }
index 67a9ba9eaa96bd1b900732e6c134010a5bf9c3a6..cde44c13dda1bc0924d2d50369ad87f544a0a802 100644 (file)
@@ -9,7 +9,7 @@
 
 phys_addr_t __virt_to_phys(unsigned long x)
 {
-       WARN(!__is_lm_address(x),
+       WARN(!__is_lm_address(__tag_reset(x)),
             "virt_to_phys used for non-linear address: %pK (%pS)\n",
              (void *)x,
              (void *)x);
index dd8c166ffd7b5ffbf56e3217a209512745d7e65b..42ed5248fae9876875a71f5b66c40a0a884c8749 100644 (file)
@@ -3,6 +3,7 @@
 #define _ASM_IA64_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
+#include <asm/page.h>
 /*
  * SECTION_SIZE_BITS            2^N: how big each section will be
  * MAX_PHYSMEM_BITS             2^N: how much memory we can have in that space
index d69c979936d41e4a3225091efcd31b46cde5c230..5d90307fd6e073642313b8f0d0c2e4386ea2e717 100644 (file)
@@ -54,7 +54,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 })
 
 #define xchg(ptr, x)                                                   \
-((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 
 /*
  * Atomic compare and exchange.  Compare OLD with MEM, if identical,
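For context only, not part of the patch: the likely point of the ({ ... }) wrapper is that a bare cast expression used as a statement can trip gcc's -Wunused-value when a caller ignores xchg()'s return value, whereas a statement expression whose result is discarded does not warn. A small self-contained illustration with hypothetical names:

	static int slot;
	static int swap_in(int v) { int old = slot; slot = v; return old; }

	#define XCHG_CAST(v)	((int) swap_in(v))	/* old-style: cast expression */
	#define XCHG_STMT(v)	({ (int) swap_in(v); })	/* new-style: statement expression */

	void unlock_demo(void)
	{
		XCHG_CAST(0);	/* gcc -Wunused-value typically warns here */
		XCHG_STMT(0);	/* discarded statement-expression result: silent */
	}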
index ed9fc3d057a6db70de9e4a72b7b02d252e9b6ab0..43e8050145bef2bf45a8b5f11b34b4e281ad4e64 100644 (file)
@@ -171,29 +171,34 @@ void vtime_account_hardirq(struct task_struct *tsk)
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
-       unsigned long cur_itm, new_itm, ticks;
+       unsigned long new_itm;
 
        if (cpu_is_offline(smp_processor_id())) {
                return IRQ_HANDLED;
        }
 
        new_itm = local_cpu_data->itm_next;
-       cur_itm = ia64_get_itc();
 
-       if (!time_after(cur_itm, new_itm)) {
+       if (!time_after(ia64_get_itc(), new_itm))
                printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
-                      cur_itm, new_itm);
-               ticks = 1;
-       } else {
-               ticks = DIV_ROUND_UP(cur_itm - new_itm,
-                                    local_cpu_data->itm_delta);
-               new_itm += ticks * local_cpu_data->itm_delta;
-       }
+                      ia64_get_itc(), new_itm);
+
+       while (1) {
+               new_itm += local_cpu_data->itm_delta;
+
+               legacy_timer_tick(smp_processor_id() == time_keeper_id);
 
-       if (smp_processor_id() != time_keeper_id)
-               ticks = 0;
+               local_cpu_data->itm_next = new_itm;
 
-       legacy_timer_tick(ticks);
+               if (time_after(new_itm, ia64_get_itc()))
+                       break;
+
+               /*
+                * Allow IPIs to interrupt the timer loop.
+                */
+               local_irq_enable();
+               local_irq_disable();
+       }
 
        do {
                /*
index 19edf8e699712be0f39a08ad4c97b12189984cf0..292d0425717f36dda8ff83d427ad2059560e1180 100644 (file)
@@ -51,6 +51,7 @@ extern void kmap_flush_tlb(unsigned long addr);
 
 #define flush_cache_kmaps()    BUG_ON(cpu_has_dc_aliases)
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) set_pte(ptep, ptev)
 #define arch_kmap_local_post_map(vaddr, pteval)        local_flush_tlb_one(vaddr)
 #define arch_kmap_local_post_unmap(vaddr)      local_flush_tlb_one(vaddr)
 
index 7d6b4a77b379d8e2f14aeb862128e481e8b20850..c298061c70a7ee2ed6b1a71db0d5399164e45a11 100644 (file)
@@ -31,7 +31,7 @@
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 #define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 
 #include <asm-generic/io.h>
 
index 5aed97a18bac934dd7e0abfb1e9b5d44c99dcfec..daae13a76743be694d295c570419dcf31dc2c7fb 100644 (file)
@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
        /* If the page is from the fixmap pool then we just clear out
         * the fixmap mapping.
index 78b17621ee4a5e5a67588b522fe5a13c115e84b3..278462186ac4759c43c704d6d07d5f6189a663d9 100644 (file)
@@ -202,9 +202,8 @@ config PREFETCH
        depends on PA8X00 || PA7200
 
 config MLONGCALLS
-       bool "Enable the -mlong-calls compiler option for big kernels"
-       default y if !MODULES || UBSAN || FTRACE
-       default n
+       def_bool y if !MODULES || UBSAN || FTRACE
+       bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
        depends on PA8X00
        help
          If you configure the kernel to include many drivers built-in instead
index 959e79cd2c1488a08f5cd9a0aacb52c933f2eb0e..378f63c4015b49921a34cdfdd2612e79894a0df5 100644 (file)
@@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
 extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
 
-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
 #endif /* _ASM_PARISC_IRQ_H */
index beba9816cc6c160b7f573f4e19224871894201eb..4d37cc9cba37c4b9c3b62d55fc9e39964e4d34cd 100644 (file)
@@ -997,10 +997,17 @@ intr_do_preempt:
        bb,<,n  %r20, 31 - PSW_SM_I, intr_restore
        nop
 
+       /* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+       ldil    L%intr_restore, %r2
+       load32  preempt_schedule_irq, %r1
+       bv      %r0(%r1)
+       ldo     R%intr_restore(%r2), %r2
+#else
+       ldil    L%intr_restore, %r1
        BL      preempt_schedule_irq, %r2
-       nop
-
-       b,n     intr_restore            /* ssm PSW_SM_I done by intr_restore */
+       ldo     R%intr_restore(%r1), %r2
+#endif
 #endif /* CONFIG_PREEMPTION */
 
        /*
index 1d32b174ab6aec3fb5403c11bddd7f938a78c2e8..c1a8aac01cf91d2917e923044cef69886f6eb367 100644 (file)
        nop;                                                            \
        nop;
 
+#define SCV_ENTRY_FLUSH_SLOT                                           \
+       SCV_ENTRY_FLUSH_FIXUP_SECTION;                                  \
+       nop;                                                            \
+       nop;                                                            \
+       nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
        STF_ENTRY_BARRIER_SLOT;                                         \
        ENTRY_FLUSH_SLOT
 
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL                                                \
+       STF_ENTRY_BARRIER_SLOT;                                         \
+       SCV_ENTRY_FLUSH_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *
index f6d2acb574252e4810c8cff1455b23cf8cb2cedf..ac605fc369c42326d8537d0be9239e41dc937c6f 100644 (file)
@@ -240,6 +240,14 @@ label##3:                                          \
        FTR_ENTRY_OFFSET 957b-958b;                     \
        .popsection;
 
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION                  \
+957:                                                   \
+       .pushsection __scv_entry_flush_fixup,"a";       \
+       .align 2;                                       \
+958:                                                   \
+       FTR_ENTRY_OFFSET 957b-958b;                     \
+       .popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION                                \
 951:                                                   \
        .pushsection __rfi_flush_fixup,"a";             \
@@ -273,10 +281,12 @@ label##3:                                         \
 
 extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
index 80a5ae771c65324d31fa45981f458e053c07f0f7..c0fcd1bbdba98078dbe28982bb6ca7e6a5a02ba2 100644 (file)
@@ -58,6 +58,8 @@ extern pte_t *pkmap_page_table;
 
 #define flush_cache_kmaps()    flush_cache_all()
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+       __set_pte_at(mm, vaddr, ptep, ptev, 1)
 #define arch_kmap_local_post_map(vaddr, pteval)        \
        local_flush_tlb_page(NULL, vaddr)
 #define arch_kmap_local_post_unmap(vaddr)      \
index fe2ef598e2ead0a5f429cd30a90e0edd48e22664..79ee7750937db06bade246afd710016a6fef2f1b 100644 (file)
@@ -51,7 +51,7 @@ obj-y                         += ptrace/
 obj-$(CONFIG_PPC64)            += setup_64.o \
                                   paca.o nvram_64.o note.o syscall_64.o
 obj-$(CONFIG_COMPAT)           += sys_ppc32.o signal_32.o
-obj-$(CONFIG_VDSO32)           += vdso32/
+obj-$(CONFIG_VDSO32)           += vdso32_wrapper.o
 obj-$(CONFIG_PPC_WATCHDOG)     += watchdog.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)       += hw_breakpoint.o
 obj-$(CONFIG_PPC_DAWR)         += dawr.o
@@ -60,7 +60,7 @@ obj-$(CONFIG_PPC_BOOK3S_64)   += cpu_setup_power.o
 obj-$(CONFIG_PPC_BOOK3S_64)    += mce.o mce_power.o
 obj-$(CONFIG_PPC_BOOK3E_64)    += exceptions-64e.o idle_book3e.o
 obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
-obj-$(CONFIG_PPC64)            += vdso64/
+obj-$(CONFIG_PPC64)            += vdso64_wrapper.o
 obj-$(CONFIG_ALTIVEC)          += vecemu.o
 obj-$(CONFIG_PPC_BOOK3S_IDLE)  += idle_book3s.o
 procfs-y                       := proc_powerpc.o
index aa1af139d947297fdac16126fdb7b48d4d189a2b..33ddfeef4fe9ed4a8eb13f38964cc195e896d7c4 100644 (file)
@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
        bne     .Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-       INTERRUPT_TO_KERNEL
+       SCV_INTERRUPT_TO_KERNEL
        mr      r10,r1
        ld      r1,PACAKSAVE(r13)
        std     r10,0(r1)
index e02ad6fefa46cc4232199fddac38353c5433246b..6e53f76387374bc7f0485246bbc018f7ed21947f 100644 (file)
@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
        ld      r11,PACA_EXRFI+EX_R11(r13)
        blr
 
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+       li      r10,0
+       mtmsrd  r10,1
+       lbz     r10,PACAIRQHAPPENED(r13)
+       ori     r10,r10,PACA_IRQ_HARD_DIS
+       stb     r10,PACAIRQHAPPENED(r13)
+       std     r11,PACA_EXRFI+EX_R11(r13)
+       L1D_DISPLACEMENT_FLUSH
+       ld      r11,PACA_EXRFI+EX_R11(r13)
+       li      r10,MSR_RI
+       mtmsrd  r10,1
+       blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
        SET_SCRATCH0(r13);
        GET_PACA(r13);
index 6b1eca53e36cc834b853822a9ee7faf886bf7e04..cc7a6271b6b4ec5408bfd2ed07a41432f0cc5a37 100644 (file)
@@ -180,13 +180,18 @@ void notrace restore_interrupts(void)
 
 void replay_soft_interrupts(void)
 {
+       struct pt_regs regs;
+
        /*
-        * We use local_paca rather than get_paca() to avoid all
-        * the debug_smp_processor_id() business in this low level
-        * function
+        * Be careful here, calling these interrupt handlers can cause
+        * softirqs to be raised, which they may run when calling irq_exit,
+        * which will cause local_irq_enable() to be run, which can then
+        * recurse into this function. Don't keep any state across
+        * interrupt handler calls which may change underneath us.
+        *
+        * We use local_paca rather than get_paca() to avoid all the
+        * debug_smp_processor_id() business in this low level function.
         */
-       unsigned char happened = local_paca->irq_happened;
-       struct pt_regs regs;
 
        ppc_save_regs(&regs);
        regs.softe = IRQS_ENABLED;
@@ -209,7 +214,7 @@ again:
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
-       if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
+       if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
                local_paca->irq_happened &= ~PACA_IRQ_HMI;
                regs.trap = 0xe60;
                handle_hmi_exception(&regs);
@@ -217,7 +222,7 @@ again:
                        hard_irq_disable();
        }
 
-       if (happened & PACA_IRQ_DEC) {
+       if (local_paca->irq_happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                regs.trap = 0x900;
                timer_interrupt(&regs);
@@ -225,7 +230,7 @@ again:
                        hard_irq_disable();
        }
 
-       if (happened & PACA_IRQ_EE) {
+       if (local_paca->irq_happened & PACA_IRQ_EE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE;
                regs.trap = 0x500;
                do_IRQ(&regs);
@@ -233,7 +238,7 @@ again:
                        hard_irq_disable();
        }
 
-       if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
+       if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                if (IS_ENABLED(CONFIG_PPC_BOOK3E))
                        regs.trap = 0x280;
@@ -245,7 +250,7 @@ again:
        }
 
        /* Book3E does not support soft-masking PMI interrupts */
-       if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
+       if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
                local_paca->irq_happened &= ~PACA_IRQ_PMI;
                regs.trap = 0xf00;
                performance_monitor_exception(&regs);
@@ -253,8 +258,7 @@ again:
                        hard_irq_disable();
        }
 
-       happened = local_paca->irq_happened;
-       if (happened & ~PACA_IRQ_HARD_DIS) {
+       if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
                /*
                 * We are responding to the next interrupt, so interrupt-off
                 * latencies should be reset here.
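For context only, not part of the patch: the handlers called above can enable interrupts, run softirqs and re-enter replay_soft_interrupts(), so a flags word cached at entry can go stale; testing local_paca->irq_happened directly each time avoids acting on an outdated snapshot. A toy, self-contained illustration of the hazard (ordinary C, hypothetical names):

	#include <stdio.h>

	static unsigned char irq_happened = 0x01;	/* one event pending at entry */

	static void handler_that_recurses(void)
	{
		irq_happened |= 0x08;			/* recursion records a new pending event */
	}

	int main(void)
	{
		unsigned char cached = irq_happened;	/* stale snapshot */

		handler_that_recurses();

		/* The live field sees the new event; the cached copy does not. */
		printf("cached=%#x live=%#x\n", cached, irq_happened);
		return 0;
	}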
index 9cb6f524854b9d6f480e32f3cf346fe7d7a57071..7d9a6fee0e3dc1a01e066314721f09e732af8ae6 100644 (file)
@@ -30,7 +30,7 @@ CC32FLAGS += -m32
 KBUILD_CFLAGS := $(filter-out -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc,$(KBUILD_CFLAGS))
 endif
 
-targets := $(obj-vdso32) vdso32.so.dbg
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday.o
 obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
 
 GCOV_PROFILE := n
@@ -46,9 +46,6 @@ obj-y += vdso32_wrapper.o
 targets += vdso32.lds
 CPPFLAGS_vdso32.lds += -P -C -Upowerpc
 
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so.dbg
-
 # link rule for the .so file, .lds has to be first
 $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday.o FORCE
        $(call if_changed,vdso32ld_and_check)
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
deleted file mode 100644 (file)
index 3f5ef03..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/page.h>
-
-       __PAGE_ALIGNED_DATA
-
-       .globl vdso32_start, vdso32_end
-       .balign PAGE_SIZE
-vdso32_start:
-       .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
-       .balign PAGE_SIZE
-vdso32_end:
-
-       .previous
diff --git a/arch/powerpc/kernel/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32_wrapper.S
new file mode 100644 (file)
index 0000000..3f5ef03
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+       __PAGE_ALIGNED_DATA
+
+       .globl vdso32_start, vdso32_end
+       .balign PAGE_SIZE
+vdso32_start:
+       .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
+       .balign PAGE_SIZE
+vdso32_end:
+
+       .previous
index bf363ff371521c65477853f89c8f4a16d3a0d3de..2813e3f98db65611e0c83ac1891fe5b30b445820 100644 (file)
@@ -17,7 +17,7 @@ endif
 
 # Build rules
 
-targets := $(obj-vdso64) vdso64.so.dbg
+targets := $(obj-vdso64) vdso64.so.dbg vgettimeofday.o
 obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
 
 GCOV_PROFILE := n
@@ -29,15 +29,9 @@ ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
        -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
 asflags-y := -D__VDSO64__ -s
 
-obj-y += vdso64_wrapper.o
 targets += vdso64.lds
 CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
 
-$(obj)/vgettimeofday.o: %.o: %.c FORCE
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so.dbg
-
 # link rule for the .so file, .lds has to be first
 $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday.o FORCE
        $(call if_changed,vdso64ld_and_check)
index bbf68cd01088b5ab04092c099c922709c44eef1f..2d4067561293ea76645e57ba5748819992108ad8 100644 (file)
 
        .text
 
+/*
+ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
+ * are one function split in two parts. The kernel jumps to the former
+ * and the signal handler indirectly (by blr) returns to the latter.
+ * __kernel_sigtramp_rt64 needs to point to the return address so
+ * glibc can correctly identify the trampoline stack frame.
+ */
        .balign 8
        .balign IFETCH_ALIGN_BYTES
-V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
 .Lsigrt_start:
        bctrl   /* call the handler */
+V_FUNCTION_END(__kernel_start_sigtramp_rt64)
+V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
        addi    r1, r1, __SIGNAL_FRAMESIZE
        li      r0,__NR_rt_sigreturn
        sc
index 6164d1a1ba11b7d8d0a303411cd9519b41089c60..2f3c359cacd3a850a373141f37a46a9238fbd58d 100644 (file)
@@ -131,4 +131,4 @@ VERSION
 /*
  * Make the sigreturn code visible to the kernel.
  */
-VDSO_sigtramp_rt64     = __kernel_sigtramp_rt64;
+VDSO_sigtramp_rt64     = __kernel_start_sigtramp_rt64;
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
deleted file mode 100644 (file)
index 1d56d81..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/linkage.h>
-#include <asm/page.h>
-
-       __PAGE_ALIGNED_DATA
-
-       .globl vdso64_start, vdso64_end
-       .balign PAGE_SIZE
-vdso64_start:
-       .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
-       .balign PAGE_SIZE
-vdso64_end:
-
-       .previous
diff --git a/arch/powerpc/kernel/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64_wrapper.S
new file mode 100644 (file)
index 0000000..1d56d81
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+       __PAGE_ALIGNED_DATA
+
+       .globl vdso64_start, vdso64_end
+       .balign PAGE_SIZE
+vdso64_start:
+       .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
+       .balign PAGE_SIZE
+vdso64_end:
+
+       .previous
index 4ab426b8b0e02276423241b25d2386eedc24b90e..72fa3c00229a56ebfab492c563a2678c8fbc8bc2 100644 (file)
@@ -145,6 +145,13 @@ SECTIONS
                __stop___entry_flush_fixup = .;
        }
 
+       . = ALIGN(8);
+       __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+               __start___scv_entry_flush_fixup = .;
+               *(__scv_entry_flush_fixup)
+               __stop___scv_entry_flush_fixup = .;
+       }
+
        . = ALIGN(8);
        __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
                __start___stf_exit_barrier_fixup = .;
index 47821055b94c933851a3a213f25238707bbc3d9a..1fd31b4b0e139e36715f49fbd2928f5384d6ed3b 100644 (file)
@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        long *start, *end;
        int i;
 
-       start = PTRRELOC(&__start___entry_flush_fixup);
-       end = PTRRELOC(&__stop___entry_flush_fixup);
-
        instrs[0] = 0x60000000; /* nop */
        instrs[1] = 0x60000000; /* nop */
        instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
        if (types & L1D_FLUSH_MTTRIG)
                instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
 
+       start = PTRRELOC(&__start___entry_flush_fixup);
+       end = PTRRELOC(&__stop___entry_flush_fixup);
        for (i = 0; start < end; start++, i++) {
                dest = (void *)start + *start;
 
@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
                patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
        }
 
+       start = PTRRELOC(&__start___scv_entry_flush_fixup);
+       end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+       for (; start < end; start++, i++) {
+               dest = (void *)start + *start;
+
+               pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+               patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+               if (types == L1D_FLUSH_FALLBACK)
+                       patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+                                    BRANCH_SET_LINK);
+               else
+                       patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+               patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+       }
+
+
        printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
                (types == L1D_FLUSH_NONE)       ? "no" :
                (types == L1D_FLUSH_FALLBACK)   ? "fallback displacement" :
index bf7a7d62ae8b5d5bb367926ebb3dd120ff76a4b7..ede093e9623472f1f9652d98843ef9f48fcc7aaa 100644 (file)
@@ -818,13 +818,15 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
                        break;
                if (rev) {
                        /* reverse 32 bytes */
-                       buf.d[0] = byterev_8(reg->d[3]);
-                       buf.d[1] = byterev_8(reg->d[2]);
-                       buf.d[2] = byterev_8(reg->d[1]);
-                       buf.d[3] = byterev_8(reg->d[0]);
-                       reg = &buf;
+                       union vsx_reg buf32[2];
+                       buf32[0].d[0] = byterev_8(reg[1].d[1]);
+                       buf32[0].d[1] = byterev_8(reg[1].d[0]);
+                       buf32[1].d[0] = byterev_8(reg[0].d[1]);
+                       buf32[1].d[1] = byterev_8(reg[0].d[0]);
+                       memcpy(mem, buf32, size);
+               } else {
+                       memcpy(mem, reg, size);
                }
-               memcpy(mem, reg, size);
                break;
        case 16:
                /* stxv, stxvx, stxvl, stxvll */
index e9e2c1f0a690594389a2d034a56f59f8a376a89f..e0a34eb5ed3b37a199d44fd661a4dabd57be603b 100644 (file)
@@ -252,8 +252,10 @@ choice
        default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
 
        config MAXPHYSMEM_1GB
+               depends on 32BIT
                bool "1GiB"
        config MAXPHYSMEM_2GB
+               depends on 64BIT && CMODEL_MEDLOW
                bool "2GiB"
        config MAXPHYSMEM_128GB
                depends on 64BIT && CMODEL_MEDANY
index 2d50f76efe4819ffa8645ac1aa1d1c7473fe400f..64a675c5c30acb2c9806645ba9f3f5e7297cef21 100644 (file)
@@ -135,7 +135,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
 
 #endif /* __ASSEMBLY__ */
 
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr) ({                                              \
+       unsigned long _addr = (unsigned long)vaddr;                             \
+       (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
 
 #define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_NON_EXEC
 
index 211eb8244a454bd4038e6a9867643d4232b3fd4e..8b80c80c7f1acad9d9e1b3cb29d28a005fd83cd3 100644 (file)
@@ -32,14 +32,14 @@ bool kernel_page_present(struct page *page);
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_STRICT_KERNEL_RWX
 #ifdef CONFIG_64BIT
 #define SECTION_ALIGN (1 << 21)
 #else
 #define SECTION_ALIGN (1 << 22)
 #endif
-#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#else /* !CONFIG_STRICT_KERNEL_RWX */
 #define SECTION_ALIGN L1_CACHE_BYTES
-#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#endif /* CONFIG_STRICT_KERNEL_RWX */
 
 #endif /* _ASM_RISCV_SET_MEMORY_H */
index 3fa3f26dde85664a6ccd993d9ee3bdcdf118eee3..c7c0655dd45b0f617a34d89b315e5829638c43af 100644 (file)
@@ -293,6 +293,8 @@ void free_initmem(void)
        unsigned long init_begin = (unsigned long)__init_begin;
        unsigned long init_end = (unsigned long)__init_end;
 
-       set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+       if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+               set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+
        free_initmem_default(POISON_FREE_INITMEM);
 }
index 7cd4993f4ff21a932b6ef29512f685ea4e947f9a..f9f9568d689ef53e215c44de8b4468ae3033cfa9 100644 (file)
@@ -196,7 +196,7 @@ void __init setup_bootmem(void)
        max_pfn = PFN_DOWN(dram_end);
        max_low_pfn = max_pfn;
        dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
-       set_max_mapnr(max_low_pfn);
+       set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
 
 #ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
index a15c033f53ca42a4228e4a30265749c69747bb4f..87641dd65ccf915e8d11aa5062dcc0433f34eeb4 100644 (file)
@@ -35,7 +35,7 @@ void uv_query_info(void)
                uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
                uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
                uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
-               uv_info.max_guest_cpus = uvcb.max_guest_cpus;
+               uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
        }
 
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
index 0325fc0469b7b021613d1d1837b62b85378665f4..7b98d4caee779cfd128182f62a6f56fbc4dae1e2 100644 (file)
@@ -96,7 +96,7 @@ struct uv_cb_qui {
        u32 max_num_sec_conf;
        u64 max_guest_stor_addr;
        u8  reserved88[158 - 136];
-       u16 max_guest_cpus;
+       u16 max_guest_cpu_id;
        u8  reserveda0[200 - 160];
 } __packed __aligned(8);
 
@@ -273,7 +273,7 @@ struct uv_info {
        unsigned long guest_cpu_stor_len;
        unsigned long max_sec_stor_addr;
        unsigned int max_num_sec_conf;
-       unsigned short max_guest_cpus;
+       unsigned short max_guest_cpu_id;
 };
 
 extern struct uv_info uv_info;
index 883bfed9f5c2ce38e25ebc24d4e408fbcaccb6cd..b2d2ad1530676ddad2142c3963bdd41bd0a72da7 100644 (file)
@@ -368,7 +368,7 @@ static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
 {
        return scnprintf(page, PAGE_SIZE, "%d\n",
-                       uv_info.max_guest_cpus);
+                       uv_info.max_guest_cpu_id + 1);
 }
 
 static struct kobj_attribute uv_query_max_guest_cpus_attr =
index 5fa580219a864e14982d5e7a9921b9f00a4cab0c..52646f52f130fd2737ae959bc0756d8d94714273 100644 (file)
@@ -29,7 +29,6 @@ config SUPERH
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
-       select HAVE_COPY_THREAD_TLS
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DYNAMIC_FTRACE
index 8b23ed7c201c6ebca21e15f2682b2030d3104474..7fb474844a2d1d65b6b0b0010ba024f10a43e7ce 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/bcd.h>
-#include <linux/rtc.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <linux/rtc.h>
index ba6ec042606f5d3201fcc935401da2490a5fa41d..e6c5ddf070c0080c52f89904693bdb38dbedfd8b 100644 (file)
@@ -27,13 +27,12 @@ CONFIG_NETFILTER=y
 CONFIG_ATALK=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_ATP867X=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m
index c65667d00313bad3aab8e94bee1a8debda626338..e9825196dd66affa5f6ff86f8e789a9bf9dff687 100644 (file)
@@ -20,8 +20,6 @@ CONFIG_IP_PNP=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y
index d10a0414123a519a5461f2054320f87d87072934..d00376eb044f8ab45c1bfaf44e1be3922011fa7b 100644 (file)
@@ -44,16 +44,14 @@ CONFIG_NET_SCHED=y
 CONFIG_PARPORT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_PLATFORM=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_NETDEVICES=y
index 61bec46ebd66a857beedfbaaa9915e8bb48fa2d6..4a44cac640bc94b00c3c8c153afe30824d3bdf6a 100644 (file)
@@ -116,9 +116,6 @@ CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y
index 3f1c13799d799c2a0241dc0c9f0c55c1a6948de5..4defc7628a498a32703113f27684954d4d6a965e 100644 (file)
@@ -29,7 +29,6 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_ROM=y
-CONFIG_IDE=y
 CONFIG_SCSI=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
index f0073ed3994771c071effbf29365dc144a6c561d..48b457d59e790a06e9adac083ee50990aa0462dd 100644 (file)
@@ -39,9 +39,6 @@ CONFIG_IP_PNP_RARP=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
 CONFIG_SCSI=m
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
index d0de378beefe5add201e32bfcbf9bdc793dada34..7d54f284ce10fb10f3c5d30b34775f9a3288a84f 100644 (file)
@@ -63,8 +63,7 @@ config PVR2_DMA
 
 config G2_DMA
        tristate "G2 Bus DMA support"
-       depends on SH_DREAMCAST
-       select SH_DMA_API
+       depends on SH_DREAMCAST && SH_DMA_API
        help
          This enables support for the DMA controller for the Dreamcast's
          G2 bus. Drivers that want this will generally enable this on
index 351918894e867fd1f303cefa8037ba32383939ed..d643250f0a0fa0b49de816db3a41cdc5c618ea5c 100644 (file)
@@ -16,7 +16,6 @@
 #include <cpu/gpio.h>
 #endif
 
-#define ARCH_NR_GPIOS 512
 #include <asm-generic/gpio.h>
 
 #ifdef CONFIG_GPIOLIB
index 25eb8090541604cadc83bb8db671ee13219c5778..e48b3dd996f58d6dfd9452640e16d5c5e29f8563 100644 (file)
@@ -14,7 +14,6 @@
 #include <cpu/mmu_context.h>
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm/thread_info.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
index 703d3069997cbef34ae755d93d37dacd3ed72a02..77aa2f802d8d1ccb2f40ace9f51d2ad4ae714baa 100644 (file)
@@ -105,7 +105,7 @@ config VSYSCALL
          (the default value) say Y.
 
 config NUMA
-       bool "Non Uniform Memory Access (NUMA) Support"
+       bool "Non-Uniform Memory Access (NUMA) Support"
        depends on MMU && SYS_SUPPORTS_NUMA
        select ARCH_WANT_NUMA_VARIABLE_LOCALITY
        default n
index 4c1ca197e9c5f213efce54e66655fae16c00524b..d16d6f5ec77499c48812b885e5f1bd2250e46f65 100644 (file)
@@ -26,7 +26,7 @@
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 
-static int asids_seq_show(struct seq_file *file, void *iter)
+static int asids_debugfs_show(struct seq_file *file, void *iter)
 {
        struct task_struct *p;
 
@@ -48,18 +48,7 @@ static int asids_seq_show(struct seq_file *file, void *iter)
        return 0;
 }
 
-static int asids_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, asids_seq_show, inode->i_private);
-}
-
-static const struct file_operations asids_debugfs_fops = {
-       .owner          = THIS_MODULE,
-       .open           = asids_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
 
 static int __init asids_debugfs_init(void)
 {
index 17d780794497153cbb6935b2e2e81d1a7575dbf1..b0f185169dfa0c615e92b630e7299fef39ef3c80 100644 (file)
@@ -22,7 +22,7 @@ enum cache_type {
        CACHE_TYPE_UNIFIED,
 };
 
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int cache_debugfs_show(struct seq_file *file, void *iter)
 {
        unsigned int cache_type = (unsigned int)file->private;
        struct cache_info *cache;
@@ -94,18 +94,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
        return 0;
 }
 
-static int cache_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, cache_seq_show, inode->i_private);
-}
-
-static const struct file_operations cache_debugfs_fops = {
-       .owner          = THIS_MODULE,
-       .open           = cache_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
 
 static int __init cache_debugfs_init(void)
 {
index b20aba6e1b37c3f5040123d4e2cde8e5b56f6cfd..68eb7cc6e56435eb7262e1f69182d0d29f509dab 100644 (file)
@@ -812,7 +812,7 @@ bool __in_29bit_mode(void)
         return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
 }
 
-static int pmb_seq_show(struct seq_file *file, void *iter)
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
 {
        int i;
 
@@ -846,18 +846,7 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
        return 0;
 }
 
-static int pmb_debugfs_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, pmb_seq_show, NULL);
-}
-
-static const struct file_operations pmb_debugfs_fops = {
-       .owner          = THIS_MODULE,
-       .open           = pmb_debugfs_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
 
 static int __init pmb_debugfs_init(void)
 {
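
The three sh/mm hunks above (asids, cache, pmb) drop the same hand-rolled single_open() boilerplate in favour of DEFINE_SHOW_ATTRIBUTE(). Approximately, that helper from <linux/seq_file.h> expands to the code it replaces; here is a sketch for the pmb case, not the exact kernel definition:

/* Roughly what DEFINE_SHOW_ATTRIBUTE(pmb_debugfs) generates, given the
 * pmb_debugfs_show() renamed in the hunk above. */
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_debugfs_show, inode->i_private);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

The only visible difference for pmb is that the macro passes inode->i_private where the removed open helper passed NULL; pmb_debugfs_show() does not appear to use it, so behaviour is unchanged.
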
index 875116209ec14dca07609b1f70f84c934bfca62a..c7b2e208328b503407eea5356b02cb2163d6ec01 100644 (file)
@@ -50,10 +50,11 @@ extern pte_t *pkmap_page_table;
 
 #define flush_cache_kmaps()    flush_cache_all()
 
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval)        flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr)      flush_cache_all()
-
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval) flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr)       flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval)        flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr)      flush_tlb_all()
 
 #endif /* __KERNEL__ */
 
index 34d302d1a07ff865c46b9b4320bcc70fe77846e1..c3030db3325f157a91c4cfae256183b041b5bcd1 100644 (file)
@@ -15,7 +15,6 @@ config UML
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DEBUG_BUGVERBOSE
        select NO_DMA
-       select ARCH_HAS_SET_MEMORY
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select HAVE_GCC_PLUGINS
index 13b1fe694b90494dab3d8034709172fbe9125616..8e0b43cf089f4761b3defb1fb1721bd37d3c2464 100644 (file)
@@ -375,11 +375,11 @@ break_loop:
                file = NULL;
 
        backing_file = strsep(&str, ",:");
-       if (*backing_file == '\0')
+       if (backing_file && *backing_file == '\0')
                backing_file = NULL;
 
        serial = strsep(&str, ",:");
-       if (*serial == '\0')
+       if (serial && *serial == '\0')
                serial = NULL;
 
        if (backing_file && ubd_dev->no_cow) {
@@ -1241,7 +1241,7 @@ static int __init ubd_driver_init(void){
                /* Letting ubd=sync be like using ubd#s= instead of ubd#= is
                 * enough. So use anyway the io thread. */
        }
-       stack = alloc_stack(0);
+       stack = alloc_stack(0, 0);
        io_pid = start_io_thread(stack + PAGE_SIZE - sizeof(void *),
                                 &thread_fd);
        if(io_pid < 0){
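
The parsing fix in the first ubd hunk above guards against strsep() returning NULL once the option string is exhausted, which the old code then dereferenced. A minimal userspace sketch of that behaviour; the "cow.img" option string is made up for illustration:

#define _DEFAULT_SOURCE         /* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[] = "cow.img";                 /* only the file field supplied */
        char *str = buf;
        const char *file, *backing_file, *serial;

        file = strsep(&str, ",:");              /* "cow.img", str becomes NULL */
        backing_file = strsep(&str, ",:");      /* NULL: nothing left to split */
        serial = strsep(&str, ",:");            /* NULL again */

        /* The kernel fix mirrors these checks: test for NULL before '\0'. */
        printf("file=%s backing=%s serial=%s\n", file,
               backing_file ? backing_file : "(null)",
               serial ? serial : "(null)");
        return 0;
}
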
index 27e92d3881ff4ef608a6a51dc8a8a9e2b63467a4..5d957b7e7fd5279a2344254693b9b136931f8401 100644 (file)
@@ -1084,6 +1084,7 @@ static void virtio_uml_release_dev(struct device *d)
        }
 
        os_close_file(vu_dev->sock);
+       kfree(vu_dev);
 }
 
 /* Platform device */
@@ -1097,7 +1098,7 @@ static int virtio_uml_probe(struct platform_device *pdev)
        if (!pdata)
                return -EINVAL;
 
-       vu_dev = devm_kzalloc(&pdev->dev, sizeof(*vu_dev), GFP_KERNEL);
+       vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
        if (!vu_dev)
                return -ENOMEM;
 
index 96f77b5232aafe01e1f73cf3641683160b8b1ba5..cef03e3aa0f994ae58fe3cab6ab47b7b19630017 100644 (file)
@@ -5,7 +5,7 @@
 #define ioremap ioremap
 static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
 {
-       return (void __iomem *)(unsigned long)offset;
+       return NULL;
 }
 
 #define iounmap iounmap
index 39376bb63abf58596e30dcdd35971d66c4aaf707..def376194dce63a3c7b80f8986033e59d3a81be8 100644 (file)
@@ -55,15 +55,12 @@ extern unsigned long end_iomem;
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define __PAGE_KERNEL_EXEC                                              \
         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_RO                                               \
-        (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
 #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
 #define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define PAGE_KERNEL_EXEC       __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO)
 
 /*
  * The i386 can't do page protection for execute, and considers that the same
diff --git a/arch/um/include/asm/set_memory.h b/arch/um/include/asm/set_memory.h
deleted file mode 100644 (file)
index 24266c6..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/set_memory.h>
index d8c279e3312fe69ab409b35331aa34632e0dd8a7..2888ec812f6ece8721f63fb5818b5f1bef19b501 100644 (file)
@@ -19,7 +19,7 @@ extern int kmalloc_ok;
 #define UML_ROUND_UP(addr) \
        ((((unsigned long) addr) + PAGE_SIZE - 1) & PAGE_MASK)
 
-extern unsigned long alloc_stack(int atomic);
+extern unsigned long alloc_stack(int order, int atomic);
 extern void free_stack(unsigned long stack, int order);
 
 struct pt_regs;
index e4abac6c9727cc274647b47da116c1dfeae4e6d8..6516ef1f827457b61f743a449931c2d9fc387361 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kmsg_dump.h>
 #include <linux/console.h>
+#include <linux/string.h>
 #include <shared/init.h>
 #include <shared/kern.h>
 #include <os.h>
@@ -16,8 +17,12 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
        if (!console_trylock())
                return;
 
-       for_each_console(con)
-               break;
+       for_each_console(con) {
+               if(strcmp(con->name, "tty") == 0 &&
+                  (con->flags & (CON_ENABLED | CON_CONSDEV)) != 0) {
+                       break;
+               }
+       }
 
        console_unlock();
 
index 2a986ece54780ddb55e436e1097c93337d57a5b1..81d508daf67cbc4299e28899a8811dfb143180d5 100644 (file)
@@ -32,7 +32,6 @@
 #include <os.h>
 #include <skas.h>
 #include <linux/time-internal.h>
-#include <asm/set_memory.h>
 
 /*
  * This is a per-cpu array.  A processor only modifies its entry and it only
@@ -63,18 +62,16 @@ void free_stack(unsigned long stack, int order)
        free_pages(stack, order);
 }
 
-unsigned long alloc_stack(int atomic)
+unsigned long alloc_stack(int order, int atomic)
 {
-       unsigned long addr;
+       unsigned long page;
        gfp_t flags = GFP_KERNEL;
 
        if (atomic)
                flags = GFP_ATOMIC;
-       addr = __get_free_pages(flags, 1);
+       page = __get_free_pages(flags, order);
 
-       set_memory_ro(addr, 1);
-
-       return addr + PAGE_SIZE;
+       return page;
 }
 
 static inline void set_current(struct task_struct *task)
index f4db89b5b5a6f5a61675bee36f1646cb1a744038..315248b039414c9a3d93b754adb8abfed24c9e7e 100644 (file)
@@ -535,6 +535,31 @@ invalid_number:
 
        return 1;
 }
+
+static void time_travel_set_start(void)
+{
+       if (time_travel_start_set)
+               return;
+
+       switch (time_travel_mode) {
+       case TT_MODE_EXTERNAL:
+               time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
+               /* controller gave us the *current* time, so adjust by that */
+               time_travel_ext_get_time();
+               time_travel_start -= time_travel_time;
+               break;
+       case TT_MODE_INFCPU:
+       case TT_MODE_BASIC:
+               if (!time_travel_start_set)
+                       time_travel_start = os_persistent_clock_emulation();
+               break;
+       case TT_MODE_OFF:
+               /* we just read the host clock with os_persistent_clock_emulation() */
+               break;
+       }
+
+       time_travel_start_set = true;
+}
 #else /* CONFIG_UML_TIME_TRAVEL_SUPPORT */
 #define time_travel_start_set 0
 #define time_travel_start 0
@@ -553,6 +578,10 @@ static void time_travel_set_interval(unsigned long long interval)
 {
 }
 
+static inline void time_travel_set_start(void)
+{
+}
+
 /* fail link if this actually gets used */
 extern u64 time_travel_ext_req(u32 op, u64 time);
 
@@ -731,6 +760,8 @@ void read_persistent_clock64(struct timespec64 *ts)
 {
        long long nsecs;
 
+       time_travel_set_start();
+
        if (time_travel_mode != TT_MODE_OFF)
                nsecs = time_travel_start + time_travel_time;
        else
@@ -742,25 +773,6 @@ void read_persistent_clock64(struct timespec64 *ts)
 
 void __init time_init(void)
 {
-#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
-       switch (time_travel_mode) {
-       case TT_MODE_EXTERNAL:
-               time_travel_start = time_travel_ext_req(UM_TIMETRAVEL_GET_TOD, -1);
-               /* controller gave us the *current* time, so adjust by that */
-               time_travel_ext_get_time();
-               time_travel_start -= time_travel_time;
-               break;
-       case TT_MODE_INFCPU:
-       case TT_MODE_BASIC:
-               if (!time_travel_start_set)
-                       time_travel_start = os_persistent_clock_emulation();
-               break;
-       case TT_MODE_OFF:
-               /* we just read the host clock with os_persistent_clock_emulation() */
-               break;
-       }
-#endif
-
        timer_set_signal_handler();
        late_time_init = um_timer_setup;
 }
index 437d1f1cc5ecd862890d8d0405d49f668bd75e29..61776790cd678110f4a2a8af3953778598362b18 100644 (file)
@@ -608,57 +608,3 @@ void force_flush_all(void)
                vma = vma->vm_next;
        }
 }
-
-struct page_change_data {
-       unsigned int set_mask, clear_mask;
-};
-
-static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
-{
-       struct page_change_data *cdata = data;
-       pte_t pte = READ_ONCE(*ptep);
-
-       pte_clear_bits(pte, cdata->clear_mask);
-       pte_set_bits(pte, cdata->set_mask);
-
-       set_pte(ptep, pte);
-       return 0;
-}
-
-static int change_memory(unsigned long start, unsigned long pages,
-                        unsigned int set_mask, unsigned int clear_mask)
-{
-       unsigned long size = pages * PAGE_SIZE;
-       struct page_change_data data;
-       int ret;
-
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                 &data);
-
-       flush_tlb_kernel_range(start, start + size);
-
-       return ret;
-}
-
-int set_memory_ro(unsigned long addr, int numpages)
-{
-       return change_memory(addr, numpages, 0, _PAGE_RW);
-}
-
-int set_memory_rw(unsigned long addr, int numpages)
-{
-       return change_memory(addr, numpages, _PAGE_RW, 0);
-}
-
-int set_memory_nx(unsigned long addr, int numpages)
-{
-       return -EOPNOTSUPP;
-}
-
-int set_memory_x(unsigned long addr, int numpages)
-{
-       return -EOPNOTSUPP;
-}
index 31d356b1ffd8253375aa738c4aa1e24d178ac7bc..80e2660782a05964fd34381d80d5d2ad220771a2 100644 (file)
@@ -26,7 +26,8 @@
 #include <mem_user.h>
 #include <os.h>
 
-#define DEFAULT_COMMAND_LINE "root=98:0"
+#define DEFAULT_COMMAND_LINE_ROOT "root=98:0"
+#define DEFAULT_COMMAND_LINE_CONSOLE "console=tty"
 
 /* Changed in add_arg and setup_arch, which run before SMP is started */
 static char __initdata command_line[COMMAND_LINE_SIZE] = { 0 };
@@ -109,7 +110,8 @@ unsigned long end_vm;
 int ncpus = 1;
 
 /* Set in early boot */
-static int have_root __initdata = 0;
+static int have_root __initdata;
+static int have_console __initdata;
 
 /* Set in uml_mem_setup and modified in linux_main */
 long long physmem_size = 32 * 1024 * 1024;
@@ -161,6 +163,17 @@ __uml_setup("debug", no_skas_debug_setup,
 "    this flag is not needed to run gdb on UML in skas mode\n\n"
 );
 
+static int __init uml_console_setup(char *line, int *add)
+{
+       have_console = 1;
+       return 0;
+}
+
+__uml_setup("console=", uml_console_setup,
+"console=<preferred console>\n"
+"    Specify the preferred console output driver\n\n"
+);
+
 static int __init Usage(char *line, int *add)
 {
        const char **p;
@@ -264,7 +277,10 @@ int __init linux_main(int argc, char **argv)
                        add_arg(argv[i]);
        }
        if (have_root == 0)
-               add_arg(DEFAULT_COMMAND_LINE);
+               add_arg(DEFAULT_COMMAND_LINE_ROOT);
+
+       if (have_console == 0)
+               add_arg(DEFAULT_COMMAND_LINE_CONSOLE);
 
        host_task_size = os_get_top_address();
        /*
index feb48d796e0050e1803fe1b637b3334447fcb7e3..9fa6e4187d4fb4bcd0d116169a0527c73d020e92 100644 (file)
@@ -45,7 +45,7 @@ int run_helper(void (*pre_exec)(void *), void *pre_data, char **argv)
        unsigned long stack, sp;
        int pid, fds[2], ret, n;
 
-       stack = alloc_stack(__cant_sleep());
+       stack = alloc_stack(0, __cant_sleep());
        if (stack == 0)
                return -ENOMEM;
 
@@ -116,7 +116,7 @@ int run_helper_thread(int (*proc)(void *), void *arg, unsigned int flags,
        unsigned long stack, sp;
        int pid, status, err;
 
-       stack = alloc_stack(__cant_sleep());
+       stack = alloc_stack(0, __cant_sleep());
        if (stack == 0)
                return -ENOMEM;
 
index a61cbf73a179dec6a4bf55595c3d5906057c3c38..6c5041c5560b28a16cac11bdfa7a0403b31528b0 100644 (file)
@@ -104,5 +104,18 @@ long long os_nsecs(void)
  */
 void os_idle_sleep(void)
 {
-       pause();
+       struct itimerspec its;
+       sigset_t set, old;
+
+       /* block SIGALRM while we analyze the timer state */
+       sigemptyset(&set);
+       sigaddset(&set, SIGALRM);
+       sigprocmask(SIG_BLOCK, &set, &old);
+
+       /* check the timer, and if it'll fire then wait for it */
+       timer_gettime(event_high_res_timer, &its);
+       if (its.it_value.tv_sec || its.it_value.tv_nsec)
+               sigsuspend(&old);
+       /* either way, restore the signal mask */
+       sigprocmask(SIG_UNBLOCK, &set, NULL);
 }
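
The new os_idle_sleep() above replaces a bare pause() with the classic block / check / sigsuspend() sequence, so a SIGALRM arriving between the timer check and the wait cannot be lost. A standalone userspace sketch of the same pattern; the one-second setitimer() and the fired flag are illustrative stand-ins, not part of the UML code:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig)
{
        (void)sig;
        fired = 1;
}

int main(void)
{
        struct sigaction sa;
        struct itimerval it = { .it_value = { .tv_sec = 1 } };
        sigset_t set, old;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;
        sigemptyset(&sa.sa_mask);
        sigaction(SIGALRM, &sa, NULL);

        sigemptyset(&set);
        sigaddset(&set, SIGALRM);
        sigprocmask(SIG_BLOCK, &set, &old);     /* block SIGALRM while we look at state */

        setitimer(ITIMER_REAL, &it, NULL);      /* arm a 1 s one-shot timer */

        if (!fired)                             /* timer still pending? */
                sigsuspend(&old);               /* atomically unblock and wait */

        sigprocmask(SIG_SETMASK, &old, NULL);   /* restore the original mask */
        printf("woke up, fired=%d\n", (int)fired);
        return 0;
}
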
index 18d8f17f755c4fab63fd71eadfcc632228aa1c0d..0904f5676e4d8880562ad9d00532c12d2fb79b8c 100644 (file)
@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
                                                  unsigned int nr)
 {
        if (likely(nr < IA32_NR_syscalls)) {
-               instrumentation_begin();
                nr = array_index_nospec(nr, IA32_NR_syscalls);
                regs->ax = ia32_sys_call_table[nr](regs);
-               instrumentation_end();
        }
 }
 
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
         * or may not be necessary, but it matches the old asm behavior.
         */
        nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+       instrumentation_begin();
 
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
 }
 
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
                res = get_user(*(u32 *)&regs->bp,
                       (u32 __user __force *)(unsigned long)(u32)regs->sp);
        }
-       instrumentation_end();
 
        if (res) {
                /* User code screwed up. */
                regs->ax = -EFAULT;
+
+               instrumentation_end();
                syscall_exit_to_user_mode(regs);
                return false;
        }
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 
        /* Now this is just like a normal syscall. */
        do_syscall_32_irqs_on(regs, nr);
+
+       instrumentation_end();
        syscall_exit_to_user_mode(regs);
        return true;
 }
index ccd32877a3c4141faaf97ad4dfec58217ac2cf48..496b11ec469def7b22e0948b60f07d6f7746e1b6 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/export.h>
 
        /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
-       .macro THUNK name, func, put_ret_addr_in_rdi=0
+       .macro THUNK name, func
 SYM_FUNC_START_NOALIGN(\name)
        pushq %rbp
        movq %rsp, %rbp
@@ -25,13 +25,8 @@ SYM_FUNC_START_NOALIGN(\name)
        pushq %r10
        pushq %r11
 
-       .if \put_ret_addr_in_rdi
-       /* 8(%rbp) is return addr on stack */
-       movq 8(%rbp), %rdi
-       .endif
-
        call \func
-       jmp  .L_restore
+       jmp  __thunk_restore
 SYM_FUNC_END(\name)
        _ASM_NOKPROBE(\name)
        .endm
@@ -44,7 +39,7 @@ SYM_FUNC_END(\name)
 #endif
 
 #ifdef CONFIG_PREEMPTION
-SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
+SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
        popq %r11
        popq %r10
        popq %r9
@@ -56,6 +51,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.L_restore)
        popq %rdi
        popq %rbp
        ret
-       _ASM_NOKPROBE(.L_restore)
-SYM_CODE_END(.L_restore)
+       _ASM_NOKPROBE(__thunk_restore)
+SYM_CODE_END(__thunk_restore)
 #endif
index 4638a52d8eaeae7419a2881e0452237a7251f9e6..6375967a8244dc4e6777d8ff95de1be33ea77e06 100644 (file)
@@ -315,6 +315,25 @@ static struct syscore_ops hv_syscore_ops = {
        .resume         = hv_resume,
 };
 
+static void (* __initdata old_setup_percpu_clockev)(void);
+
+static void __init hv_stimer_setup_percpu_clockev(void)
+{
+       /*
+        * Ignore any errors in setting up stimer clockevents
+        * as we can run with the LAPIC timer as a fallback.
+        */
+       (void)hv_stimer_alloc();
+
+       /*
+        * Still register the LAPIC timer, because the direct-mode STIMER is
+        * not supported by old versions of Hyper-V. This also allows users
+        * to switch to LAPIC timer via /sys, if they want to.
+        */
+       if (old_setup_percpu_clockev)
+               old_setup_percpu_clockev();
+}
+
 /*
  * This function is to be invoked early in the boot sequence after the
  * hypervisor has been detected.
@@ -393,10 +412,14 @@ void __init hyperv_init(void)
        wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
 
        /*
-        * Ignore any errors in setting up stimer clockevents
-        * as we can run with the LAPIC timer as a fallback.
+        * hyperv_init() is called before LAPIC is initialized: see
+        * apic_intr_mode_init() -> x86_platform.apic_post_init() and
+        * apic_bsp_setup() -> setup_local_APIC(). The direct-mode STIMER
+        * depends on LAPIC, so hv_stimer_alloc() should be called from
+        * x86_init.timers.setup_percpu_clockev.
         */
-       (void)hv_stimer_alloc();
+       old_setup_percpu_clockev = x86_init.timers.setup_percpu_clockev;
+       x86_init.timers.setup_percpu_clockev = hv_stimer_setup_percpu_clockev;
 
        hv_apic_init();
 
index a5aba4ab02248c5423c2d30576d137115cdd3046..67a4f1cb2aac58cf196fd4cd645b9736a0d76aaf 100644 (file)
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387       _BITUL(0)       /* 387 state will be initialized */
+#define KFPU_MXCSR     _BITUL(1)       /* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
 
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+       kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
 /*
  * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
  * A context switch will (and softirq might) save CPU's FPU registers to
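
The header comment above spells out the contract for kernel-mode FPU use; a sketch of a caller under the new split API follows. kfpu_demo() is a made-up name and the SIMD payload is elided; only kernel_fpu_begin(), kernel_fpu_begin_mask(), kernel_fpu_end() and irq_fpu_usable() come from the header changed in this hunk.

#include <asm/fpu/api.h>

static void kfpu_demo(void)
{
        if (!irq_fpu_usable())
                return;                 /* take a non-FPU fallback path instead */

        kernel_fpu_begin();             /* same as kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR) */
        /* ... SSE/AVX work here; no sleeping, preemption is disabled ... */
        kernel_fpu_end();
}
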
index 247a60a47331774e34e4fbc85fbcfbc3c86e58bc..f656aabd1545cc8eb0d7ea8abf46686e5834e91e 100644 (file)
@@ -613,6 +613,7 @@ DECLARE_IDTENTRY_VC(X86_TRAP_VC,    exc_vmm_communication);
 
 #ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_XENCB(X86_TRAP_OTHER, exc_xen_hypervisor_callback);
+DECLARE_IDTENTRY_RAW(X86_TRAP_OTHER,   exc_xen_unknown_trap);
 #endif
 
 /* Device interrupts common/spurious */
index 5e658ba2654a7f687886276b504657b4d035e22a..9abe842dbd843a2d569a10e0131c937282ed1b46 100644 (file)
@@ -97,6 +97,7 @@
 
 #define        INTEL_FAM6_LAKEFIELD            0x8A
 #define INTEL_FAM6_ALDERLAKE           0x97
+#define INTEL_FAM6_ALDERLAKE_L         0x9A
 
 /* "Small Core" Processors (Atom) */
 
index 0b4920a7238e389c0eb1414fa0e1b5fa392f8f52..e16cccdd04207c8a9a0268663c6ded1d9c5f7904 100644 (file)
@@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
  * think of extending them - you will be slapped with a stinking trout or a frozen
  * shark will reach you, wherever you are! You've been warned.
  */
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
 {
        DECLARE_ARGS(val, low, high);
 
@@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
        return EAX_EDX_VAL(val, low, high);
 }
 
-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
 {
        asm volatile("1: wrmsr\n"
                     "2:\n"
index 488a8e8487548b7bc34ae6567b8a81528ccd3a41..9239399e54914537c5453c77c234d4af7877b403 100644 (file)
@@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_die_id(cpu)                   (cpu_data(cpu).cpu_die_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
 
+extern unsigned int __max_die_per_package;
+
 #ifdef CONFIG_SMP
 #define topology_die_cpumask(cpu)              (per_cpu(cpu_die_map, cpu))
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
@@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 extern unsigned int __max_logical_packages;
 #define topology_max_packages()                        (__max_logical_packages)
 
-extern unsigned int __max_die_per_package;
-
 static inline int topology_max_die_per_package(void)
 {
        return __max_die_per_package;
index f8ca66f3d8617ddd14fb508c6fe0fda603e50376..347a956f71ca098b8423ba96d64ce394352b16c3 100644 (file)
@@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                u32 ecx;
 
                ecx = cpuid_ecx(0x8000001e);
-               nodes_per_socket = ((ecx >> 8) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
        } else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
                u64 value;
 
                rdmsrl(MSR_FAM10H_NODE_ID, value);
-               nodes_per_socket = ((value >> 3) & 7) + 1;
+               __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
        }
 
        if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
index 13d3f1cbda176711069606c6dacbe2900579c208..e133ce1e562b381468e897d2c7b9dc708d35f4ac 100644 (file)
@@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
         * that out because it's an indirect call. Annotate it.
         */
        instrumentation_begin();
-       trace_hardirqs_off_finish();
+
        machine_check_vector(regs);
-       if (regs->flags & X86_EFLAGS_IF)
-               trace_hardirqs_on_prepare();
+
        instrumentation_end();
        irqentry_nmi_exit(regs, irq_state);
 }
@@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 {
        irqentry_enter_from_user_mode(regs);
        instrumentation_begin();
+
        machine_check_vector(regs);
+
        instrumentation_end();
        irqentry_exit_to_user_mode(regs);
 }
index 1068002c8532391beaed43a7f1125e96a2d929c5..8678864ce7123c35cbb5d0ab09e9ffcd91095e16 100644 (file)
 #define BITS_SHIFT_NEXT_LEVEL(eax)     ((eax) & 0x1f)
 #define LEVEL_MAX_SIBLINGS(ebx)                ((ebx) & 0xffff)
 
-#ifdef CONFIG_SMP
 unsigned int __max_die_per_package __read_mostly = 1;
 EXPORT_SYMBOL(__max_die_per_package);
 
+#ifdef CONFIG_SMP
 /*
  * Check if given CPUID extended toplogy "leaf" is implemented
  */
index eb86a2b831b15a74588b302ccf8eed58e7490331..571220ac8beaa7020cb1a98a41ffb0e79c759484 100644 (file)
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
 }
 EXPORT_SYMBOL(copy_fpregs_to_fpstate);
 
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
        preempt_disable();
 
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
        }
        __cpu_invalidate_fpregs_state();
 
-       if (boot_cpu_has(X86_FEATURE_XMM))
+       /* Put sane initial values into the control registers. */
+       if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
                ldmxcsr(MXCSR_DEFAULT);
 
-       if (boot_cpu_has(X86_FEATURE_FPU))
+       if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
                asm volatile ("fninit");
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
 void kernel_fpu_end(void)
 {
index 0bd1a0fc587e0f830e25e5b4199972ad1471ff96..84c1821819afb8fb3da3b09b8d429930bd1d4da3 100644 (file)
@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
        return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 }
 
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 {
        u32 low, high;
 
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+               memcpy(dst, buf, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
        u16 d2;
        u8  d1;
 
+       /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+       if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+               memcpy(buf, src, size);
+               return ES_OK;
+       }
+
        switch (size) {
        case 1:
                if (get_user(d1, s))
index 8ca66af96a547ddc6606e3783ec33950f7716e93..117e24fbfd8a07587532c613fd3386872273a2ad 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/numa.h>
 #include <linux/pgtable.h>
 #include <linux/overflow.h>
+#include <linux/syscore_ops.h>
 
 #include <asm/acpi.h>
 #include <asm/desc.h>
@@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
        this_cpu_write(arch_prev_mperf, mperf);
 }
 
+#ifdef CONFIG_PM_SLEEP
+static struct syscore_ops freq_invariance_syscore_ops = {
+       .resume = init_counter_refs,
+};
+
+static void register_freq_invariance_syscore_ops(void)
+{
+       /* Bail out if registered already. */
+       if (freq_invariance_syscore_ops.node.prev)
+               return;
+
+       register_syscore_ops(&freq_invariance_syscore_ops);
+}
+#else
+static inline void register_freq_invariance_syscore_ops(void) {}
+#endif
+
 static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
        bool ret = false;
@@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
        if (ret) {
                init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
+               register_freq_invariance_syscore_ops();
                pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        } else {
                pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
index 13036cf0b9124587002b4bf50a1796f6171539d6..38172ca627d36d2199fdfc64d7fa41ebba2f0e07 100644 (file)
@@ -321,7 +321,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
        if (cpuid->nent < vcpu->arch.cpuid_nent)
                goto out;
        r = -EFAULT;
-       if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
+       if (copy_to_user(entries, vcpu->arch.cpuid_entries,
                         vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
                goto out;
        return 0;
index 56cae1ff9e3fe46362947f0911b39573d44397a8..66a08322988f223a179edb33cf97c190c6136a54 100644 (file)
@@ -2879,6 +2879,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
        ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
        *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
                                                              (u32)msr_data;
+       if (efer & EFER_LMA)
+               ctxt->mode = X86EMUL_MODE_PROT64;
 
        return X86EMUL_CONTINUE;
 }
index f15bc16de07c3318061727eb1ee71b27e131ab78..a889563ad02d5e49bdfed32d30fdc8edcae1233c 100644 (file)
@@ -9,31 +9,6 @@
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
 
-static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
-                                            enum kvm_reg reg)
-{
-       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
-                                        enum kvm_reg reg)
-{
-       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
-static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
-                                              enum kvm_reg reg)
-{
-       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-}
-
-static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
-                                          enum kvm_reg reg)
-{
-       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
-       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                \
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
 {                                                                            \
@@ -43,7 +18,6 @@ static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,              \
                                                unsigned long val)            \
 {                                                                            \
        vcpu->arch.regs[VCPU_REGS_##uname] = val;                             \
-       kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname);                     \
 }
 BUILD_KVM_GPR_ACCESSORS(rax, RAX)
 BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
@@ -63,6 +37,31 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
 BUILD_KVM_GPR_ACCESSORS(r15, R15)
 #endif
 
+static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
+                                            enum kvm_reg reg)
+{
+       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
+                                        enum kvm_reg reg)
+{
+       return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
+static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
+                                              enum kvm_reg reg)
+{
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+}
+
+static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
+                                          enum kvm_reg reg)
+{
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+       __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
 static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
 {
        if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
index 581925e476d6c57ccc7af45f61761b55e8686c8e..261be1d2032ba8ca9d8a73b81beec3b9bf50f7ca 100644 (file)
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
-static inline u64 rsvd_bits(int s, int e)
+static __always_inline u64 rsvd_bits(int s, int e)
 {
+       BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
+
+       if (__builtin_constant_p(e))
+               BUILD_BUG_ON(e > 63);
+       else
+               e &= 63;
+
        if (e < s)
                return 0;
 
index 2ef8615f9dba87bca72d4912df6e4cd9394def56..b56d604809b8afdf2d7080e4cf471ed649e675d7 100644 (file)
@@ -1049,8 +1049,8 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 }
 
 /*
- * Clear non-leaf entries (and free associated page tables) which could
- * be replaced by large mappings, for GFNs within the slot.
+ * Clear leaf entries which could be replaced by large mappings, for
+ * GFNs within the slot.
  */
 static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
@@ -1062,7 +1062,7 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
 
        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
-                   is_last_spte(iter.old_spte, iter.level))
+                   !is_last_spte(iter.old_spte, iter.level))
                        continue;
 
                pfn = spte_to_pfn(iter.old_spte);
index cb4c6ee10029c96d97460250315516468d187d83..db30670dd8c4a8ccf2ca611f513e056a0480e696 100644 (file)
@@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (WARN_ON(!is_guest_mode(vcpu)))
+               return true;
+
        if (!nested_svm_vmrun_msrpm(svm)) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror =
@@ -228,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 
 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
+       struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
        if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -241,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
 
        vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
 
-       if (!vmcb12_lma) {
-               if (vmcb12->save.cr4 & X86_CR4_PAE) {
-                       if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
-                               return false;
-               } else {
-                       if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
-                               return false;
-               }
-       } else {
+       if (vmcb12_lma) {
                if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
                    !(vmcb12->save.cr0 & X86_CR0_PE) ||
-                   (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+                   (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
                        return false;
        }
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
index c8ffdbc81709ee75f88824b26c1dda9d7d165dc6..48017fef1cd9ccd0fff5c207031da4f6bf12588f 100644 (file)
@@ -342,6 +342,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
        unsigned long first, last;
        int ret;
 
+       lockdep_assert_held(&kvm->lock);
+
        if (ulen == 0 || uaddr + ulen < uaddr)
                return ERR_PTR(-EINVAL);
 
@@ -1119,12 +1121,20 @@ int svm_register_enc_region(struct kvm *kvm,
        if (!region)
                return -ENOMEM;
 
+       mutex_lock(&kvm->lock);
        region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
        if (IS_ERR(region->pages)) {
                ret = PTR_ERR(region->pages);
+               mutex_unlock(&kvm->lock);
                goto e_free;
        }
 
+       region->uaddr = range->addr;
+       region->size = range->size;
+
+       list_add_tail(&region->list, &sev->regions_list);
+       mutex_unlock(&kvm->lock);
+
        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Lets make sure caches are
@@ -1133,13 +1143,6 @@ int svm_register_enc_region(struct kvm *kvm,
         */
        sev_clflush_pages(region->pages, region->npages);
 
-       region->uaddr = range->addr;
-       region->size = range->size;
-
-       mutex_lock(&kvm->lock);
-       list_add_tail(&region->list, &sev->regions_list);
-       mutex_unlock(&kvm->lock);
-
        return ret;
 
 e_free:
@@ -1415,16 +1418,13 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
         * to be returned:
         *   GPRs RAX, RBX, RCX, RDX
         *
-        * Copy their values to the GHCB if they are dirty.
+        * Copy their values, even if they may not have been written during the
+        * VM-Exit.  It's the guest's responsibility to not consume random data.
         */
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
-               ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
-               ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
-               ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
-       if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
-               ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
+       ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
+       ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
+       ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
+       ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
 }
 
 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
index 7ef171790d02b669ffaa677cf0519972774ea384..3442d44ca53b854ca978185a351ef078d416ce80 100644 (file)
@@ -454,6 +454,11 @@ static int has_svm(void)
                return 0;
        }
 
+       if (sev_active()) {
+               pr_info("KVM is unsupported when running as an SEV guest\n");
+               return 0;
+       }
+
        return 1;
 }
 
@@ -3739,6 +3744,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       trace_kvm_entry(vcpu);
+
        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
        svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
        svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
index 0fe874ae54982e146ee4ffda756ac80470d73bcb..6e7d070f8b86d17b89e941b175e2d944f9f5978a 100644 (file)
@@ -403,9 +403,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
 }
 
 /* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK           0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK       0x7U
-#define MSR_CR3_LONG_MBZ_MASK                  0xfff0000000000000U
 #define MSR_INVALID                            0xffffffffU
 
 extern int sev;
index 0fbb46990dfce110aa23e62a84c343b4c408eabc..f2b9bfb5820677aa2a55e0db895c2936e1383fdc 100644 (file)
@@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
 {
-       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_host_map *map;
-       struct page *page;
-       u64 hpa;
 
        /*
         * hv_evmcs may end up being not mapped after migration (when
@@ -3153,6 +3149,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                }
        }
 
+       return true;
+}
+
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+{
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_host_map *map;
+       struct page *page;
+       u64 hpa;
+
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
                /*
                 * Translate L1 physical address to host physical
@@ -3221,6 +3228,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
        else
                exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+
+       return true;
+}
+
+static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+       if (!nested_get_evmcs_page(vcpu))
+               return false;
+
+       if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
+               return false;
+
        return true;
 }
 
@@ -6077,11 +6096,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
        if (is_guest_mode(vcpu)) {
                sync_vmcs02_to_vmcs12(vcpu, vmcs12);
                sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
-       } else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
-               if (vmx->nested.hv_evmcs)
-                       copy_enlightened_to_vmcs12(vmx);
-               else if (enable_shadow_vmcs)
-                       copy_shadow_to_vmcs12(vmx);
+       } else  {
+               copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
+               if (!vmx->nested.need_vmcs12_to_shadow_sync) {
+                       if (vmx->nested.hv_evmcs)
+                               copy_enlightened_to_vmcs12(vmx);
+                       else if (enable_shadow_vmcs)
+                               copy_shadow_to_vmcs12(vmx);
+               }
        }
 
        BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
@@ -6602,7 +6624,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
        .get_state = vmx_get_nested_state,
        .set_state = vmx_set_nested_state,
-       .get_nested_state_pages = nested_get_vmcs12_pages,
+       .get_nested_state_pages = vmx_get_nested_state_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
index a886a47daebdadffa556e44e8e9dd6848abbe733..cdf5f34518f43b4c2e46bf123a489cf6cf002461 100644 (file)
@@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
-       [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+       [7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 
 /* mapping between fixed pmc index and intel_arch_events array */
@@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                         x86_pmu.num_counters_gp);
+       eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+       eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);
 
@@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                              x86_pmu.num_counters_fixed);
+               edx.split.bit_width_fixed = min_t(int,
+                       edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }
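
As a quick check of the counter-mask formula used above, assuming a hypothetical 48-bit general-purpose counter width:

/*
 * ((u64)1 << 48) - 1 == 0x0000ffffffffffff, i.e. the low 48 bits set.
 * Clamping eax.split.bit_width (and edx.split.bit_width_fixed likewise) to
 * the host's x86_pmu limits keeps the guest from being advertised wider
 * counters than the host PMU actually implements.
 */
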
index 2af05d3b0590985ce22ee26e6a83b0848b6043ef..eb69fef57485d61b846ba321ef9a34ec056617f8 100644 (file)
@@ -6653,6 +6653,8 @@ reenter_guest:
        if (vmx->emulation_required)
                return EXIT_FASTPATH_NONE;
 
+       trace_kvm_entry(vcpu);
+
        if (vmx->ple_window_dirty) {
                vmx->ple_window_dirty = false;
                vmcs_write32(PLE_WINDOW, vmx->ple_window);
@@ -6858,11 +6860,20 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
                switch (index) {
                case MSR_IA32_TSX_CTRL:
                        /*
-                        * No need to pass TSX_CTRL_CPUID_CLEAR through, so
-                        * let's avoid changing CPUID bits under the host
-                        * kernel's feet.
+                        * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
+                        * interception.  Keep the host value unchanged to avoid
+                        * changing CPUID bits under the host kernel's feet.
+                        *
+                        * hle=0, rtm=0, tsx_ctrl=1 can be found with some
+                        * combinations of new kernel and old userspace.  If
+                        * those guests run on a tsx=off host, do allow guests
+                        * to use TSX_CTRL, but do not change the value on the
+                        * host so that TSX remains always disabled.
                         */
-                       vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+                       if (boot_cpu_has(X86_FEATURE_RTM))
+                               vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+                       else
+                               vmx->guest_uret_msrs[j].mask = 0;
                        break;
                default:
                        vmx->guest_uret_msrs[j].mask = -1ull;
index 9a8969a6dd06362a84ad6989f34d48b51e59ae24..1b404e4d7dd8e0441c436811b42ec1b62a048855 100644 (file)
@@ -105,6 +105,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void process_smi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
@@ -1393,16 +1394,24 @@ static u64 kvm_get_arch_capabilities(void)
        if (!boot_cpu_has_bug(X86_BUG_MDS))
                data |= ARCH_CAP_MDS_NO;
 
-       /*
-        * On TAA affected systems:
-        *      - nothing to do if TSX is disabled on the host.
-        *      - we emulate TSX_CTRL if present on the host.
-        *        This lets the guest use VERW to clear CPU buffers.
-        */
-       if (!boot_cpu_has(X86_FEATURE_RTM))
-               data &= ~(ARCH_CAP_TAA_NO | ARCH_CAP_TSX_CTRL_MSR);
-       else if (!boot_cpu_has_bug(X86_BUG_TAA))
+       if (!boot_cpu_has(X86_FEATURE_RTM)) {
+               /*
+                * If RTM=0 because the kernel has disabled TSX, the host might
+                * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
+                * and therefore knows that there cannot be TAA) but keep
+                * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
+                * and we want to allow migrating those guests to tsx=off hosts.
+                */
+               data &= ~ARCH_CAP_TAA_NO;
+       } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
                data |= ARCH_CAP_TAA_NO;
+       } else {
+               /*
+                * Nothing to do here; we emulate TSX_CTRL if present on the
+                * host so the guest can choose between disabling TSX or
+                * using VERW to clear CPU buffers.
+                */
+       }
 
        return data;
 }
@@ -4230,6 +4239,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 {
        process_nmi(vcpu);
 
+       if (kvm_check_request(KVM_REQ_SMI, vcpu))
+               process_smi(vcpu);
+
        /*
         * In guest mode, payload delivery should be deferred,
         * so that the L1 hypervisor can intercept #PF before
@@ -8802,9 +8814,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
-                       if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
-                               ;
-                       else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
+                       if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
                                goto out;
                        }
@@ -8988,8 +8998,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                kvm_x86_ops.request_immediate_exit(vcpu);
        }
 
-       trace_kvm_entry(vcpu);
-
        fpregs_assert_state_consistent();
        if (test_thread_flag(TIF_NEED_FPU_LOAD))
                switch_fpu_return();
@@ -9616,6 +9624,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                 */
                if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
                        return false;
+               if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+                       return false;
        } else {
                /*
                 * Not in 64-bit mode: EFER.LMA is clear and the code
@@ -9993,6 +10003,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        fx_init(vcpu);
 
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
+       vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63);
 
        vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
 
@@ -10494,7 +10505,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
                        return 0;
 
                old_npages = slot->npages;
-               hva = 0;
+               hva = slot->userspace_addr;
        }
 
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
@@ -11556,6 +11567,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
 }
 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
index c5ee0f5ce0f13e236c43cbd4886ef39f3a67c855..0f727b50bd3d2dcabbcf3d661b9b6aefe02e924a 100644 (file)
@@ -425,6 +425,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
                __reserved_bits |= X86_CR4_UMIP;        \
        if (!__cpu_has(__c, X86_FEATURE_VMX))           \
                __reserved_bits |= X86_CR4_VMXE;        \
+       if (!__cpu_has(__c, X86_FEATURE_PCID))          \
+               __reserved_bits |= X86_CR4_PCIDE;       \
        __reserved_bits;                                \
 })
 
index 4321fa02e18df07368689469049cad31b87eca04..419365c48b2ada2b40094552affd0e4cd4ba433c 100644 (file)
 #include <asm/fpu/api.h>
 #include <asm/asm.h>
 
+/*
+ * Use KFPU_387.  MMX instructions are not affected by MXCSR,
+ * but both AMD and Intel documentation states that even integer MMX
+ * operations will result in #MF if an exception is pending in FCW.
+ *
+ * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
+ * any subsequent user of the 387 stack will reinitialize it using
+ * KFPU_387.
+ */
+
 void *_mmx_memcpy(void *to, const void *from, size_t len)
 {
        void *p;
@@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
        p = to;
        i = len >> 6; /* len/64 */
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"            /* This set is 28 bytes */
@@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        /*
         * maybe the prefetch stuff can go before the expensive fnsave...
@@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "  pxor %%mm0, %%mm0\n" : :
@@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
 {
        int i;
 
-       kernel_fpu_begin();
+       kernel_fpu_begin_mask(KFPU_387);
 
        __asm__ __volatile__ (
                "1: prefetch (%0)\n"
index c79e5736ab2b7ebf43182e773c9c35457edcf366..c3d5f0236f353cd42159ab487163d5bc511df95b 100644 (file)
@@ -382,6 +382,7 @@ bool sev_active(void)
 {
        return sev_status & MSR_AMD64_SEV_ENABLED;
 }
+EXPORT_SYMBOL_GPL(sev_active);
 
 /* Needs to be called from non-instrumentable code */
 bool noinstr sev_es_active(void)
index 4409306364dc3cd5c774ee190efe83a6079a9cc2..9a5a50cdaab596c08a24a749ab7c6a086933d142 100644 (file)
@@ -583,6 +583,13 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
                exc_debug(regs);
 }
 
+DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
+{
+       /* This should never happen and there is no way to handle it. */
+       pr_err("Unknown trap in Xen PV mode.");
+       BUG();
+}
+
 struct trap_array_entry {
        void (*orig)(void);
        void (*xen)(void);
@@ -631,6 +638,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
 {
        unsigned int nr;
        bool ist_okay = false;
+       bool found = false;
 
        /*
         * Replace trap handler addresses by Xen specific ones.
@@ -645,6 +653,7 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
                if (*addr == entry->orig) {
                        *addr = entry->xen;
                        ist_okay = entry->ist_okay;
+                       found = true;
                        break;
                }
        }
@@ -655,9 +664,13 @@ static bool __ref get_trap_addr(void **addr, unsigned int ist)
                nr = (*addr - (void *)early_idt_handler_array[0]) /
                     EARLY_IDT_HANDLER_SIZE;
                *addr = (void *)xen_early_idt_handler_array[nr];
+               found = true;
        }
 
-       if (WARN_ON(ist != 0 && !ist_okay))
+       if (!found)
+               *addr = (void *)xen_asm_exc_xen_unknown_trap;
+
+       if (WARN_ON(found && ist != 0 && !ist_okay))
                return false;
 
        return true;
index 056430a1080bbcab9a575f06b2c78cc657cf421b..6ff3c887e0b99523cd69774b8de8f3009f69d92f 100644 (file)
@@ -74,7 +74,9 @@ void __init xen_hvm_smp_init(void)
        smp_ops.cpu_die = xen_hvm_cpu_die;
 
        if (!xen_have_vector_callback) {
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
                nopvspin = true;
+#endif
                return;
        }
 
index 1cb0e84b91610a33be6978b75c986729848876dc..53cf8aa35032d69948093f97c1f0836a3bc4075c 100644 (file)
@@ -178,6 +178,7 @@ xen_pv_trap asm_exc_simd_coprocessor_error
 #ifdef CONFIG_IA32_EMULATION
 xen_pv_trap entry_INT80_compat
 #endif
+xen_pv_trap asm_exc_xen_unknown_trap
 xen_pv_trap asm_exc_xen_hypervisor_callback
 
        __INIT
index 9e4eb0fc1c16e7fb2ecce1091c499de6a0878912..9e81d1052091fcbc9a71bdf6aa36bcfa50a221f7 100644 (file)
@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * limit 'something'.
         */
        /* no more than 50% of tags for async I/O */
-       bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+       bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
        /*
         * no more than 75% of tags for sync writes (25% extra tags
         * w.r.t. async I/O, to prevent async I/O from starving sync
         * writes)
         */
-       bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+       bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
 
        /*
         * In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
         * shortage.
         */
        /* no more than ~18% of tags for async I/O */
-       bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+       bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
        /* no more than ~37% of tags for sync writes (~20% extra tags) */
-       bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+       bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
 
        for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
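
The change above takes the shallow-depth limits from the per-word depth (1U << bt->sb.shift) instead of the total map depth (bt->sb.depth). As an illustration only, here is a minimal userspace sketch of the same fractions, assuming a hypothetical shift of 6 (64 tags per word); MAX_U is a local stand-in for the kernel's max():

#include <stdio.h>

#define MAX_U(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        unsigned int shift = 6;                 /* assumed: 64 tags per sbitmap word */
        unsigned int word_depth = 1U << shift;
        unsigned int depths[2][2];

        /* no weight-raising: 50% async, 75% sync writes */
        depths[0][0] = MAX_U(word_depth >> 1, 1U);
        depths[0][1] = MAX_U((word_depth * 3) >> 2, 1U);
        /* weight-raising active: ~18% async, ~37% sync writes */
        depths[1][0] = MAX_U((word_depth * 3) >> 4, 1U);
        depths[1][1] = MAX_U((word_depth * 6) >> 4, 1U);

        /* prints 32 48 12 24 for shift = 6 */
        printf("%u %u %u %u\n",
               depths[0][0], depths[0][1], depths[1][0], depths[1][1]);
        return 0;
}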
index 031114d454a6046c2d336f37a2d615c52ed5fdf1..4221a1539391c8eb1dfaf63e30e22aa2418ba00f 100644 (file)
@@ -1016,6 +1016,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
  */
 void blkcg_destroy_blkgs(struct blkcg *blkcg)
 {
+       might_sleep();
+
        spin_lock_irq(&blkcg->lock);
 
        while (!hlist_empty(&blkcg->blkg_list)) {
@@ -1023,14 +1025,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
                                                struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;
 
-               if (spin_trylock(&q->queue_lock)) {
-                       blkg_destroy(blkg);
-                       spin_unlock(&q->queue_lock);
-               } else {
+               if (need_resched() || !spin_trylock(&q->queue_lock)) {
+                       /*
+                        * Given that the system can accumulate a huge number
+                        * of blkgs in pathological cases, check to see if we
+                        * need to reschedule to avoid a softlockup.
+                        */
                        spin_unlock_irq(&blkcg->lock);
-                       cpu_relax();
+                       cond_resched();
                        spin_lock_irq(&blkcg->lock);
+                       continue;
                }
+
+               blkg_destroy(blkg);
+               spin_unlock(&q->queue_lock);
        }
 
        spin_unlock_irq(&blkcg->lock);
index c1458d9502f1c6566cfa22d4acd268f87457e08f..3616453ca28c8f3be4763b30ccc3acf7f700d127 100644 (file)
@@ -304,7 +304,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
                struct request_queue *q = hctx->queue;
                struct blk_mq_tag_set *set = q->tag_set;
 
-               if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &q->queue_flags))
+               if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return true;
                users = atomic_read(&set->active_queues_shared_sbitmap);
        } else {
index 419548e92d82f6216995293928b1c0e2f63f1d4c..9e741a4f351bed030e9a4804a0f2e0ff8493dd47 100644 (file)
@@ -45,10 +45,11 @@ static void disk_release_events(struct gendisk *disk);
 void set_capacity(struct gendisk *disk, sector_t sectors)
 {
        struct block_device *bdev = disk->part0;
+       unsigned long flags;
 
-       spin_lock(&bdev->bd_size_lock);
+       spin_lock_irqsave(&bdev->bd_size_lock, flags);
        i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-       spin_unlock(&bdev->bd_size_lock);
+       spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
 }
 EXPORT_SYMBOL(set_capacity);
 
index e7d776db803b12f7e784235a20fdc5989cbe6cc6..4601a845cd79edde9f4f86a57dd6fbcfae1eb954 100644 (file)
@@ -88,9 +88,11 @@ static int (*check_part[])(struct parsed_partitions *) = {
 
 static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
 {
-       spin_lock(&bdev->bd_size_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&bdev->bd_size_lock, flags);
        i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
-       spin_unlock(&bdev->bd_size_lock);
+       spin_unlock_irqrestore(&bdev->bd_size_lock, flags);
 }
 
 static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
@@ -384,7 +386,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
 
        err = blk_alloc_devt(bdev, &devt);
        if (err)
-               goto out_bdput;
+               goto out_put;
        pdev->devt = devt;
 
        /* delay uevent until 'holders' subdir is created */
index 8892908ad58ce40c2b868c48c957cc49c49d1ad9..788a4ba1e2e747de90b600b56a5b610ba4eb935b 100644 (file)
@@ -356,7 +356,8 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (ret)
                goto error_free_key;
 
-       if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
+       if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
+           sig->data_size) {
                ret = cert_sig_digest_update(sig, tfm);
                if (ret)
                        goto error_free_key;
index eacbf4f939900fe4f280f053c5ee1ac45a75a0c2..8f899f898ec9f903734693cd6f4ba8a593bbcc14 100644 (file)
@@ -107,6 +107,8 @@ do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
        preempt_enable();
 
        // bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
+       if (!min)
+               min = 1;
        speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
        tmpl->speed = speed;
 
index d4eac6d7e9fbc5659bc494402212d0d9eedca66e..2494138a6905ed105009894a8eb49ed2ee4ac4e6 100644 (file)
@@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size)
 
        ncomp = (struct acpi_iort_named_component *)node->node_data;
 
+       if (!ncomp->memory_address_limit) {
+               pr_warn(FW_BUG "Named component missing memory address limit\n");
+               return -EINVAL;
+       }
+
        *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
                        1ULL<<ncomp->memory_address_limit;
 
@@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size)
 
        rc = (struct acpi_iort_root_complex *)node->node_data;
 
+       if (!rc->memory_address_limit) {
+               pr_warn(FW_BUG "Root complex missing memory address limit\n");
+               return -EINVAL;
+       }
+
        *size = rc->memory_address_limit >= 64 ? U64_MAX :
                        1ULL<<rc->memory_address_limit;
 
@@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
                end = dmaaddr + size - 1;
                mask = DMA_BIT_MASK(ilog2(end) + 1);
                dev->bus_dma_limit = end;
-               dev->coherent_dma_mask = mask;
-               *dev->dma_mask = mask;
+               dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+               *dev->dma_mask = min(*dev->dma_mask, mask);
        }
 
        *dma_addr = dmaaddr;
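
For illustration, a small userspace sketch of the mask derivation above, where the computed mask now only narrows whatever the driver already set. DMA_BIT_MASK, ilog2 and min are replaced by local stand-ins (assumptions that mirror the kernel helpers), and the window base/size values are made up:

#include <stdio.h>
#include <stdint.h>

/* local stand-ins for the kernel helpers, assumed here for illustration */
#define DMA_BIT_MASK(n)  ((n) >= 64 ? ~0ULL : (1ULL << (n)) - 1)
static unsigned int ilog2_u64(uint64_t v) { return 63 - __builtin_clzll(v); }
static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
        uint64_t dmaaddr = 0x80000000ULL;       /* example window base */
        uint64_t size    = 0x40000000ULL;       /* example window size: 1 GiB */
        uint64_t end     = dmaaddr + size - 1;
        uint64_t mask    = DMA_BIT_MASK(ilog2_u64(end) + 1);
        uint64_t coherent_dma_mask = DMA_BIT_MASK(64);

        /* only narrow a mask the driver may already have configured */
        coherent_dma_mask = min_u64(coherent_dma_mask, mask);

        printf("end=%#llx mask=%#llx coherent=%#llx\n",
               (unsigned long long)end, (unsigned long long)mask,
               (unsigned long long)coherent_dma_mask);
        return 0;
}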
index 96869f1538b93a1e6500f62df4b1fb4d41b78684..bfca116482b8bb5d22defaf301cca48384700740 100644 (file)
@@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
        if (add_uevent_var(env, "MODALIAS="))
                return -ENOMEM;
 
-       len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
-                                 sizeof(env->buf) - env->buflen);
-       if (len < 0)
-               return len;
-
-       env->buflen += len;
-       if (!adev->data.of_compatible)
-               return 0;
-
-       if (len > 0 && add_uevent_var(env, "MODALIAS="))
-               return -ENOMEM;
-
-       len = create_of_modalias(adev, &env->buf[env->buflen - 1],
-                                sizeof(env->buf) - env->buflen);
+       if (adev->data.of_compatible)
+               len = create_of_modalias(adev, &env->buf[env->buflen - 1],
+                                        sizeof(env->buf) - env->buflen);
+       else
+               len = create_pnp_modalias(adev, &env->buf[env->buflen - 1],
+                                         sizeof(env->buf) - env->buflen);
        if (len < 0)
                return len;
 
index 58ff36340cd7c595521e753897da6d2dfc2a91be..22566b4b3150ada418ed25f377c7b585d58ce50b 100644 (file)
@@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
        if (!device)
                return -EINVAL;
 
+       *device = NULL;
+
        status = acpi_get_data_full(handle, acpi_scan_drop_device,
                                    (void **)device, callback);
        if (ACPI_FAILURE(status) || !*device) {
@@ -2121,12 +2123,12 @@ void acpi_walk_dep_device_list(acpi_handle handle)
        list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
                if (dep->supplier == handle) {
                        acpi_bus_get_device(dep->consumer, &adev);
-                       if (!adev)
-                               continue;
 
-                       adev->dep_unmet--;
-                       if (!adev->dep_unmet)
-                               acpi_bus_attach(adev, true);
+                       if (adev) {
+                               adev->dep_unmet--;
+                               if (!adev->dep_unmet)
+                                       acpi_bus_attach(adev, true);
+                       }
 
                        list_del(&dep->node);
                        kfree(dep);
index 12c0ece746f04ac763bf2630185d40fe9ac36fe7..859b1de31ddc0cfdaf04d8f4dc36c4210f6ca2f2 100644 (file)
@@ -174,6 +174,8 @@ struct acpi_thermal {
        struct thermal_zone_device *thermal_zone;
        int kelvin_offset;      /* in millidegrees */
        struct work_struct thermal_check_work;
+       struct mutex thermal_check_lock;
+       refcount_t thermal_check_count;
 };
 
 /* --------------------------------------------------------------------------
@@ -495,14 +497,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
        return 0;
 }
 
-static void acpi_thermal_check(void *data)
-{
-       struct acpi_thermal *tz = data;
-
-       thermal_zone_device_update(tz->thermal_zone,
-                                  THERMAL_EVENT_UNSPECIFIED);
-}
-
 /* sys I/F for generic thermal sysfs support */
 
 static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@@ -900,6 +894,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
                                  Driver Interface
    -------------------------------------------------------------------------- */
 
+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+       if (!work_pending(&tz->thermal_check_work))
+               queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
 static void acpi_thermal_notify(struct acpi_device *device, u32 event)
 {
        struct acpi_thermal *tz = acpi_driver_data(device);
@@ -910,17 +910,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
 
        switch (event) {
        case ACPI_THERMAL_NOTIFY_TEMPERATURE:
-               acpi_thermal_check(tz);
+               acpi_queue_thermal_check(tz);
                break;
        case ACPI_THERMAL_NOTIFY_THRESHOLDS:
                acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
-               acpi_thermal_check(tz);
+               acpi_queue_thermal_check(tz);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
        case ACPI_THERMAL_NOTIFY_DEVICES:
                acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
-               acpi_thermal_check(tz);
+               acpi_queue_thermal_check(tz);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                  dev_name(&device->dev), event, 0);
                break;
@@ -1020,7 +1020,25 @@ static void acpi_thermal_check_fn(struct work_struct *work)
 {
        struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
                                               thermal_check_work);
-       acpi_thermal_check(tz);
+
+       /*
+        * In general, it is not sufficient to check the pending bit, because
+        * subsequent instances of this function may be queued after one of them
+        * has started running (e.g. if _TMP sleeps).  Avoid bailing out if just
+        * one of them is running, though, because it may have done the actual
+        * check some time ago, so allow at least one of them to block on the
+        * mutex while another one is running the update.
+        */
+       if (!refcount_dec_not_one(&tz->thermal_check_count))
+               return;
+
+       mutex_lock(&tz->thermal_check_lock);
+
+       thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+       refcount_inc(&tz->thermal_check_count);
+
+       mutex_unlock(&tz->thermal_check_lock);
 }
 
 static int acpi_thermal_add(struct acpi_device *device)
@@ -1052,6 +1070,8 @@ static int acpi_thermal_add(struct acpi_device *device)
        if (result)
                goto free_memory;
 
+       refcount_set(&tz->thermal_check_count, 3);
+       mutex_init(&tz->thermal_check_lock);
        INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);
 
        pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
@@ -1117,7 +1137,7 @@ static int acpi_thermal_resume(struct device *dev)
                tz->state.active |= tz->trips.active[i].flags.enabled;
        }
 
-       queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+       acpi_queue_thermal_check(tz);
 
        return AE_OK;
 }
index 14f1658167425a5d20f26cd3d73bcf4ad1a17948..6eb4c7a904c560d9ebaf6e7e871c364ffac123d1 100644 (file)
@@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
 #endif
 #endif /* !CONFIG_SRCU */
 
+static bool device_is_ancestor(struct device *dev, struct device *target)
+{
+       while (target->parent) {
+               target = target->parent;
+               if (dev == target)
+                       return true;
+       }
+       return false;
+}
+
 /**
  * device_is_dependent - Check if one device depends on another one
  * @dev: Device to check dependencies for.
@@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
        struct device_link *link;
        int ret;
 
-       if (dev == target)
+       /*
+        * The "ancestors" check is needed to catch the case when the target
+        * device has not been completely initialized yet and it is still
+        * missing from the list of children of its parent device.
+        */
+       if (dev == target || device_is_ancestor(dev, target))
                return 1;
 
        ret = device_for_each_child(dev, target, device_is_dependent);
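
As the comment notes, a half-initialized target may not yet be linked into its parent's child list, so the dependency check also walks target->parent upwards. A standalone sketch of that ancestor walk, using a hypothetical struct dev with only name and parent fields:

#include <stdbool.h>
#include <stdio.h>

struct dev {
        const char *name;
        struct dev *parent;
};

/* returns true if 'dev' sits anywhere on target's parent chain */
static bool dev_is_ancestor(struct dev *dev, struct dev *target)
{
        while (target->parent) {
                target = target->parent;
                if (dev == target)
                        return true;
        }
        return false;
}

int main(void)
{
        struct dev root = { "root", NULL };
        struct dev bus  = { "bus",  &root };
        struct dev leaf = { "leaf", &bus };

        printf("%d %d\n", dev_is_ancestor(&root, &leaf),   /* 1 */
                          dev_is_ancestor(&leaf, &root));  /* 0 */
        return 0;
}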
@@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
        struct device *con = link->consumer;
        char *buf;
 
-       len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+       len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+                 strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+       len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf)
@@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
        if (ret)
                goto err_con;
 
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_con_dev;
 
-       snprintf(buf, len, "supplier:%s", dev_name(sup));
+       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
        if (ret)
                goto err_sup_dev;
@@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
        goto out;
 
 err_sup_dev:
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
 err_con_dev:
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
@@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
        sysfs_remove_link(&link->link_dev.kobj, "consumer");
        sysfs_remove_link(&link->link_dev.kobj, "supplier");
 
-       len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
+       len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
+                 strlen(dev_bus_name(con)) + strlen(dev_name(con)));
+       len += strlen(":");
        len += strlen("supplier:") + 1;
        buf = kzalloc(len, GFP_KERNEL);
        if (!buf) {
@@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
                return;
        }
 
-       snprintf(buf, len, "supplier:%s", dev_name(sup));
+       snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
        sysfs_remove_link(&con->kobj, buf);
-       snprintf(buf, len, "consumer:%s", dev_name(con));
+       snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
        sysfs_remove_link(&sup->kobj, buf);
        kfree(buf);
 }
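
The symlink names now carry the bus name as well ("consumer:<bus>:<device>"), so the buffer sizing accounts for both strings, the extra ':', the 9-byte "supplier:"/"consumer:" prefix and the terminating NUL. A userspace sketch of that sizing with made-up supplier/consumer identifiers:

#include <stdio.h>
#include <string.h>

static size_t max_sz(size_t a, size_t b) { return a > b ? a : b; }

int main(void)
{
        /* hypothetical supplier/consumer identifiers */
        const char *sup_bus = "platform", *sup_name = "ff000000.dma";
        const char *con_bus = "spi",      *con_name = "spi0.0";
        size_t len;

        /* longest "<bus>" + "<name>" of the two ends, plus the ':' between
         * them, the 9-byte "supplier:"/"consumer:" prefix and the NUL */
        len = max_sz(strlen(sup_bus) + strlen(sup_name),
                     strlen(con_bus) + strlen(con_name));
        len += strlen(":");
        len += strlen("supplier:") + 1;

        char buf[len];

        snprintf(buf, len, "consumer:%s:%s", con_bus, con_name);
        printf("len=%zu name=\"%s\"\n", len, buf);
        return 0;
}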
@@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,
 
        link->link_dev.class = &devlink_class;
        device_set_pm_not_required(&link->link_dev);
-       dev_set_name(&link->link_dev, "%s--%s",
-                    dev_name(supplier), dev_name(consumer));
+       dev_set_name(&link->link_dev, "%s:%s--%s:%s",
+                    dev_bus_name(supplier), dev_name(supplier),
+                    dev_bus_name(consumer), dev_name(consumer));
        if (device_register(&link->link_dev)) {
                put_device(consumer);
                put_device(supplier);
@@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
         * never change once they are set, so they don't need special care.
         */
        drv = READ_ONCE(dev->driver);
-       return drv ? drv->name :
-                       (dev->bus ? dev->bus->name :
-                       (dev->class ? dev->class->name : ""));
+       return drv ? drv->name : dev_bus_name(dev);
 }
 EXPORT_SYMBOL(dev_driver_string);
 
index 2f32f38a11ed0b1c176bffeaf94005216152418c..9179825ff646f4e3aeb2fe87455caaec8db233de 100644 (file)
@@ -370,13 +370,6 @@ static void driver_bound(struct device *dev)
 
        device_pm_check_callbacks(dev);
 
-       /*
-        * Reorder successfully probed devices to the end of the device list.
-        * This ensures that suspend/resume order matches probe order, which
-        * is usually what drivers rely on.
-        */
-       device_pm_move_to_tail(dev);
-
        /*
         * Make sure the device is no longer in one of the deferred lists and
         * kick off retrying all pending devices
@@ -619,6 +612,8 @@ dev_groups_failed:
        else if (drv->remove)
                drv->remove(dev);
 probe_failed:
+       kfree(dev->dma_range_map);
+       dev->dma_range_map = NULL;
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
                                             BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
index 95fd1549f87de38dcbad0846e923b06ee25bdbd2..8456d8384ac8e14842c603554516aede25239f6c 100644 (file)
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
                return -ERANGE;
 
        nvec = platform_irq_count(dev);
+       if (nvec < 0)
+               return nvec;
 
        if (nvec < minvec)
                return -ENOSPC;
index 6727358e147dd8f8d3fb738951276f5881c20a66..e6ea5d344f87b6190e1c5aaf335875bbf7dc45f9 100644 (file)
@@ -1022,6 +1022,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        if (!sock)
                return err;
 
+       /*
+        * We need to make sure we don't get any errant requests while we're
+        * reallocating the ->socks array.
+        */
+       blk_mq_freeze_queue(nbd->disk->queue);
+
        if (!netlink && !nbd->task_setup &&
            !test_bit(NBD_RT_BOUND, &config->runtime_flags))
                nbd->task_setup = current;
@@ -1060,10 +1066,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        nsock->cookie = 0;
        socks[config->num_connections++] = nsock;
        atomic_inc(&config->live_connections);
+       blk_mq_unfreeze_queue(nbd->disk->queue);
 
        return 0;
 
 put_socket:
+       blk_mq_unfreeze_queue(nbd->disk->queue);
        sockfd_put(sock);
        return err;
 }
index 148b871f263ba91629b52001786074a96fe13a55..fce0a54df0e5ff4a204e98106451c5b9bc6cfb29 100644 (file)
@@ -6,7 +6,10 @@
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT)
+static inline sector_t mb_to_sects(unsigned long mb)
+{
+       return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
+}
 
 static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
 {
@@ -77,12 +80,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
                return -EINVAL;
        }
 
-       zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity);
-       dev_capacity_sects = MB_TO_SECTS(dev->size);
-       dev->zone_size_sects = MB_TO_SECTS(dev->zone_size);
-       dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects);
-       if (dev_capacity_sects & (dev->zone_size_sects - 1))
-               dev->nr_zones++;
+       zone_capacity_sects = mb_to_sects(dev->zone_capacity);
+       dev_capacity_sects = mb_to_sects(dev->size);
+       dev->zone_size_sects = mb_to_sects(dev->zone_size);
+       dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
+               >> ilog2(dev->zone_size_sects);
 
        dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
                                    GFP_KERNEL | __GFP_ZERO);
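
Moving the conversion into a helper keeps the multiplication in sector_t, and round_up() replaces the explicit "partial last zone" increment. A small userspace sketch of the zone-count math; ROUND_UP is a local stand-in for the kernel's round_up() (power-of-two alignment assumed) and the sizes are illustrative:

#include <stdio.h>
#include <stdint.h>

#define SZ_1M        (1UL << 20)
#define SECTOR_SHIFT 9

/* stand-in for the kernel's round_up(); 'align' must be a power of two */
#define ROUND_UP(x, align)  (((x) + (align) - 1) & ~((uint64_t)(align) - 1))

static uint64_t mb_to_sects(unsigned long mb)
{
        return ((uint64_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

int main(void)
{
        unsigned long dev_size_mb = 1000, zone_size_mb = 256;
        uint64_t dev_sects  = mb_to_sects(dev_size_mb);
        uint64_t zone_sects = mb_to_sects(zone_size_mb);
        unsigned int ilog2_zone = 63 - __builtin_clzll(zone_sects);
        unsigned int nr_zones = ROUND_UP(dev_sects, zone_sects) >> ilog2_zone;

        /* 1000 MB with 256 MB zones -> 4 zones, the last one partial */
        printf("nr_zones=%u\n", nr_zones);
        return 0;
}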
index 5265975b3fba5c0c726b7ed5bd5f595cd6bd5145..e1c6798889f48afe9e6fc14a8ef5d93f919d365c 100644 (file)
@@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
        if (info->feature_discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
                blk_queue_max_discard_sectors(rq, get_capacity(gd));
-               rq->limits.discard_granularity = info->discard_granularity;
+               rq->limits.discard_granularity = info->discard_granularity ?:
+                                                info->physical_sector_size;
                rq->limits.discard_alignment = info->discard_alignment;
                if (info->feature_secdiscard)
                        blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
@@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info)
 
 static void blkfront_setup_discard(struct blkfront_info *info)
 {
-       int err;
-       unsigned int discard_granularity;
-       unsigned int discard_alignment;
-
        info->feature_discard = 1;
-       err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
-               "discard-granularity", "%u", &discard_granularity,
-               "discard-alignment", "%u", &discard_alignment,
-               NULL);
-       if (!err) {
-               info->discard_granularity = discard_granularity;
-               info->discard_alignment = discard_alignment;
-       }
+       info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
+                                                        "discard-granularity",
+                                                        0);
+       info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
+                                                      "discard-alignment", 0);
        info->feature_secdiscard =
                !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
                                       0);
index 845b6c43fef897bc1cf562d6f2251c01fb8c9861..2344d560b1449c0f8115dbba82f96ac9ae068aaa 100644 (file)
@@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct device *dev)
                        ret = of_platform_default_populate(child, NULL, dev);
                        if (ret) {
                                dev_err(dev, "failed to populate module\n");
+                               of_node_put(child);
                                return ret;
                        }
                }
index c5eb46cbf388b507608f96ed1b55082d42d54981..01a3d0cd08edc7271215e0e61c5a1d5497ac33ac 100644 (file)
@@ -16,6 +16,7 @@
 
 static int simple_pm_bus_probe(struct platform_device *pdev)
 {
+       const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev);
        struct device_node *np = pdev->dev.of_node;
 
        dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -23,7 +24,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
 
        if (np)
-               of_platform_populate(np, NULL, NULL, &pdev->dev);
+               of_platform_populate(np, NULL, lookup, &pdev->dev);
 
        return 0;
 }
index 3061896503f300accf3dda9782864e2e19288c84..47d9ec3abd2f7d8a31a5624d3ada4f18268d8418 100644 (file)
@@ -6,8 +6,6 @@ config MXC_CLK
 
 config MXC_CLK_SCU
        tristate
-       depends on ARCH_MXC
-       depends on IMX_SCU && HAVE_ARM_SMCCC
 
 config CLK_IMX1
        def_bool SOC_IMX1
index eea69d498bd273887d04bd6b85cab3ad5c23bf2c..7aa7f4a9564fde8617f9e2fd5819219dfadbc20d 100644 (file)
@@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int mmp2_audio_clk_suspend(struct device *dev)
 {
        struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
 
@@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
+static int mmp2_audio_clk_resume(struct device *dev)
 {
        struct mmp2_audio_clk *priv = dev_get_drvdata(dev);
 
@@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev)
 
        return 0;
 }
+#endif
 
 static const struct dev_pm_ops mmp2_audio_clk_pm_ops = {
        SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL)
index d82d725ac2319f43e29fa96c37ad9663f1315b3d..b05901b249172221fb464f74272c31042e12f4c2 100644 (file)
@@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = {
        },
 };
 
-static struct clk_branch gcc_camera_ahb_clk = {
-       .halt_reg = 0xb008,
-       .halt_check = BRANCH_HALT,
-       .hwcg_reg = 0xb008,
-       .hwcg_bit = 1,
-       .clkr = {
-               .enable_reg = 0xb008,
-               .enable_mask = BIT(0),
-               .hw.init = &(struct clk_init_data){
-                       .name = "gcc_camera_ahb_clk",
-                       .ops = &clk_branch2_ops,
-               },
-       },
-};
-
 static struct clk_branch gcc_camera_hf_axi_clk = {
        .halt_reg = 0xb020,
        .halt_check = BRANCH_HALT,
@@ -2317,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = {
        [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
        [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
        [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
-       [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
        [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
        [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr,
        [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
@@ -2519,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev)
 
        /*
         * Keep the clocks always-ON
-        * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK
-        * GCC_GPU_CFG_AHB_CLK
+        * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK,
+        * GCC_DISP_AHB_CLK, GCC_GPU_CFG_AHB_CLK
         */
        regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0));
        regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0));
+       regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0));
        regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0));
        regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0));
 
index 6cb6617b8d88c25cf61cb66ea0560b40b7dbd162..ab594a0f0c4084aeb30931f2956407d921da73c3 100644 (file)
@@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
                .name = "gcc_sdcc2_apps_clk_src",
                .parent_data = gcc_parent_data_4,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
                .name = "gcc_sdcc4_apps_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = 3,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
index a60aee1a1a29150a94fd4a43d69f4154023baab4..65df9ef5b5bc053d8b85d56063fdf92a833f05aa 100644 (file)
@@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
        return len;
 }
 
-static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
-                                          struct counter_count *count,
-                                          void *ext_priv, char *buf)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       u32 qposinit;
-
-       regmap_read(priv->regmap32, QPOSINIT, &qposinit);
-
-       return sprintf(buf, "%u\n", qposinit);
-}
-
-static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
-                                           struct counter_count *count,
-                                           void *ext_priv, const char *buf,
-                                           size_t len)
-{
-       struct ti_eqep_cnt *priv = counter->priv;
-       int err;
-       u32 res;
-
-       err = kstrtouint(buf, 0, &res);
-       if (err < 0)
-               return err;
-
-       regmap_write(priv->regmap32, QPOSINIT, res);
-
-       return len;
-}
-
 static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
                                            struct counter_count *count,
                                            void *ext_priv, char *buf)
@@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
                .read   = ti_eqep_position_ceiling_read,
                .write  = ti_eqep_position_ceiling_write,
        },
-       {
-               .name   = "floor",
-               .read   = ti_eqep_position_floor_read,
-               .write  = ti_eqep_position_floor_write,
-       },
        {
                .name   = "enable",
                .read   = ti_eqep_position_enable_read,
index bbd51703e738b972a9437476e8cb4b60b31f5c8f..e535f28a80283e36db6ca9d1a9d484078ca40c0d 100644 (file)
@@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP
 config CRYPTO_DEV_OMAP_SHAM
        tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator"
        depends on ARCH_OMAP2PLUS
+       select CRYPTO_ENGINE
        select CRYPTO_SHA1
        select CRYPTO_MD5
        select CRYPTO_SHA256
index fabfaaccca8720574134f3531d2ac32270e46983..fa56b45620c7962dc45e3efef9792929e1574f38 100644 (file)
@@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
        __le32 byte_cnt;
        union {
                __le32 src;
-               dma_addr_t src_dma;
+               u32 src_dma;
        };
        union {
                __le32 dst;
-               dma_addr_t dst_dma;
+               u32 dst_dma;
        };
        __le32 next_dma;
 
index 34f53d898acb0f9f61c1ff3573d7428be52701df..e1926483ae2fdc910a1cc785ac390c11c2ebd408 100644 (file)
@@ -3,8 +3,9 @@
  * apple-properties.c - EFI device properties on Macs
  * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de>
  *
- * Note, all properties are considered as u8 arrays.
- * To get a value of any of them the caller must use device_property_read_u8_array().
+ * Properties are stored either as:
+ * u8 arrays which can be retrieved with device_property_read_u8_array() or
+ * booleans which can be queried with device_property_present().
  */
 
 #define pr_fmt(fmt) "apple-properties: " fmt
@@ -88,8 +89,12 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header,
 
                entry_data = ptr + key_len + sizeof(val_len);
                entry_len = val_len - sizeof(val_len);
-               entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
-                                                      entry_len);
+               if (entry_len)
+                       entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data,
+                                                              entry_len);
+               else
+                       entry[i] = PROPERTY_ENTRY_BOOL(key);
+
                if (dump_properties) {
                        dev_info(dev, "property: %s\n", key);
                        print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET,
index 1d2e5b85d7ca8458f3d67bac38b964f4f9100ef6..c027d99f2a599ea81f478dfa1c6c8b0ea2a970dd 100644 (file)
@@ -13,6 +13,7 @@ config IMX_DSP
 config IMX_SCU
        bool "IMX SCU Protocol driver"
        depends on IMX_MBOX
+       select SOC_BUS
        help
          The System Controller Firmware (SCFW) is a low-level system function
          which runs on a dedicated Cortex-M core to provide power, clock, and
index c70f46e80a3b72ea828ddfdb526f8a7632d38030..dea65d85594fcceb84cb60c721714f5276904734 100644 (file)
@@ -521,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU
 
 config GPIO_SIFIVE
        bool "SiFive GPIO support"
-       depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
+       depends on OF_GPIO
+       select IRQ_DOMAIN_HIERARCHY
        select GPIO_GENERIC
        select GPIOLIB_IRQCHIP
        select REGMAP_MMIO
@@ -597,6 +598,8 @@ config GPIO_TEGRA
        default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
        depends on OF_GPIO
+       select GPIOLIB_IRQCHIP
+       select IRQ_DOMAIN_HIERARCHY
        help
          Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
index 672681a976f50ee08d593ee5db71d8b5c64f8766..a912a8fed197ac32bb393a87ba17c63421f12184 100644 (file)
@@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
        else
                state->duty_cycle = 1;
 
+       val = (unsigned long long) u; /* on duration */
        regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
-       val = (unsigned long long) u * NSEC_PER_SEC;
+       val += (unsigned long long) u; /* period = on + off duration */
+       val *= NSEC_PER_SEC;
        do_div(val, mvpwm->clk_rate);
-       if (val < state->duty_cycle) {
+       if (val > UINT_MAX)
+               state->period = UINT_MAX;
+       else if (val)
+               state->period = val;
+       else
                state->period = 1;
-       } else {
-               val -= state->duty_cycle;
-               if (val > UINT_MAX)
-                       state->period = UINT_MAX;
-               else if (val)
-                       state->period = val;
-               else
-                       state->period = 1;
-       }
 
        regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
        if (u)
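
The read-back above now sums the on and off durations before scaling to nanoseconds, so the reported period is on+off rather than being derived from the off time alone. A short userspace sketch of the conversion with hypothetical counter values and clock rate:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint32_t on_cnt   = 12500;        /* hypothetical blink-on counter */
        uint32_t off_cnt  = 12500;        /* hypothetical blink-off counter */
        uint64_t clk_rate = 25000000;     /* hypothetical 25 MHz clock */
        uint64_t duty, period;

        duty = (uint64_t)on_cnt * NSEC_PER_SEC / clk_rate;
        if (!duty)
                duty = 1;

        period = ((uint64_t)on_cnt + off_cnt) * NSEC_PER_SEC / clk_rate;
        if (period > UINT_MAX)
                period = UINT_MAX;
        else if (!period)
                period = 1;

        /* 12500 + 12500 cycles at 25 MHz -> 1 ms period, 50% duty */
        printf("duty=%llu ns period=%llu ns\n",
               (unsigned long long)duty, (unsigned long long)period);
        return 0;
}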
index 12b679ca552cccea423e04874e1f2e3c11bef7a6..1631727bf0da1ce90d87b36aa3625cc79d12fab1 100644 (file)
@@ -776,6 +776,8 @@ static void edge_detector_stop(struct line *line)
        cancel_delayed_work_sync(&line->work);
        WRITE_ONCE(line->sw_debounced, 0);
        WRITE_ONCE(line->eflags, 0);
+       if (line->desc)
+               WRITE_ONCE(line->desc->debounce_period_us, 0);
        /* do not change line->level - see comment in debounced_value() */
 }
 
@@ -1979,6 +1981,21 @@ struct gpio_chardev_data {
 #endif
 };
 
+static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
+{
+       struct gpio_device *gdev = cdev->gdev;
+       struct gpiochip_info chipinfo;
+
+       memset(&chipinfo, 0, sizeof(chipinfo));
+
+       strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
+       strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
+       chipinfo.lines = gdev->ngpio;
+       if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
+               return -EFAULT;
+       return 0;
+}
+
 #ifdef CONFIG_GPIO_CDEV_V1
 /*
  * returns 0 if the versions match, else the previously selected ABI version
@@ -1993,6 +2010,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,
 
        return abiv;
 }
+
+static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
+                          bool watch)
+{
+       struct gpio_desc *desc;
+       struct gpioline_info lineinfo;
+       struct gpio_v2_line_info lineinfo_v2;
+
+       if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
+               return -EFAULT;
+
+       /* this doubles as a range check on line_offset */
+       desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       if (watch) {
+               if (lineinfo_ensure_abi_version(cdev, 1))
+                       return -EPERM;
+
+               if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
+                       return -EBUSY;
+       }
+
+       gpio_desc_to_lineinfo(desc, &lineinfo_v2);
+       gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
+
+       if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
+               if (watch)
+                       clear_bit(lineinfo.line_offset, cdev->watched_lines);
+               return -EFAULT;
+       }
+
+       return 0;
+}
 #endif
 
 static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
@@ -2030,6 +2082,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
        return 0;
 }
 
+static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
+{
+       __u32 offset;
+
+       if (copy_from_user(&offset, ip, sizeof(offset)))
+               return -EFAULT;
+
+       if (offset >= cdev->gdev->ngpio)
+               return -EINVAL;
+
+       if (!test_and_clear_bit(offset, cdev->watched_lines))
+               return -EBUSY;
+
+       return 0;
+}
+
 /*
  * gpio_ioctl() - ioctl handler for the GPIO chardev
  */
@@ -2037,80 +2105,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct gpio_chardev_data *cdev = file->private_data;
        struct gpio_device *gdev = cdev->gdev;
-       struct gpio_chip *gc = gdev->chip;
        void __user *ip = (void __user *)arg;
-       __u32 offset;
 
        /* We fail any subsequent ioctl():s when the chip is gone */
-       if (!gc)
+       if (!gdev->chip)
                return -ENODEV;
 
        /* Fill in the struct and pass to userspace */
        if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
-               struct gpiochip_info chipinfo;
-
-               memset(&chipinfo, 0, sizeof(chipinfo));
-
-               strscpy(chipinfo.name, dev_name(&gdev->dev),
-                       sizeof(chipinfo.name));
-               strscpy(chipinfo.label, gdev->label,
-                       sizeof(chipinfo.label));
-               chipinfo.lines = gdev->ngpio;
-               if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
-                       return -EFAULT;
-               return 0;
+               return chipinfo_get(cdev, ip);
 #ifdef CONFIG_GPIO_CDEV_V1
-       } else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
-               struct gpio_desc *desc;
-               struct gpioline_info lineinfo;
-               struct gpio_v2_line_info lineinfo_v2;
-
-               if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
-                       return -EFAULT;
-
-               /* this doubles as a range check on line_offset */
-               desc = gpiochip_get_desc(gc, lineinfo.line_offset);
-               if (IS_ERR(desc))
-                       return PTR_ERR(desc);
-
-               gpio_desc_to_lineinfo(desc, &lineinfo_v2);
-               gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
-               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
-                       return -EFAULT;
-               return 0;
        } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
                return linehandle_create(gdev, ip);
        } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
                return lineevent_create(gdev, ip);
-       } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
-               struct gpio_desc *desc;
-               struct gpioline_info lineinfo;
-               struct gpio_v2_line_info lineinfo_v2;
-
-               if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
-                       return -EFAULT;
-
-               /* this doubles as a range check on line_offset */
-               desc = gpiochip_get_desc(gc, lineinfo.line_offset);
-               if (IS_ERR(desc))
-                       return PTR_ERR(desc);
-
-               if (lineinfo_ensure_abi_version(cdev, 1))
-                       return -EPERM;
-
-               if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
-                       return -EBUSY;
-
-               gpio_desc_to_lineinfo(desc, &lineinfo_v2);
-               gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);
-
-               if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
-                       clear_bit(lineinfo.line_offset, cdev->watched_lines);
-                       return -EFAULT;
-               }
-
-               return 0;
+       } else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
+                  cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
+               return lineinfo_get_v1(cdev, ip,
+                                      cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
 #endif /* CONFIG_GPIO_CDEV_V1 */
        } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
                   cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
@@ -2119,16 +2131,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        } else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
                return linereq_create(gdev, ip);
        } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
-               if (copy_from_user(&offset, ip, sizeof(offset)))
-                       return -EFAULT;
-
-               if (offset >= cdev->gdev->ngpio)
-                       return -EINVAL;
-
-               if (!test_and_clear_bit(offset, cdev->watched_lines))
-                       return -EBUSY;
-
-               return 0;
+               return lineinfo_unwatch(cdev, ip);
        }
        return -EINVAL;
 }
index b02cc2abd3b680263cdc9c5cd11c278969c9b9ee..97eec8d8dbdc42bb8b3100d1c338c5811f264396 100644 (file)
@@ -603,7 +603,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
                ret = gdev->id;
                goto err_free_gdev;
        }
-       dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+
+       ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id);
+       if (ret)
+               goto err_free_ida;
+
        device_initialize(&gdev->dev);
        dev_set_drvdata(&gdev->dev, gdev);
        if (gc->parent && gc->parent->driver)
@@ -617,7 +621,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
        gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
        if (!gdev->descs) {
                ret = -ENOMEM;
-               goto err_free_ida;
+               goto err_free_dev_name;
        }
 
        if (gc->ngpio == 0) {
@@ -768,6 +772,8 @@ err_free_label:
        kfree_const(gdev->label);
 err_free_descs:
        kfree(gdev->descs);
+err_free_dev_name:
+       kfree(dev_name(&gdev->dev));
 err_free_ida:
        ida_free(&gpio_ida, gdev->id);
 err_free_gdev:
@@ -1489,6 +1495,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
                type = IRQ_TYPE_NONE;
        }
 
+       if (gc->to_irq)
+               chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+
        gc->to_irq = gpiochip_to_irq;
        gc->irq.default_type = type;
        gc->irq.lock_key = lock_key;
@@ -2548,7 +2557,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
                struct gpio_chip *gc = desc_array[i]->gdev->chip;
                unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)];
                unsigned long *mask, *bits;
-               int first, j, ret;
+               int first, j;
 
                if (likely(gc->ngpio <= FASTPATH_NGPIO)) {
                        mask = fastpath;
index 2d991da2cead7ed7386f8e7828d7a89d27e56d55..d1ed4f8df2b76d7dd41286b52426dfbde175f33f 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/sched/task.h>
 
 #include "amdgpu_object.h"
+#include "amdgpu_gem.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_dma_buf.h"
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        struct sg_table *sg = NULL;
        uint64_t user_addr = 0;
        struct amdgpu_bo *bo;
-       struct amdgpu_bo_param bp;
+       struct drm_gem_object *gobj;
        u32 domain, alloc_domain;
        u64 alloc_flags;
        int ret;
@@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
                        va, size, domain_string(alloc_domain));
 
-       memset(&bp, 0, sizeof(bp));
-       bp.size = size;
-       bp.byte_align = 1;
-       bp.domain = alloc_domain;
-       bp.flags = alloc_flags;
-       bp.type = bo_type;
-       bp.resv = NULL;
-       ret = amdgpu_bo_create(adev, &bp, &bo);
+       ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+                                      bo_type, NULL, &gobj);
        if (ret) {
                pr_debug("Failed to create BO on domain %s. ret %d\n",
-                               domain_string(alloc_domain), ret);
+                        domain_string(alloc_domain), ret);
                goto err_bo_create;
        }
+       bo = gem_to_amdgpu_bo(gobj);
        if (bo_type == ttm_bo_type_sg) {
                bo->tbo.sg = sg;
                bo->tbo.ttm->sg = sg;
index 087afab67e2216c993c6fc0f7b8f1ff9294b1e03..cab1ebaf6d6299a365c272b90055cd8e847104fb 100644 (file)
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
index f764803c53a4bb2466485d12e0f7c75099bf0e41..48cb33e5b3826caac252debfc011dc6b2c91c322 100644 (file)
@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
                                       struct drm_file *file_priv,
                                       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-       struct drm_gem_object *obj;
        struct amdgpu_framebuffer *amdgpu_fb;
+       struct drm_gem_object *obj;
+       struct amdgpu_bo *bo;
+       uint32_t domains;
        int ret;
 
        obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
        }
 
        /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
-       if (obj->import_attach) {
+       bo = gem_to_amdgpu_bo(obj);
+       domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+       if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
                drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
                return ERR_PTR(-EINVAL);
        }
index d0a1fee1f5f6f9761dc0c56ea9d7013329ab946b..174a73eb23f08117528f8a458a68e57d50f89a72 100644 (file)
@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                resv = vm->root.base.bo->tbo.base.resv;
        }
 
-retry:
        initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     initial_domain,
                                     flags, ttm_bo_type_device, resv, &gobj);
index 25ec4d57333f6a9786e98551044e2476cb8fc39f..b4c8e5d5c763cd5416b712e60c3a7ec2a10ed44a 100644 (file)
@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return -EINVAL;
 
        /* A shared bo cannot be migrated to VRAM */
-       if (bo->prime_shared_count) {
+       if (bo->prime_shared_count || bo->tbo.base.import_attach) {
                if (domain & AMDGPU_GEM_DOMAIN_GTT)
                        domain = AMDGPU_GEM_DOMAIN_GTT;
                else
index 619d34c041ee025f89a4bf6f892e04df78c64a56..d86b42a36560183b4d1ef0bde1a199800bec4285 100644 (file)
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid                      0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX     0
 
+#define mmCGTS_TCC_DISABLE_Vangogh                0x5006
+#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX       1
+#define mmCGTS_USER_TCC_DISABLE_Vangogh                0x5007
+#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX       1
 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh                0x0025
 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX       1
 #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh                0x0026
 #define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX    1
 #define mmSPI_CONFIG_CNTL_Vangogh                0x2440
 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX       1
+#define mmGCR_GENERAL_CNTL_Vangogh               0x1580
+#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX      0
 
 #define mmCP_HYP_PFP_UCODE_ADDR                        0x5814
 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX       1
@@ -3244,7 +3250,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -4934,8 +4940,18 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
 static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
 {
        /* TCCs are global (not instanced). */
-       uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
-                              RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+       uint32_t tcc_disable;
+
+       switch (adev->asic_type) {
+       case CHIP_VANGOGH:
+               tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) |
+                               RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh);
+               break;
+       default:
+               tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+                               RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+               break;
+       }
 
        adev->gfx.config.tcc_disabled_mask =
                REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
index 07104a1de3082b776211bd36f7ea1eaa8e4873ae..1961745e89c73df00c0fcc5198b96039d0f481e7 100644 (file)
@@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 {
        uint32_t def, data, def1, data1;
 
-       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
+       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
        def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
 
        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
-               data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+               data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
                data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                           DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                           DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
 
        } else {
-               data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
-
+               data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
                data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                          DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
@@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
        }
 
        if (def != data)
-               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
        if (def1 != data1)
                WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
 }
@@ -525,17 +523,44 @@ static void
 mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                           bool enable)
 {
-       uint32_t def, data;
-
-       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
-               data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
-       else
-               data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+       uint32_t def, data, def1, data1, def2, data2;
+
+       def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+       def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+       def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
+
+       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
+               data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+               data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+               data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+       } else {
+               data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
+               data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+               data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                       DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
+       }
 
        if (def != data)
-               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
+               WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
+       if (def1 != data1)
+               WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
+       if (def2 != data2)
+               WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
 }
 
 static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
@@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,
 
 static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
 {
-       int data, data1;
+       int data, data1, data2, data3;
 
        if (amdgpu_sriov_vf(adev))
                *flags = 0;
 
-       data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
-       data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+       data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
+       data1  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
+       data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
+       data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);
 
        /* AMD_CG_SUPPORT_MC_MGCG */
-       if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
-           !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+       if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
                       DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
-                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
-               *flags |= AMD_CG_SUPPORT_MC_MGCG;
+                      DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
+               && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
+                       *flags |= AMD_CG_SUPPORT_MC_MGCG;
+       }
 
        /* AMD_CG_SUPPORT_MC_LS */
-       if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+       if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
+               && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                               DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
+               && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
+                               DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
                *flags |= AMD_CG_SUPPORT_MC_LS;
 }
 
index c6da89df055de87c89ba0ba05ed8739993a1c97b..961abf1cf040cc0372d0d9c74d456684f81cb07d 100644 (file)
@@ -1833,8 +1833,8 @@ static void emulated_link_detect(struct dc_link *link)
        link->type = dc_connection_none;
        prev_sink = link->local_sink;
 
-       if (prev_sink != NULL)
-               dc_sink_retain(prev_sink);
+       if (prev_sink)
+               dc_sink_release(prev_sink);
 
        switch (link->connector_signal) {
        case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
                dc_commit_updates_for_stream(
                        dm->dc, bundle->surface_updates,
                        dc_state->stream_status->plane_count,
-                       dc_state->streams[k], &bundle->stream_update, dc_state);
+                       dc_state->streams[k], &bundle->stream_update);
        }
 
 cleanup:
@@ -1965,8 +1965,7 @@ static void dm_set_dpms_off(struct dc_link *link)
 
        stream_update.stream = stream_state;
        dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
-                                    stream_state, &stream_update,
-                                    stream_state->ctx->dc->current_state);
+                                    stream_state, &stream_update);
        mutex_unlock(&adev->dm.dc_lock);
 }
 
@@ -2330,8 +2329,10 @@ void amdgpu_dm_update_connector_after_detect(
                 * TODO: check if we still need the S3 mode update workaround.
                 * If yes, put it here.
                 */
-               if (aconnector->dc_sink)
+               if (aconnector->dc_sink) {
                        amdgpu_dm_update_freesync_caps(connector, NULL);
+                       dc_sink_release(aconnector->dc_sink);
+               }
 
                aconnector->dc_sink = sink;
                dc_sink_retain(aconnector->dc_sink);
@@ -2347,8 +2348,6 @@ void amdgpu_dm_update_connector_after_detect(
 
                        drm_connector_update_edid_property(connector,
                                                           aconnector->edid);
-                       drm_add_edid_modes(connector, aconnector->edid);
-
                        if (aconnector->dc_link->aux_mode)
                                drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
                                                    aconnector->edid);
@@ -7549,7 +7548,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                    struct drm_crtc *pcrtc,
                                    bool wait_for_vblank)
 {
-       uint32_t i;
+       int i;
        uint64_t timestamp_ns;
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -7590,7 +7589,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                amdgpu_dm_commit_cursors(state);
 
        /* update planes when needed */
-       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
                struct drm_crtc *crtc = new_plane_state->crtc;
                struct drm_crtc_state *new_crtc_state;
                struct drm_framebuffer *fb = new_plane_state->fb;
@@ -7813,8 +7812,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                                     bundle->surface_updates,
                                                     planes_count,
                                                     acrtc_state->stream,
-                                                    &bundle->stream_update,
-                                                    dc_state);
+                                                    &bundle->stream_update);
 
                /**
                 * Enable or disable the interrupts on the backend.
@@ -8150,13 +8148,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
-               struct dc_surface_update dummy_updates[MAX_SURFACES];
+               struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_stream_update stream_update;
                struct dc_info_packet hdr_packet;
                struct dc_stream_status *status = NULL;
                bool abm_changed, hdr_changed, scaling_changed;
 
-               memset(&dummy_updates, 0, sizeof(dummy_updates));
+               memset(&surface_updates, 0, sizeof(surface_updates));
                memset(&stream_update, 0, sizeof(stream_update));
 
                if (acrtc) {
@@ -8213,16 +8211,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                 * To fix this, DC should permit updating only stream properties.
                 */
                for (j = 0; j < status->plane_count; j++)
-                       dummy_updates[j].surface = status->plane_states[0];
+                       surface_updates[j].surface = status->plane_states[j];
 
 
                mutex_lock(&dm->dc_lock);
                dc_commit_updates_for_stream(dm->dc,
-                                                    dummy_updates,
+                                               surface_updates,
                                                     status->plane_count,
                                                     dm_new_crtc_state->stream,
-                                                    &stream_update,
-                                                    dc_state);
+                                                    &stream_update);
                mutex_unlock(&dm->dc_lock);
        }
 
@@ -8359,14 +8356,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
 
        ret = PTR_ERR_OR_ZERO(conn_state);
        if (ret)
-               goto err;
+               goto out;
 
        /* Attach crtc to drm_atomic_state*/
        crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
 
        ret = PTR_ERR_OR_ZERO(crtc_state);
        if (ret)
-               goto err;
+               goto out;
 
        /* force a restore */
        crtc_state->mode_changed = true;
@@ -8376,17 +8373,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
 
        ret = PTR_ERR_OR_ZERO(plane_state);
        if (ret)
-               goto err;
-
+               goto out;
 
        /* Call commit internally with the state we just constructed */
        ret = drm_atomic_commit(state);
-       if (!ret)
-               return 0;
 
-err:
-       DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
        drm_atomic_state_put(state);
+       if (ret)
+               DRM_ERROR("Restoring old state failed with %i\n", ret);
 
        return ret;
 }
index 8ab0b9060d2bbabcfc714af6917f4ad9efbc9ae6..f2d8cf34be46e92bbd2c83cf832835389a8936b6 100644 (file)
@@ -833,6 +833,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                if (computed_streams[i])
                        continue;
 
+               if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+                       return false;
+
                mutex_lock(&aconnector->mst_mgr.lock);
                if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
                        mutex_unlock(&aconnector->mst_mgr.lock);
@@ -850,7 +853,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
                stream = dc_state->streams[i];
 
                if (stream->timing.flags.DSC == 1)
-                       dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+                       if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+                               return false;
        }
 
        return true;
index 5b466f440d671a6f6c2333b54eb7ccea20c6d51c..ab98c259ef69562a4b9b6c096fee5535a6bab022 100644 (file)
@@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
        struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
        bool force_reset = false;
        bool update_uclk = false;
+       bool p_state_change_support;
 
        if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
                return;
@@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
                clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
 
        clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
-       if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
-               clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+       p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
+       if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+               clk_mgr_base->clks.p_state_change_support = p_state_change_support;
 
                /* to disable P-State switching, set UCLK min = max */
                if (!clk_mgr_base->clks.p_state_change_support)
index 58eb0d69873a6225698486a1038ed58d085d502f..6cf1a5a2a5ecc0f5dda72f60930325b1df4de9cb 100644 (file)
@@ -2679,8 +2679,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
                struct dc_surface_update *srf_updates,
                int surface_count,
                struct dc_stream_state *stream,
-               struct dc_stream_update *stream_update,
-               struct dc_state *state)
+               struct dc_stream_update *stream_update)
 {
        const struct dc_stream_status *stream_status;
        enum surface_update_type update_type;
@@ -2699,6 +2698,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
 
        if (update_type >= UPDATE_TYPE_FULL) {
+               struct dc_plane_state *new_planes[MAX_SURFACES];
+
+               memset(new_planes, 0, sizeof(new_planes));
+
+               for (i = 0; i < surface_count; i++)
+                       new_planes[i] = srf_updates[i].surface;
 
                /* initialize scratch memory for building context */
                context = dc_create_state(dc);
@@ -2707,15 +2712,21 @@ void dc_commit_updates_for_stream(struct dc *dc,
                        return;
                }
 
-               dc_resource_state_copy_construct(state, context);
+               dc_resource_state_copy_construct(
+                               dc->current_state, context);
 
-               for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                       struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-                       struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+               /*remove old surfaces from context */
+               if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+                       DC_ERROR("Failed to remove streams for new validate context!\n");
+                       return;
+               }
 
-                       if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
-                               new_pipe->plane_state->force_full_update = true;
+               /* add surface to context */
+               if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+                       DC_ERROR("Failed to add streams for new validate context!\n");
+                       return;
                }
+
        }
 
 
index 1bd1a0935290673535333991f7fa7f7cb37798d6..1e4794e2825caf2b23b330255b899ec6c5614a05 100644 (file)
@@ -892,13 +892,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
 
        switch (dpcd_aux_read_interval) {
        case 0x01:
-               aux_rd_interval_us = 400;
+               aux_rd_interval_us = 4000;
                break;
        case 0x02:
-               aux_rd_interval_us = 4000;
+               aux_rd_interval_us = 8000;
                break;
        case 0x03:
-               aux_rd_interval_us = 8000;
+               aux_rd_interval_us = 12000;
                break;
        case 0x04:
                aux_rd_interval_us = 16000;
@@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
                        initial_link_setting;
        uint32_t link_bw;
 
+       if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+               return false;
+
        /* search for the minimum link setting that:
         * 1. is supported according to the link training result
         * 2. could support the b/w requested by the timing
@@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-                                       pipe_ctx->stream->link == link)
+                                       pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
                                core_link_disable_stream(pipe_ctx);
                }
 
                for (i = 0; i < MAX_PIPES; i++) {
                        pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
                        if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
-                                       pipe_ctx->stream->link == link)
+                                       pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
                                core_link_enable_stream(link->dc->current_state, pipe_ctx);
                }
 
index b7910976b81a74ab2ef78bc39fae2fc112e98afd..e243c01b9672eee14e92e384e300d61213c4a601 100644 (file)
@@ -283,8 +283,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
                struct dc_surface_update *srf_updates,
                int surface_count,
                struct dc_stream_state *stream,
-               struct dc_stream_update *stream_update,
-               struct dc_state *state);
+               struct dc_stream_update *stream_update);
 /*
  * Log the current stream state.
  */
index cfc130e2d6fd01241e3ef0788f03250bd45a2c61..017b67b830e669a05acd66619484787960f97383 100644 (file)
@@ -647,8 +647,13 @@ static void power_on_plane(
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               hws->funcs.dpp_pg_control(hws, plane_id, true);
-               hws->funcs.hubp_pg_control(hws, plane_id, true);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, plane_id, true);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, plane_id, true);
+
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
                DC_LOG_DEBUG(
@@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               hws->funcs.dpp_pg_control(hws, dpp->inst, false);
-               hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, dpp->inst, false);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, hubp->inst, false);
+
                dpp->funcs->dpp_reset(dpp);
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
index cb822df21b7c5a213f7261a2ccd54760a532deaa..480d928cb1ca6edb6a3c1dd9e19ac9575521c199 100644 (file)
@@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
        if (REG(DC_IP_REQUEST_CNTL)) {
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 1);
-               dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
-               dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
+               if (hws->funcs.dpp_pg_control)
+                       hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
+
+               if (hws->funcs.hubp_pg_control)
+                       hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);
+
                REG_SET(DC_IP_REQUEST_CNTL, 0,
                                IP_REQUEST_EN, 0);
                DC_LOG_DEBUG(
index e04ecf0fc0dbc6f4c2afa5846c22fd3b3fc5ef78..5ed18cac57e8da901fc02a489ea11c44d745c036 100644 (file)
@@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                 * if this primary pipe has a bottom pipe in prev. state
                 * and if the bottom pipe is still available (which it should be),
                 * pick that pipe as secondary
-                * Same logic applies for ODM pipes. Since mpo is not allowed with odm
-                * check in else case.
+                * Same logic applies for ODM pipes
                 */
                if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
                        preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;
@@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
                                secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
                                secondary_pipe->pipe_idx = preferred_pipe_idx;
                        }
-               } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
+               }
+               if (secondary_pipe == NULL &&
+                               dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
                        preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
                        if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
                                secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
index 1c88d2edd381c567e9b0759010051b62a19b39e4..6743764289167a06639268284dba98356c883f8f 100644 (file)
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
        .num_banks = 8,
        .num_chans = 4,
        .vmm_page_size_bytes = 4096,
-       .dram_clock_change_latency_us = 23.84,
+       .dram_clock_change_latency_us = 11.72,
        .return_bus_width_bytes = 64,
        .dispclk_dppclk_vco_speed_mhz = 3600,
        .xfc_bus_transport_time_us = 4,
@@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id {
        DCN20_CLK_SRC_PLL0,
        DCN20_CLK_SRC_PLL1,
        DCN20_CLK_SRC_PLL2,
+       DCN20_CLK_SRC_PLL3,
+       DCN20_CLK_SRC_PLL4,
        DCN20_CLK_SRC_TOTAL_DCN21
 };
 
@@ -2030,6 +2032,14 @@ static bool dcn21_resource_construct(
                        dcn21_clock_source_create(ctx, ctx->dc_bios,
                                CLOCK_SOURCE_COMBO_PHY_PLL2,
                                &clk_src_regs[2], false);
+       pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+                       dcn21_clock_source_create(ctx, ctx->dc_bios,
+                               CLOCK_SOURCE_COMBO_PHY_PLL3,
+                               &clk_src_regs[3], false);
+       pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+                       dcn21_clock_source_create(ctx, ctx->dc_bios,
+                               CLOCK_SOURCE_COMBO_PHY_PLL4,
+                               &clk_src_regs[4], false);
 
        pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
 
index c20331eb62e01e6f66cc9e1eeda2c6022c3b807f..dfd77b3cc84d8c36f88fa218a8a6b9a2c8c50078 100644 (file)
@@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
 
 
 ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse
 endif
 
 ifdef CONFIG_PPC64
@@ -45,6 +45,8 @@ ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
 endif
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float
 endif
 
 ifdef CONFIG_X86
index 3ca7d911d25c440c5b8862660b25a0749355c718..09264716d1dc9d06da1060de0494c1af16b29597 100644 (file)
@@ -14,7 +14,7 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
                dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
 
 ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
 endif
 
 ifdef CONFIG_PPC64
@@ -25,6 +25,7 @@ ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
 endif
+CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
 endif
 
 ifdef CONFIG_X86
index 8d4924b7dc2216312f5b82be65b4133e705df30a..101620a8867aa9796742e32cb1f50d61812478b7 100644 (file)
@@ -13,7 +13,7 @@
 DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o
 
 ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse
 endif
 
 ifdef CONFIG_PPC64
@@ -24,6 +24,7 @@ ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
 endif
+CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float
 endif
 
 ifdef CONFIG_X86
index 4bdbcce7092d17c67523d943a71f2915860e7605..0d797fa9f5cc69dd37314151d48d955a89e1e7d9 100644 (file)
@@ -553,6 +553,7 @@ struct pptable_funcs {
                                             *clock_req);
        uint32_t (*get_fan_control_mode)(struct smu_context *smu);
        int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+       int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
        int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
        int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
        int (*gfx_off_control)(struct smu_context *smu, bool enable);
index 13de692a421363ef33b589add8f5678bbeca29f8..5d0b29653ffa13ccf8b4ae265b69512331a5dc27 100644 (file)
@@ -203,6 +203,9 @@ int
 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
                               uint32_t mode);
 
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed);
+
 int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
                                       uint32_t speed);
 
index 8b867a6d52b57f0f6e3b9aa1f136494f82ac81d5..e84c737e39673b35b429f85ea7076d320ebc1597 100644 (file)
@@ -2151,19 +2151,14 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
 int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
 {
        int ret = 0;
-       uint32_t rpm;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
        mutex_lock(&smu->mutex);
 
-       if (smu->ppt_funcs->set_fan_speed_rpm) {
-               if (speed > 100)
-                       speed = 100;
-               rpm = speed * smu->fan_max_rpm / 100;
-               ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm);
-       }
+       if (smu->ppt_funcs->set_fan_speed_percent)
+               ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);
 
        mutex_unlock(&smu->mutex);
 
index cd7b411457ffac308882acf71fa7ba045e82eb2d..16db0b506b0de0a6ee3be4a80f385e861169e8c5 100644 (file)
@@ -2326,6 +2326,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+       .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
        .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
        .gfx_off_control = smu_v11_0_gfx_off_control,
index 51e83123f72a1057bffb54f98fa8dfeed294a405..cd7efa923195e3fa2a94414adf096fbfc1eccd0b 100644 (file)
@@ -2456,6 +2456,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+       .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
        .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
        .gfx_off_control = smu_v11_0_gfx_off_control,
index 12b36eb0ff6a52b09d5d05349adf21c8d80d3607..d68d3dfee51d47c0c92c364c4495c332c7e37439 100644 (file)
@@ -2802,6 +2802,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
        .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
        .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+       .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
        .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
        .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
        .gfx_off_control = smu_v11_0_gfx_off_control,
index b279dbbbce6ba408a3250eb513ce5e942be814af..5aeb5f5a04478a04f75dc94bfcee685a672c3ee5 100644 (file)
@@ -1173,6 +1173,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
        return 0;
 }
 
+int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+       struct amdgpu_device *adev = smu->adev;
+       uint32_t duty100, duty;
+       uint64_t tmp64;
+
+       if (speed > 100)
+               speed = 100;
+
+       if (smu_v11_0_auto_fan_control(smu, 0))
+               return -EINVAL;
+
+       duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+                               CG_FDO_CTRL1, FMAX_DUTY100);
+       if (!duty100)
+               return -EINVAL;
+
+       tmp64 = (uint64_t)speed * duty100;
+       do_div(tmp64, 100);
+       duty = (uint32_t)tmp64;
+
+       WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+                    REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+                                  CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+       return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
 int
 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
                               uint32_t mode)
@@ -1181,7 +1210,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu,
 
        switch (mode) {
        case AMD_FAN_CTRL_NONE:
-               ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm);
+               ret = smu_v11_0_set_fan_speed_percent(smu, 100);
                break;
        case AMD_FAN_CTRL_MANUAL:
                ret = smu_v11_0_auto_fan_control(smu, 0);
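
smu_v11_0_set_fan_speed_percent(), added above, converts the requested percentage directly into a static FDO duty cycle scaled by whatever FMAX_DUTY100 reads back, rather than detouring through an RPM value. A minimal standalone sketch of that scaling (duty100 = 255 is an assumed readback, not a value taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100 = 255;		/* assumed FMAX_DUTY100 readback */
	uint32_t speed = 40;		/* requested fan speed in percent */
	uint64_t tmp64 = (uint64_t)speed * duty100;

	/* the kernel uses do_div(tmp64, 100); plain division works here */
	printf("duty = %u\n", (unsigned)(tmp64 / 100));	/* prints 102 */
	return 0;
}
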
index 5c1482d4ca43e3af916596de68571092c25ea276..92ad2cdbae107c18aef29ac906e3ea4a14e08ba4 100644 (file)
@@ -591,14 +591,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
        gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
        gpu_metrics->average_cpu_power = metrics.Power[0];
        gpu_metrics->average_soc_power = metrics.Power[1];
+       gpu_metrics->average_gfx_power = metrics.Power[2];
        memcpy(&gpu_metrics->average_core_power[0],
                &metrics.CorePower[0],
                sizeof(uint16_t) * 8);
 
        gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
        gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
        gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
        gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+       gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
 
        memcpy(&gpu_metrics->current_coreclk[0],
                &metrics.CoreFrequency[0],
index f743685a20e8a70bf71d0b7885b4da46ec2dda78..9a9697038016027d84c2cc2696a86922df5a8eda 100644 (file)
@@ -1121,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
 static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
 {
 
-       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
+       return 0;
 }
 
 static const struct pptable_funcs renoir_ppt_funcs = {
index 0c98d27f84ac7da2b4b23b557e7e28c24377e70a..fee27952ec6d1f01c2d48b7b8a8694bd451f8805 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/regmap.h>
 #include <linux/regulator/consumer.h>
 #include <linux/wait.h>
+#include <linux/workqueue.h>
 
 #include <sound/hdmi-codec.h>
 
@@ -36,6 +37,7 @@ struct lt9611uxc {
        struct mutex ocm_lock;
 
        struct wait_queue_head wq;
+       struct work_struct work;
 
        struct device_node *dsi0_node;
        struct device_node *dsi1_node;
@@ -52,6 +54,8 @@ struct lt9611uxc {
 
        bool hpd_supported;
        bool edid_read;
+       /* can be accessed from different threads, so protect this with ocm_lock */
+       bool hdmi_connected;
        uint8_t fw_version;
 };
 
@@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
        if (irq_status)
                regmap_write(lt9611uxc->regmap, 0xb022, 0);
 
-       lt9611uxc_unlock(lt9611uxc);
-
-       if (irq_status & BIT(0))
+       if (irq_status & BIT(0)) {
                lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+               wake_up_all(&lt9611uxc->wq);
+       }
 
        if (irq_status & BIT(1)) {
-               if (lt9611uxc->connector.dev)
-                       drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
-               else
-                       drm_bridge_hpd_notify(&lt9611uxc->bridge, !!(hpd_status & BIT(1)));
+               lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+               schedule_work(&lt9611uxc->work);
        }
 
+       lt9611uxc_unlock(lt9611uxc);
+
        return IRQ_HANDLED;
 }
 
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+       struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+       bool connected;
+
+       if (lt9611uxc->connector.dev)
+               drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+       else {
+
+               mutex_lock(&lt9611uxc->ocm_lock);
+               connected = lt9611uxc->hdmi_connected;
+               mutex_unlock(&lt9611uxc->ocm_lock);
+
+               drm_bridge_hpd_notify(&lt9611uxc->bridge,
+                                     connected ?
+                                     connector_status_connected :
+                                     connector_status_disconnected);
+       }
+}
+
 static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
 {
        gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
@@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
        struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
        unsigned int reg_val = 0;
        int ret;
-       int connected = 1;
+       bool connected = true;
+
+       lt9611uxc_lock(lt9611uxc);
 
        if (lt9611uxc->hpd_supported) {
-               lt9611uxc_lock(lt9611uxc);
                ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
-               lt9611uxc_unlock(lt9611uxc);
 
                if (ret)
                        dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
                else
                        connected  = reg_val & BIT(1);
        }
+       lt9611uxc->hdmi_connected = connected;
+
+       lt9611uxc_unlock(lt9611uxc);
 
        return connected ?  connector_status_connected :
                                connector_status_disconnected;
@@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid
 static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
 {
        return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
-                       msecs_to_jiffies(100));
+                       msecs_to_jiffies(500));
 }
 
 static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
@@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
        ret = lt9611uxc_wait_for_edid(lt9611uxc);
        if (ret < 0) {
                dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
-               return ERR_PTR(ret);
+               return NULL;
+       } else if (ret == 0) {
+               dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+               return NULL;
        }
 
        return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
@@ -926,6 +956,8 @@ retry:
        lt9611uxc->fw_version = ret;
 
        init_waitqueue_head(&lt9611uxc->wq);
+       INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
        ret = devm_request_threaded_irq(dev, client->irq, NULL,
                                        lt9611uxc_irq_thread_handler,
                                        IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
@@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client)
        struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
 
        disable_irq(client->irq);
+       flush_scheduled_work();
        lt9611uxc_audio_exit(lt9611uxc);
        drm_bridge_remove(&lt9611uxc->bridge);
 
index ba1507036f265bba483090b72aeb757969762c32..4a8cbec832bc46051a8d500fa1ab3df67d23929d 100644 (file)
@@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,
 
        ret = handle_conflicting_encoders(state, true);
        if (ret)
-               return ret;
+               goto fail;
 
        ret = drm_atomic_commit(state);
 
index 0401b2f47500274ad73f337298624988167aefd3..8781deefeae3e69642930fb2013b61fea149cafd 100644 (file)
@@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
-static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8  dp_link_count)
+/**
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @link_rate: link rate in 10kbits/s units
+ * @link_lane_count: lane count
+ *
+ * Calculate the total bandwidth of a MultiStream Transport link. The returned
+ * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
+ * convert the number of PBNs required for a given stream to the number of
+ * timeslots this stream requires in each MTP.
+ */
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
 {
-       if (dp_link_bw == 0 || dp_link_count == 0)
-               DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
-                             dp_link_bw, dp_link_count);
+       if (link_rate == 0 || link_lane_count == 0)
+               DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
+                             link_rate, link_lane_count);
 
-       return dp_link_bw * dp_link_count / 2;
+       /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
+       return link_rate * link_lane_count / 54000;
 }
+EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
 
 /**
  * drm_dp_read_mst_cap() - check whether or not a sink supports MST
@@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
                        goto out_unlock;
                }
 
-               mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
+               mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
                                                        mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
                if (mgr->pbn_div == 0) {
                        ret = -EINVAL;
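
The kernel-doc added above pins down the units: link_rate is in 10 kb/s, and dividing by 54000 yields PBN per time slot per MTP, so callers now pass a real link rate instead of the raw DPCD bandwidth code. As a quick check of the formula (a standalone sketch, not kernel code; the HBR2 value is what drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4) returns):

#include <stdio.h>

int main(void)
{
	int link_rate = 540000;	/* HBR2, 5.4 Gb/s per lane, in 10 kb/s units */
	int lanes = 4;

	/* same arithmetic as drm_dp_get_vc_payload_bw() */
	printf("pbn_div = %d\n", link_rate * lanes / 54000);	/* prints 40 */
	return 0;
}

The old formula, dp_link_bw * dp_link_count / 2, gave the same 40 for this link (0x14 * 4 / 2), so MST timeslot accounting is unchanged for the DP 1.x link rates while the new helper can also take rates that have no DPCD bandwidth code.
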
index 02ca22e90290d4a3ad583b69c4b330a90c3cd594..0b232a73c1b74d3a41a11126df9f9a256add07dc 100644 (file)
@@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
        if (gbo->vmap_use_count > 0)
                goto out;
 
-       ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
-       if (ret)
-               return ret;
+       /*
+        * VRAM helpers unmap the BO only on demand. So the previous
+        * page mapping might still be around. Only vmap if the there's
+        * no mapping present.
+        */
+       if (dma_buf_map_is_null(&gbo->map)) {
+               ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
+               if (ret)
+                       return ret;
+       }
 
 out:
        ++gbo->vmap_use_count;
@@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
                return;
 
        ttm_bo_vunmap(bo, &gbo->map);
+       dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
 }
 
 static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
index 6e74e6745ecaeb21a864768d66dc07dfe7a11d82..3491460498491d0d8f5f96058b5adbf6a7ef6c80 100644 (file)
@@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
                return -ENOENT;
 
        *fence = drm_syncobj_fence_get(syncobj);
-       drm_syncobj_put(syncobj);
 
        if (*fence) {
                ret = dma_fence_chain_find_seqno(fence, point);
                if (!ret)
-                       return 0;
+                       goto out;
                dma_fence_put(*fence);
        } else {
                ret = -EINVAL;
        }
 
        if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
-               return ret;
+               goto out;
 
        memset(&wait, 0, sizeof(wait));
        wait.task = current;
@@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
        if (wait.node.next)
                drm_syncobj_remove_wait(syncobj, &wait);
 
+out:
+       drm_syncobj_put(syncobj);
+
        return ret;
 }
 EXPORT_SYMBOL(drm_syncobj_find_fence);
index 92940a0c5ef8fa77c2d5664fcd2d40b864416d2f..dc13d1814d95d6c2074709754df8f71ee463655f 100644 (file)
@@ -2754,13 +2754,15 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
        int n_entries, ln;
        u32 val;
 
+       if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+               return;
+
        ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
-       /* The table does not have values for level 3 and level 9. */
-       if (level >= n_entries || level == 3 || level == 9) {
+       if (level >= n_entries) {
                drm_dbg_kms(&dev_priv->drm,
                            "DDI translation not found for level %d. Using %d instead.",
-                           level, n_entries - 2);
-               level = n_entries - 2;
+                           level, n_entries - 1);
+               level = n_entries - 1;
        }
 
        /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
@@ -2891,6 +2893,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
        u32 val, dpcnt_mask, dpcnt_val;
        int n_entries, ln;
 
+       if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+               return;
+
        ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
 
        if (level >= n_entries)
@@ -3532,6 +3537,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
        intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state));
 }
 
+static void intel_ddi_power_up_lanes(struct intel_encoder *encoder,
+                                    const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+       if (intel_phy_is_combo(i915, phy)) {
+               bool lane_reversal =
+                       dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
+
+               intel_combo_phy_power_up_lanes(i915, phy, false,
+                                              crtc_state->lane_count,
+                                              lane_reversal);
+       }
+}
+
 static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
                                  struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
@@ -3621,14 +3643,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
         * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
         * the used lanes of the DDI.
         */
-       if (intel_phy_is_combo(dev_priv, phy)) {
-               bool lane_reversal =
-                       dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
-               intel_combo_phy_power_up_lanes(dev_priv, phy, false,
-                                              crtc_state->lane_count,
-                                              lane_reversal);
-       }
+       intel_ddi_power_up_lanes(encoder, crtc_state);
 
        /*
         * 7.g Configure and enable DDI_BUF_CTL
@@ -3713,19 +3728,12 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
        else
                intel_prepare_dp_ddi_buffers(encoder, crtc_state);
 
-       if (intel_phy_is_combo(dev_priv, phy)) {
-               bool lane_reversal =
-                       dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
-
-               intel_combo_phy_power_up_lanes(dev_priv, phy, false,
-                                              crtc_state->lane_count,
-                                              lane_reversal);
-       }
+       intel_ddi_power_up_lanes(encoder, crtc_state);
 
        intel_ddi_init_dp_buf_reg(encoder, crtc_state);
        if (!is_mst)
                intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-       intel_dp_configure_protocol_converter(intel_dp);
+       intel_dp_configure_protocol_converter(intel_dp, crtc_state);
        intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
                                              true);
        intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -4206,6 +4214,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
                intel_de_write(dev_priv, reg, val);
        }
 
+       intel_ddi_power_up_lanes(encoder, crtc_state);
+
        /* In HDMI/DVI mode, the port width, and swing/emphasis values
         * are ignored so nothing special needs to be done besides
         * enabling the port.
index 53a00cf3fa325ef280f649bed2bbd98b3aae20bc..61be6bed916291d614245b10a8cbe1299ff7eda1 100644 (file)
@@ -2309,7 +2309,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
                 */
                ret = i915_vma_pin_fence(vma);
                if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
-                       i915_gem_object_unpin_from_display_plane(vma);
+                       i915_vma_unpin(vma);
                        vma = ERR_PTR(ret);
                        goto err;
                }
@@ -2327,12 +2327,9 @@ err:
 
 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
 {
-       i915_gem_object_lock(vma->obj, NULL);
        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
-       i915_gem_object_unpin_from_display_plane(vma);
-       i915_gem_object_unlock(vma->obj);
-
+       i915_vma_unpin(vma);
        i915_vma_put(vma);
 }
 
@@ -4807,6 +4804,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
                        plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
        } else if (fb->format->is_yuv) {
                plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
+               if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
+                       plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
        }
 
        return plane_color_ctl;
index 37f1a10fd02172e79f443c32471e74a9b407cf88..8a26307c48960110b678af7075db41ad3e65de6f 100644 (file)
@@ -4014,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 tmp;
@@ -4033,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
                drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
                            enableddisabled(intel_dp->has_hdmi_sink));
 
-       tmp = intel_dp->dfp.ycbcr_444_to_420 ?
-               DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+       tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+               intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4088,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
        }
 
        intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-       intel_dp_configure_protocol_converter(intel_dp);
+       intel_dp_configure_protocol_converter(intel_dp, pipe_config);
        intel_dp_start_link_train(intel_dp, pipe_config);
        intel_dp_stop_link_train(intel_dp, pipe_config);
 
@@ -4636,24 +4637,6 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
-void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
-                               const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u8 train_set = intel_dp->train_set[0];
-
-       drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
-                   train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
-                   train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
-       drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
-                   (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
-                   DP_TRAIN_PRE_EMPHASIS_SHIFT,
-                   train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
-                   " (max)" : "");
-
-       intel_dp->set_signal_levels(intel_dp, crtc_state);
-}
-
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
                                       const struct intel_crtc_state *crtc_state,
@@ -5702,7 +5685,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
 
        intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
 
-       intel_dp_set_signal_levels(intel_dp, crtc_state);
+       intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX);
 
        intel_dp_phy_pattern_update(intel_dp, crtc_state);
 
index b871a09b69013a398a4b19853f065afabf37ecc7..6620f9efdcbbaebb525ee3425bfc58ce08fd7a91 100644 (file)
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                          struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                          const struct intel_crtc_state *crtc_state);
 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                           const struct intel_crtc_state *crtc_state,
                                           bool enable);
@@ -95,9 +96,6 @@ void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
                                       const struct intel_crtc_state *crtc_state,
                                       u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp,
-                          const struct intel_crtc_state *crtc_state);
 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           u8 *link_bw, u8 *rate_select);
 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
index 91d3979902d03cb891474ad587a79c760118797c..d8c6d7054d11d929ff8d4170b10b96dcd2c6dcda 100644 (file)
@@ -334,6 +334,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
        return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len;
 }
 
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+                               const struct intel_crtc_state *crtc_state,
+                               enum drm_dp_phy dp_phy)
+{
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       u8 train_set = intel_dp->train_set[0];
+       char phy_name[10];
+
+       drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
+                   train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+                   train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
+                   (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+                   DP_TRAIN_PRE_EMPHASIS_SHIFT,
+                   train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+                   " (max)" : "",
+                   intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+
+       if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
+               intel_dp->set_signal_levels(intel_dp, crtc_state);
+}
+
 static bool
 intel_dp_reset_link_train(struct intel_dp *intel_dp,
                          const struct intel_crtc_state *crtc_state,
@@ -341,7 +362,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp,
                          u8 dp_train_pat)
 {
        memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
-       intel_dp_set_signal_levels(intel_dp, crtc_state);
+       intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
        return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat);
 }
 
@@ -355,7 +376,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
                            DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy);
        int ret;
 
-       intel_dp_set_signal_levels(intel_dp, crtc_state);
+       intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy);
 
        ret = drm_dp_dpcd_write(&intel_dp->aux, reg,
                                intel_dp->train_set, crtc_state->lane_count);
index 86905aa24db76a6c079118b32fca14beb91cc65f..6a1f76bd8c7586a6f40fea2e4b8b61cb30e97225 100644 (file)
@@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state,
                               enum drm_dp_phy dp_phy,
                               const u8 link_status[DP_LINK_STATUS_SIZE]);
+void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
+                               const struct intel_crtc_state *crtc_state,
+                               enum drm_dp_phy dp_phy);
 void intel_dp_start_link_train(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp,
index 27f04aed8764a87287175ab3c51fbc8f200f2208..3286b232be0b8f0d1dbb1340a9408fea82b1fc9c 100644 (file)
@@ -69,7 +69,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
 
                slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
                                                      connector->port,
-                                                     crtc_state->pbn, 0);
+                                                     crtc_state->pbn,
+                                                     drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+                                                                              crtc_state->lane_count));
                if (slots == -EDEADLK)
                        return slots;
                if (slots >= 0)
index b2a4bbcfdcd25acd81a97d720d18bb50f2aa3964..b9d8825e2bb12f397ea31cee5b2288d6995a0fd0 100644 (file)
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
        if (content_protection_type_changed) {
                mutex_lock(&hdcp->mutex);
                hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               drm_connector_get(&connector->base);
                schedule_work(&hdcp->prop_work);
                mutex_unlock(&hdcp->mutex);
        }
@@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                desired_and_not_enabled =
                        hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
                mutex_unlock(&hdcp->mutex);
+               /*
+                * If HDCP already ENABLED and CP property is DESIRED, schedule
+                * prop_work to update correct CP property to user space.
+                */
+               if (!desired_and_not_enabled && !content_protection_type_changed) {
+                       drm_connector_get(&connector->base);
+                       schedule_work(&hdcp->prop_work);
+               }
        }
 
        if (desired_and_not_enabled || content_protection_type_changed)
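Both added drm_connector_get() calls pin the connector for the prop_work they schedule; a minimal sketch of the release side, assuming (this part is not in the hunk) that the property worker drops the reference once it has pushed hdcp->value out to the connector's content-protection property:

        /* Sketch only; intel_hdcp_to_connector() is assumed to be the usual
         * container_of() helper around struct intel_connector::hdcp. */
        static void hdcp_prop_work_sketch(struct work_struct *work)
        {
                struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp, prop_work);
                struct intel_connector *connector = intel_hdcp_to_connector(hdcp);

                /* ... write hdcp->value out to the CONTENT_PROTECTION property ... */

                drm_connector_put(&connector->base); /* balances the get taken before schedule_work() */
        }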
index 52b4f6193b4ce5a32b5de8001a1e88a734d00cb5..0095c8cac9b40f1e0856151d958f26516226e25f 100644 (file)
@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
        intel_frontbuffer_flip_complete(overlay->i915,
                                        INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
 
-       i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_unpin(vma);
        i915_vma_put(vma);
 }
 
@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        return 0;
 
 out_unpin:
-       i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_unpin(vma);
 out_pin_section:
        atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
 
index 019a2d6d807a6873fe7a184f12a52c6c6f62aeb8..3da2544fa1c0b6dfdadb206fb1ee9cd48762a359 100644 (file)
@@ -618,13 +618,19 @@ skl_program_scaler(struct intel_plane *plane,
 
 /* Preoffset values for YUV to RGB Conversion */
 #define PREOFF_YUV_TO_RGB_HI           0x1800
-#define PREOFF_YUV_TO_RGB_ME           0x1F00
+#define PREOFF_YUV_TO_RGB_ME           0x0000
 #define PREOFF_YUV_TO_RGB_LO           0x1800
 
 #define  ROFF(x)          (((x) & 0xffff) << 16)
 #define  GOFF(x)          (((x) & 0xffff) << 0)
 #define  BOFF(x)          (((x) & 0xffff) << 16)
 
+/*
+ * Programs the input color space conversion stage for ICL HDR planes.
+ * Note that it is assumed that this stage always happens after YUV
+ * range correction. Thus, the input to this stage is assumed to be
+ * in full-range YCbCr.
+ */
 static void
 icl_program_input_csc(struct intel_plane *plane,
                      const struct intel_crtc_state *crtc_state,
@@ -672,52 +678,7 @@ icl_program_input_csc(struct intel_plane *plane,
                        0x0, 0x7800, 0x7F10,
                },
        };
-
-       /* Matrix for Limited Range to Full Range Conversion */
-       static const u16 input_csc_matrix_lr[][9] = {
-               /*
-                * BT.601 Limted range YCbCr -> full range RGB
-                * The matrix required is :
-                * [1.164384, 0.000, 1.596027,
-                *  1.164384, -0.39175, -0.812813,
-                *  1.164384, 2.017232, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT601] = {
-                       0x7CC8, 0x7950, 0x0,
-                       0x8D00, 0x7950, 0x9C88,
-                       0x0, 0x7950, 0x6810,
-               },
-               /*
-                * BT.709 Limited range YCbCr -> full range RGB
-                * The matrix required is :
-                * [1.164384, 0.000, 1.792741,
-                *  1.164384, -0.213249, -0.532909,
-                *  1.164384, 2.112402, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT709] = {
-                       0x7E58, 0x7950, 0x0,
-                       0x8888, 0x7950, 0xADA8,
-                       0x0, 0x7950,  0x6870,
-               },
-               /*
-                * BT.2020 Limited range YCbCr -> full range RGB
-                * The matrix required is :
-                * [1.164, 0.000, 1.678,
-                *  1.164, -0.1873, -0.6504,
-                *  1.164, 2.1417, 0.0000]
-                */
-               [DRM_COLOR_YCBCR_BT2020] = {
-                       0x7D70, 0x7950, 0x0,
-                       0x8A68, 0x7950, 0xAC00,
-                       0x0, 0x7950, 0x6890,
-               },
-       };
-       const u16 *csc;
-
-       if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-               csc = input_csc_matrix[plane_state->hw.color_encoding];
-       else
-               csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
+       const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding];
 
        intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
                          ROFF(csc[0]) | GOFF(csc[1]));
@@ -734,14 +695,8 @@ icl_program_input_csc(struct intel_plane *plane,
 
        intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
                          PREOFF_YUV_TO_RGB_HI);
-       if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
-               intel_de_write_fw(dev_priv,
-                                 PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-                                 0);
-       else
-               intel_de_write_fw(dev_priv,
-                                 PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
-                                 PREOFF_YUV_TO_RGB_ME);
+       intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+                         PREOFF_YUV_TO_RGB_ME);
        intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
                          PREOFF_YUV_TO_RGB_LO);
        intel_de_write_fw(dev_priv,
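The limited-range matrices can be dropped because, as the new comment states, YUV range correction now runs before this CSC stage, so its input is always full-range YCbCr. For reference, the expansion the earlier stage is assumed to perform is the standard studio-range stretch (background math, not code from this patch):

        /* 8-bit studio-range expansion: luma 16..235 and chroma 16..240 are
         * stretched to 0..255 before the full-range matrices above apply. */
        static inline void expand_studio_range(int y, int cb, int cr,
                                               int *yf, int *cbf, int *crf)
        {
                *yf  = clamp((y  -  16) * 255 / 219,       0, 255);
                *cbf = clamp((cb - 128) * 255 / 224 + 128, 0, 255);
                *crf = clamp((cr - 128) * 255 / 224 + 128, 0, 255);
        }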
index fcce6909f201769876dea539f22681ff37d80ec6..3d435bfff7649aeaf279164b8b16ce3fe6391c54 100644 (file)
@@ -387,48 +387,6 @@ err:
        return vma;
 }
 
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       struct i915_vma *vma;
-
-       if (list_empty(&obj->vma.list))
-               return;
-
-       mutex_lock(&i915->ggtt.vm.mutex);
-       spin_lock(&obj->vma.lock);
-       for_each_ggtt_vma(vma, obj) {
-               if (!drm_mm_node_allocated(&vma->node))
-                       continue;
-
-               GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
-               list_move_tail(&vma->vm_link, &vma->vm->bound_list);
-       }
-       spin_unlock(&obj->vma.lock);
-       mutex_unlock(&i915->ggtt.vm.mutex);
-
-       if (i915_gem_object_is_shrinkable(obj)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
-               if (obj->mm.madv == I915_MADV_WILLNEED &&
-                   !atomic_read(&obj->mm.shrink_pin))
-                       list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
-               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-       }
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
-       /* Bump the LRU to try and avoid premature eviction whilst flipping  */
-       i915_gem_object_bump_inactive_ggtt(vma->obj);
-
-       i915_vma_unpin(vma);
-}
-
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  * @obj: object to act on
@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        else
                err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
 
-       /* And bump the LRU for this access */
-       i915_gem_object_bump_inactive_ggtt(obj);
-
        i915_gem_object_unlock(obj);
 
        if (write_domain)
index be14486f63a7aa1e157625a7d34a3573112abbd7..4556afe18f16d41779e043d904ca02408649b013 100644 (file)
@@ -486,7 +486,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
index 94465374ca2fe5505c67804bf697d5b88f9c102f..e961ad6a312944a89c09adcd5c57e29466d7e43d 100644 (file)
@@ -390,6 +390,16 @@ static void emit_batch(struct i915_vma * const vma,
                                                     &cb_kernel_ivb,
                                                     desc_count);
 
+       /* Reset inherited context registers */
+       gen7_emit_pipeline_invalidate(&cmds);
+       batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
+       batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
+       batch_add(&cmds, 0xffff0000);
+       batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
+       batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+       gen7_emit_pipeline_flush(&cmds);
+
+       /* Switch to the media pipeline and our base address */
        gen7_emit_pipeline_invalidate(&cmds);
        batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
        batch_add(&cmds, MI_NOOP);
@@ -399,9 +409,11 @@ static void emit_batch(struct i915_vma * const vma,
        gen7_emit_state_base_address(&cmds, descriptors);
        gen7_emit_pipeline_invalidate(&cmds);
 
+       /* Set the clear-residual kernel state */
        gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
        gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count);
 
+       /* Execute the kernel on all HW threads */
        for (i = 0; i < num_primitives(bv); i++)
                gen7_emit_media_object(&cmds, i);
 
index a24cc1ff08a0c4a6b2d4853fdfa5af04973cca91..1d1757584f49026d7f3b6fb7cedfb875fce68a0f 100644 (file)
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
        return true;
 }
 
-static inline bool __request_completed(const struct i915_request *rq)
-{
-       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
 __maybe_unused static bool
 check_signal_order(struct intel_context *ce, struct i915_request *rq)
 {
@@ -192,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
                intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq)
-{
-       GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags));
-
-       if (!__dma_fence_signal(&rq->fence)) {
-               i915_request_put(rq);
-               return false;
-       }
-
-       return true;
-}
-
 static struct llist_node *
 slist_add(struct llist_node *node, struct llist_node *head)
 {
@@ -257,7 +240,7 @@ static void signal_irq_work(struct irq_work *work)
                list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
                        bool release;
 
-                       if (!__request_completed(rq))
+                       if (!__i915_request_is_complete(rq))
                                break;
 
                        if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -274,9 +257,11 @@ static void signal_irq_work(struct irq_work *work)
                        release = remove_signaling_context(b, ce);
                        spin_unlock(&ce->signal_lock);
 
-                       if (__signal_request(rq))
+                       if (__dma_fence_signal(&rq->fence))
                                /* We own signal_node now, xfer to local list */
                                signal = slist_add(&rq->signal_node, signal);
+                       else
+                               i915_request_put(rq);
 
                        if (release) {
                                add_retire(b, ce->timeline);
@@ -363,6 +348,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
        kfree(b);
 }
 
+static void irq_signal_request(struct i915_request *rq,
+                              struct intel_breadcrumbs *b)
+{
+       if (!__dma_fence_signal(&rq->fence))
+               return;
+
+       i915_request_get(rq);
+       if (llist_add(&rq->signal_node, &b->signaled_requests))
+               irq_work_queue(&b->irq_work);
+}
+
 static void insert_breadcrumb(struct i915_request *rq)
 {
        struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
@@ -372,17 +368,13 @@ static void insert_breadcrumb(struct i915_request *rq)
        if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
                return;
 
-       i915_request_get(rq);
-
        /*
         * If the request is already completed, we can transfer it
         * straight onto a signaled list, and queue the irq worker for
         * its signal completion.
         */
-       if (__request_completed(rq)) {
-               if (__signal_request(rq) &&
-                   llist_add(&rq->signal_node, &b->signaled_requests))
-                       irq_work_queue(&b->irq_work);
+       if (__i915_request_is_complete(rq)) {
+               irq_signal_request(rq, b);
                return;
        }
 
@@ -413,6 +405,8 @@ static void insert_breadcrumb(struct i915_request *rq)
                                break;
                }
        }
+
+       i915_request_get(rq);
        list_add_rcu(&rq->signal_link, pos);
        GEM_BUG_ON(!check_signal_order(ce, rq));
        GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
@@ -453,19 +447,25 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq)
 
 void i915_request_cancel_breadcrumb(struct i915_request *rq)
 {
+       struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs;
        struct intel_context *ce = rq->context;
        bool release;
 
-       if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags))
+       spin_lock(&ce->signal_lock);
+       if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) {
+               spin_unlock(&ce->signal_lock);
                return;
+       }
 
-       spin_lock(&ce->signal_lock);
        list_del_rcu(&rq->signal_link);
-       release = remove_signaling_context(rq->engine->breadcrumbs, ce);
+       release = remove_signaling_context(b, ce);
        spin_unlock(&ce->signal_lock);
        if (release)
                intel_context_put(ce);
 
+       if (__i915_request_is_complete(rq))
+               irq_signal_request(rq, b);
+
        i915_request_put(rq);
 }
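Taken together, the hunks above converge on one reference rule, summarised here from what is visible in the diff:

        /*
         * - insert_breadcrumb(): i915_request_get() is taken only once the
         *   request is actually added to ce->signals via list_add_rcu().
         * - irq_signal_request(): takes its own reference only after
         *   __dma_fence_signal() succeeds, then parks the request on
         *   b->signaled_requests for the irq worker.
         * - signal_irq_work(): a request that loses the __dma_fence_signal()
         *   race is dropped immediately with i915_request_put().
         * - i915_request_cancel_breadcrumb(): now takes ce->signal_lock before
         *   testing I915_FENCE_FLAG_SIGNAL, and a request that completed in
         *   the meantime is routed through irq_signal_request() before the
         *   final i915_request_put().
         */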
 
index cf94525be2c18c5f19abe623c6143810b83d5182..db8c66dde6558cbee274f72257cf5a5442ed14a1 100644 (file)
@@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt)
 
        mutex_init(&ggtt->error_mutex);
        if (ggtt->mappable_end) {
-               /* Reserve a mappable slot for our lockless error capture */
-               ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
-                                                 &ggtt->error_capture,
-                                                 PAGE_SIZE, 0,
-                                                 I915_COLOR_UNEVICTABLE,
-                                                 0, ggtt->mappable_end,
-                                                 DRM_MM_INSERT_LOW);
-               if (ret)
-                       return ret;
+               /*
+                * Reserve a mappable slot for our lockless error capture.
+                *
+                * We strongly prefer taking address 0x0 in order to protect
+                * other critical buffers against accidental overwrites,
+                * as writing to address 0 is a very common mistake.
+                *
+                * Since 0 may already be in use by the system (e.g. the BIOS
+                * framebuffer), we let the reservation fail quietly and hope
+                * 0 remains reserved always.
+                *
+                * If we fail to reserve 0, and then fail to find any space
+                * for an error-capture, remain silent. We can afford not
+                * to reserve an error_capture node as we have fallback
+                * paths, and we trust that 0 will remain reserved. However,
+                * the only likely reason for failure to insert is a driver
+                * bug, which we expect to cause other failures...
+                */
+               ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
+               ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
+               if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
+                       drm_mm_insert_node_in_range(&ggtt->vm.mm,
+                                                   &ggtt->error_capture,
+                                                   ggtt->error_capture.size, 0,
+                                                   ggtt->error_capture.color,
+                                                   0, ggtt->mappable_end,
+                                                   DRM_MM_INSERT_LOW);
        }
+       if (drm_mm_node_allocated(&ggtt->error_capture))
+               drm_dbg(&ggtt->vm.i915->drm,
+                       "Reserved GGTT:[%llx, %llx] for use by error capture\n",
+                       ggtt->error_capture.start,
+                       ggtt->error_capture.start + ggtt->error_capture.size);
 
        /*
         * The upper portion of the GuC address space has a sizeable hole
@@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
 
        /* Clear any non-preallocated blocks */
        drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
-               drm_dbg_kms(&ggtt->vm.i915->drm,
-                           "clearing unused GTT space: [%lx, %lx]\n",
-                           hole_start, hole_end);
+               drm_dbg(&ggtt->vm.i915->drm,
+                       "clearing unused GTT space: [%lx, %lx]\n",
+                       hole_start, hole_end);
                ggtt->vm.clear_range(&ggtt->vm, hole_start,
                                     hole_end - hole_start);
        }
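The reserve-then-insert sequence uses drm_mm's preloaded-node convention: only .size and .color are filled in, so drm_mm_reserve_node() targets whatever .start already holds, which for the presumably zero-initialised ggtt->error_capture is address 0. The pattern in isolation, with illustrative names (a sketch, not the driver code):

        #include <drm/drm_mm.h>

        static void reserve_preferred_or_fallback(struct drm_mm *mm, struct drm_mm_node *node,
                                                  u64 size, unsigned long color, u64 end)
        {
                node->size  = size;     /* node->start keeps its preset value (0 in the hunk above) */
                node->color = color;

                if (drm_mm_reserve_node(mm, node))      /* preferred slot already in use */
                        drm_mm_insert_node_in_range(mm, node, size, 0, color,
                                                    0, end, DRM_MM_INSERT_LOW);
        }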
index 7614a3d24fca5e65783dcf6ebf1126c175a8c2fa..26c7d0a50585a0e2808a17e0a7745d5c6b77d787 100644 (file)
@@ -3988,6 +3988,9 @@ err:
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
        i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+       /* Called on error unwind, clear all flags to prevent further use */
+       memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
 }
 
 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
index 7ea94d201fe6fbd9c5e8d1e160621bfd2ceaf273..8015964043eb7ad4e0bce14f88373dbd2577f6f3 100644 (file)
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
        struct intel_timeline_cacheline *cl =
                container_of(rcu, typeof(*cl), rcu);
 
+       /* Must wait until after all *rq->hwsp are complete before removing */
+       i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+       __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
        i915_active_fini(&cl->active);
        kfree(cl);
 }
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
        GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
-       i915_gem_object_unpin_map(cl->hwsp->vma->obj);
-       i915_vma_put(cl->hwsp->vma);
-       __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
        call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
 
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
                return ERR_CAST(vaddr);
        }
 
-       i915_vma_get(hwsp->vma);
        cl->hwsp = hwsp;
        cl->vaddr = page_pack_bits(vaddr, cacheline);
 
index 10a865f3dc09ac4ba90993aa420b23af1650faf3..9ed19b8bca60093a3ff9ab666ecf0f445dcf8183 100644 (file)
@@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref)
 
 int __i915_active_wait(struct i915_active *ref, int state)
 {
-       int err;
-
        might_sleep();
 
-       if (!i915_active_acquire_if_busy(ref))
-               return 0;
-
        /* Any fence added after the wait begins will not be auto-signaled */
-       err = flush_lazy_signals(ref);
-       i915_active_release(ref);
-       if (err)
-               return err;
+       if (i915_active_acquire_if_busy(ref)) {
+               int err;
 
-       if (!i915_active_is_idle(ref) &&
-           ___wait_var_event(ref, i915_active_is_idle(ref),
-                             state, 0, 0, schedule()))
-               return -EINTR;
+               err = flush_lazy_signals(ref);
+               i915_active_release(ref);
+               if (err)
+                       return err;
 
+               if (___wait_var_event(ref, i915_active_is_idle(ref),
+                                     state, 0, 0, schedule()))
+                       return -EINTR;
+       }
+
+       /*
+        * After the wait is complete, the caller may free the active.
+        * We have to flush any concurrent retirement before returning.
+        */
        flush_work(&ref->work);
        return 0;
 }
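The restructuring matters less than the now-unconditional flush_work() at the end; the window it closes, spelled out from the comment above:

        /*
         *   CPU0                                        CPU1
         *   __i915_active_wait() sees the active idle   ref->work (retirement) still running
         *   (old code) returns without flushing
         *   caller frees the i915_active                worker dereferences freed memory
         *
         * Flushing ref->work before returning, busy or idle, closes the race.
         */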
index 632c713227dc7011bb0fa1f6017d78e76ce0ac74..c6964f82a1bb68268d837d311dad592a37d33e92 100644 (file)
@@ -1346,7 +1346,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
 {
        const unsigned int pi = __platform_mask_index(info, p);
 
-       return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+       return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
 }
 
 static __always_inline bool
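The one-liner fixes a count-used-as-mask slip. With a hypothetical INTEL_SUBPLATFORM_BITS of 3 (the actual value is not visible in this hunk):

        /*
         *   old: platform_mask[pi] & INTEL_SUBPLATFORM_BITS              == mask & 0b011
         *        (the top bit of the subplatform field is silently dropped)
         *   new: platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1) == mask & 0b111
         *        (the full field is kept)
         */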
index d76685ce03998143a090a52e787f799bcfc19a5b..9856479b56d8bf17ea4f2731601b7d0cc7177cb4 100644 (file)
@@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
        return val;
 }
 
-static void park_rc6(struct drm_i915_private *i915)
+static void init_rc6(struct i915_pmu *pmu)
 {
-       struct i915_pmu *pmu = &i915->pmu;
+       struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+       intel_wakeref_t wakeref;
 
-       if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
+       with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
                pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+                                       pmu->sample[__I915_SAMPLE_RC6].cur;
+               pmu->sleep_last = ktime_get();
+       }
+}
 
+static void park_rc6(struct drm_i915_private *i915)
+{
+       struct i915_pmu *pmu = &i915->pmu;
+
+       pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
        pmu->sleep_last = ktime_get();
 }
 
@@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
        return __get_rc6(gt);
 }
 
+static void init_rc6(struct i915_pmu *pmu) { }
 static void park_rc6(struct drm_i915_private *i915) {}
 
 #endif
@@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        struct i915_pmu *pmu = &i915->pmu;
-       intel_wakeref_t wakeref;
        unsigned long flags;
 
-       wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        spin_lock_irqsave(&pmu->lock, flags);
 
        /*
@@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
        GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
        GEM_BUG_ON(pmu->enable_count[bit] == ~0);
 
-       if (pmu->enable_count[bit] == 0 &&
-           config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
-               pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
-               pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-               pmu->sleep_last = ktime_get();
-       }
-
        pmu->enable |= BIT_ULL(bit);
        pmu->enable_count[bit]++;
 
@@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
         * an existing non-zero value.
         */
        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
-       intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
 static void i915_pmu_disable(struct perf_event *event)
@@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
        pmu->cpuhp.cpu = -1;
+       init_rc6(pmu);
 
        if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
index 620b6fab2c5cfb8cd19dfbf0ae12d9f80ba93b3d..92adfee30c7c022def7ad74e29865a16221a9764 100644 (file)
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
 
 static inline bool __i915_request_has_started(const struct i915_request *rq)
 {
-       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
 }
 
 /**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
  */
 static inline bool i915_request_started(const struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_signaled(rq))
                return true;
 
-       /* Remember: started but may have since been preempted! */
-       return __i915_request_has_started(rq);
+       result = true;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       if (likely(!i915_request_signaled(rq)))
+               /* Remember: started but may have since been preempted! */
+               result = __i915_request_has_started(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 /**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
  */
 static inline bool i915_request_is_running(const struct i915_request *rq)
 {
+       bool result;
+
        if (!i915_request_is_active(rq))
                return false;
 
-       return __i915_request_has_started(rq);
+       rcu_read_lock();
+       result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 /**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
        return !list_empty(&rq->sched.link);
 }
 
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+       return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
 static inline bool i915_request_completed(const struct i915_request *rq)
 {
+       bool result;
+
        if (i915_request_signaled(rq))
                return true;
 
-       return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+       result = true;
+       rcu_read_lock(); /* the HWSP may be freed at runtime */
+       if (likely(!i915_request_signaled(rq)))
+               result = __i915_request_is_complete(rq);
+       rcu_read_unlock();
+
+       return result;
 }
 
 static inline void i915_request_mark_complete(struct i915_request *rq)
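Both i915_request_started() and i915_request_completed() now follow the same shape; the shared idiom, pulled out for clarity (a sketch, not an API in the driver):

        static inline bool hwsp_check_sketch(const struct i915_request *rq,
                                             bool (*check)(const struct i915_request *))
        {
                bool result = true;

                if (i915_request_signaled(rq))  /* fence already signaled: no HWSP access needed */
                        return true;

                rcu_read_lock();                /* the HWSP cacheline may be freed at runtime */
                if (likely(!i915_request_signaled(rq)))
                        result = check(rq);     /* e.g. __i915_request_is_complete() */
                rcu_read_unlock();

                return result;
        }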
index c53a222e3dece33793deec5f182e5a84929e333c..713770fb2b92d4846acfd2b1e69fe413cb03b74e 100644 (file)
@@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg)
        vma = i915_vma_instance(out, vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
-               goto out_put_batch;
+               goto out_put_out;
        }
 
        err = i915_vma_pin(vma, 0, 0,
index 302d4e6fc52f1a0bd2e90121818c15aa7b48f10e..788db043a34299e41ac692189bcfc0caa6dab774 100644 (file)
@@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
                          NVVAL(NV507C, SET_CONVERSION, OFS, 0x64));
        } else {
                PUSH_MTHD(push, NV507C, SET_PROCESSING,
-                         NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+                         NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+                                       SET_CONVERSION,
+                         NVVAL(NV507C, SET_CONVERSION, GAIN, 0) |
+                         NVVAL(NV507C, SET_CONVERSION, OFS, 0));
        }
 
        PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8);
index 18d34096f1258f1285e36f4f21223890b6b0855b..093d4ba6910ec40ec556eeea003e5d83261d6b52 100644 (file)
@@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
                          NVVAL(NV827C, SET_CONVERSION, OFS, 0x64));
        } else {
                PUSH_MTHD(push, NV827C, SET_PROCESSING,
-                         NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE));
+                         NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE),
+
+                                       SET_CONVERSION,
+                         NVVAL(NV827C, SET_CONVERSION, GAIN, 0) |
+                         NVVAL(NV827C, SET_CONVERSION, OFS, 0));
        }
 
        PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8,
index c6367035970eb5e6a14fcf4e4d3c531c28de91b7..5f4f09a601d4c38ca5d1dbebb46e052807cd8af6 100644 (file)
@@ -2663,6 +2663,14 @@ nv50_display_create(struct drm_device *dev)
        else
                nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
 
+       if (disp->disp->object.oclass >= GK104_DISP) {
+               dev->mode_config.cursor_width = 256;
+               dev->mode_config.cursor_height = 256;
+       } else {
+               dev->mode_config.cursor_width = 64;
+               dev->mode_config.cursor_height = 64;
+       }
+
        /* create crtc objects to represent the hw heads */
        if (disp->disp->object.oclass >= GV100_DISP)
                crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
index a5d8274036609cc86305e80b2053efd7f5b78bd6..ea9f8667305ecb9fc80c02930b0f43c60c92bd87 100644 (file)
@@ -22,6 +22,7 @@
 #include "head.h"
 #include "core.h"
 
+#include "nvif/push.h"
 #include <nvif/push507c.h>
 
 #include <nvhw/class/cl917d.h>
@@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh)
        return 0;
 }
 
+static int
+head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+       const int i = head->base.index;
+       int ret;
+
+       ret = PUSH_WAIT(push, 5);
+       if (ret)
+               return ret;
+
+       PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i),
+                 NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) |
+                 NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) |
+                 NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) |
+                 NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) |
+                 NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) |
+                 NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND),
+
+                               HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8);
+
+       PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle);
+       return 0;
+}
+
 int
 head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw,
                     struct nv50_head_atom *asyh)
@@ -101,7 +127,7 @@ head917d = {
        .core_clr = head907d_core_clr,
        .curs_layout = head917d_curs_layout,
        .curs_format = head507d_curs_format,
-       .curs_set = head907d_curs_set,
+       .curs_set = head917d_curs_set,
        .curs_clr = head907d_curs_clr,
        .base = head917d_base,
        .ovly = head907d_ovly,
index ce451242f79eb2fc16403a5cf1d039b70ede0ced..271de3a63f21c2158645e415014759f548d965fa 100644 (file)
@@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw)
        nvif_notify_get(&wndw->notify);
 }
 
+static const u64 nv50_cursor_format_modifiers[] = {
+       DRM_FORMAT_MOD_LINEAR,
+       DRM_FORMAT_MOD_INVALID,
+};
+
 int
 nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
               enum drm_plane_type type, const char *name, int index,
@@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
        struct nvif_mmu *mmu = &drm->client.mmu;
        struct nv50_disp *disp = nv50_disp(dev);
        struct nv50_wndw *wndw;
+       const u64 *format_modifiers;
        int nformat;
        int ret;
 
@@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
 
        for (nformat = 0; format[nformat]; nformat++);
 
-       ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
-                                      format, nformat,
-                                      nouveau_display(dev)->format_modifiers,
-                                      type, "%s-%d", name, index);
+       if (type == DRM_PLANE_TYPE_CURSOR)
+               format_modifiers = nv50_cursor_format_modifiers;
+       else
+               format_modifiers = nouveau_display(dev)->format_modifiers;
+
+       ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat,
+                                      format_modifiers, type, "%s-%d", name, index);
        if (ret) {
                kfree(*pwndw);
                *pwndw = NULL;
index 2a2612d6e1e0e6f6fd687f5212eca2ec49730568..fb223723a38add00ef3419b9bd0a16c398f31ef1 100644 (file)
 #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND                  (0x00000000)
 #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND          (0x00000001)
 #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR                          (0x00000002)
+#define NV917D_HEAD_SET_OFFSET_CURSOR(a)                                        (0x00000484 + (a)*0x00000300)
+#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN                                    31:0
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a)                                   (0x0000048C + (a)*0x00000300)
+#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE                               31:0
 #define NV917D_HEAD_SET_DITHER_CONTROL(a)                                       (0x000004A0 + (a)*0x00000300)
 #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE                                   0:0
 #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE                           (0x00000000)
index 168d7694ede5cc7de4d77e9ad56b71eb4d7d05c4..6d3a8a3d2087b5310a75e18466373d6a40364f12 100644 (file)
@@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push)
 } while(0)
 #endif
 
-#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do {                            \
-       PUSH_##o##_HDR((p), s, mA, (c)+(n));                           \
-       PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, "");                   \
+#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do {                             \
+       PUSH_##o##_HDR((p), s, mA, (ds)+(n));                         \
+       PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, "");                  \
 } while(0)
-#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1");       \
-       PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1");      \
+       PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2");       \
-       PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2");      \
+       PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3");       \
-       PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3");      \
+       PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4");       \
-       PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4");      \
+       PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5");       \
-       PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5");      \
+       PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6");       \
-       PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6");      \
+       PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7");       \
-       PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7");      \
+       PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                 \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8");       \
-       PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                  \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8");      \
+       PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
-#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do {                \
-       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9");       \
-       PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \
-       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                   \
+#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do {                 \
+       PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9");      \
+       PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \
+       PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, "");                  \
 } while(0)
 
-#define PUSH_1D(X,o,p,s,mA,dA)                            \
-       PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA))
-#define PUSH_2D(X,o,p,s,mA,dA,mB,dB)                      \
-       PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \
-                                            X##mA, (dA))
-#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC)                \
-       PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \
-                                            X##mB, (dB), \
-                                            X##mA, (dA))
-#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD)          \
-       PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \
-                                            X##mC, (dC), \
-                                            X##mB, (dB), \
-                                            X##mA, (dA))
-#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE)    \
-       PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \
-                                            X##mD, (dD), \
-                                            X##mC, (dC), \
-                                            X##mB, (dB), \
-                                            X##mA, (dA))
+#define PUSH_1D(X,o,p,s,mA,dA)                         \
+       PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA))
+#define PUSH_2D(X,o,p,s,mA,dA,mB,dB)                   \
+       PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \
+                                         X##mA, (dA))
+#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC)             \
+       PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \
+                                         X##mB, (dB), \
+                                         X##mA, (dA))
+#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD)       \
+       PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \
+                                         X##mC, (dC), \
+                                         X##mB, (dB), \
+                                         X##mA, (dA))
+#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \
+       PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \
+                                         X##mD, (dD), \
+                                         X##mC, (dC), \
+                                         X##mB, (dB), \
+                                         X##mA, (dA))
 #define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \
-       PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF),    \
-                                            X##mE, (dE),    \
-                                            X##mD, (dD),    \
-                                            X##mC, (dC),    \
-                                            X##mB, (dB),    \
-                                            X##mA, (dA))
+       PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF),       \
+                                         X##mE, (dE),       \
+                                         X##mD, (dD),       \
+                                         X##mC, (dC),       \
+                                         X##mB, (dB),       \
+                                         X##mA, (dA))
 #define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \
-       PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG),          \
-                                            X##mF, (dF),          \
-                                            X##mE, (dE),          \
-                                            X##mD, (dD),          \
-                                            X##mC, (dC),          \
-                                            X##mB, (dB),          \
-                                            X##mA, (dA))
+       PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG),             \
+                                         X##mF, (dF),             \
+                                         X##mE, (dE),             \
+                                         X##mD, (dD),             \
+                                         X##mC, (dC),             \
+                                         X##mB, (dB),             \
+                                         X##mA, (dA))
 #define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \
-       PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH),                \
-                                            X##mG, (dG),                \
-                                            X##mF, (dF),                \
-                                            X##mE, (dE),                \
-                                            X##mD, (dD),                \
-                                            X##mC, (dC),                \
-                                            X##mB, (dB),                \
-                                            X##mA, (dA))
+       PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH),                   \
+                                         X##mG, (dG),                   \
+                                         X##mF, (dF),                   \
+                                         X##mE, (dE),                   \
+                                         X##mD, (dD),                   \
+                                         X##mC, (dC),                   \
+                                         X##mB, (dB),                   \
+                                         X##mA, (dA))
 #define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \
-       PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI),                      \
-                                            X##mH, (dH),                      \
-                                            X##mG, (dG),                      \
-                                            X##mF, (dF),                      \
-                                            X##mE, (dE),                      \
-                                            X##mD, (dD),                      \
-                                            X##mC, (dC),                      \
-                                            X##mB, (dB),                      \
-                                            X##mA, (dA))
+       PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI),                         \
+                                         X##mH, (dH),                         \
+                                         X##mG, (dG),                         \
+                                         X##mF, (dF),                         \
+                                         X##mE, (dE),                         \
+                                         X##mD, (dD),                         \
+                                         X##mC, (dC),                         \
+                                         X##mB, (dB),                         \
+                                         X##mA, (dA))
 #define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \
-       PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ),                            \
-                                             X##mI, (dI),                            \
-                                             X##mH, (dH),                            \
-                                             X##mG, (dG),                            \
-                                             X##mF, (dF),                            \
-                                             X##mE, (dE),                            \
-                                             X##mD, (dD),                            \
-                                             X##mC, (dC),                            \
-                                             X##mB, (dB),                            \
-                                             X##mA, (dA))
+       PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ),                               \
+                                          X##mI, (dI),                               \
+                                          X##mH, (dH),                               \
+                                          X##mG, (dG),                               \
+                                          X##mF, (dF),                               \
+                                          X##mE, (dE),                               \
+                                          X##mD, (dD),                               \
+                                          X##mC, (dC),                               \
+                                          X##mB, (dB),                               \
+                                          X##mA, (dA))
 
-#define PUSH_1P(X,o,p,s,mA,dp,ds)                           \
-       PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp))
-#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds)                     \
-       PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \
-                                              X##mA, (dA))
-#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds)               \
-       PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \
-                                              X##mB, (dB), \
-                                              X##mA, (dA))
+#define PUSH_1P(X,o,p,s,mA,dp,ds)                       \
+       PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp))
+#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds)                 \
+       PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \
+                                          X##mA, (dA))
+#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds)           \
+       PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \
+                                          X##mB, (dB), \
+                                          X##mA, (dA))
 
 #define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL
 #define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D,          \
index c85b1af06b7bf97627a9248651032f05a431aeaf..7ea367a5444dd3e26d96a601200e3670ff663f72 100644 (file)
@@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
-       int i;
+       int i, j;
 
        if (!ttm_dma)
                return;
@@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
        if (nvbo->force_coherent)
                return;
 
-       for (i = 0; i < ttm_dma->num_pages; i++)
+       for (i = 0; i < ttm_dma->num_pages; ++i) {
+               struct page *p = ttm_dma->pages[i];
+               size_t num_pages = 1;
+
+               for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+                       if (++p != ttm_dma->pages[j])
+                               break;
+
+                       ++num_pages;
+               }
                dma_sync_single_for_device(drm->dev->dev,
                                           ttm_dma->dma_address[i],
-                                          PAGE_SIZE, DMA_TO_DEVICE);
+                                          num_pages * PAGE_SIZE, DMA_TO_DEVICE);
+               i += num_pages;
+       }
 }
 
 void
@@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
-       int i;
+       int i, j;
 
        if (!ttm_dma)
                return;
@@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
        if (nvbo->force_coherent)
                return;
 
-       for (i = 0; i < ttm_dma->num_pages; i++)
+       for (i = 0; i < ttm_dma->num_pages; ++i) {
+               struct page *p = ttm_dma->pages[i];
+               size_t num_pages = 1;
+
+               for (j = i + 1; j < ttm_dma->num_pages; ++j) {
+                       if (++p != ttm_dma->pages[j])
+                               break;
+
+                       ++num_pages;
+               }
+
                dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
-                                       PAGE_SIZE, DMA_FROM_DEVICE);
+                                       num_pages * PAGE_SIZE, DMA_FROM_DEVICE);
+               i += num_pages;
+       }
 }
 
 void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
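Both sync paths now collapse runs of physically contiguous pages into a single dma_sync call; the "++p != ttm_dma->pages[j]" test relies on contiguous physical pages having contiguous struct page entries, the same assumption the driver makes for their DMA addresses. The idea as a stand-alone helper (a sketch, not the driver code):

        #include <linux/dma-mapping.h>
        #include <linux/mm.h>

        static void dma_sync_coalesced_for_device(struct device *dev, struct page **pages,
                                                  dma_addr_t *dma_address, unsigned long num_pages)
        {
                unsigned long i = 0;

                while (i < num_pages) {
                        unsigned long run = 1;

                        /* extend the run while the next struct page is contiguous */
                        while (i + run < num_pages && pages[i] + run == pages[i + run])
                                run++;

                        dma_sync_single_for_device(dev, dma_address[i],
                                                   run * PAGE_SIZE, DMA_TO_DEVICE);
                        i += run;
                }
        }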
index 4f69e4c3dafde298f6f7707b719a074a5dca5076..1c3f890377d2c283220c2868c389c4135c332eae 100644 (file)
@@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
        struct drm_nouveau_svm_init *args = data;
        int ret;
 
+       /* We need to fail if svm is disabled */
+       if (!cli->drm->svm)
+               return -ENOSYS;
+
        /* Allocate tracking for SVM-enabled VMM. */
        if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
                return -ENOMEM;
index 8cd776adc592be46dff106dc8fe25b1219015502..74bf1c84b63742065c546a8442e973169c31db8f 100644 (file)
@@ -79,12 +79,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
        struct page *p;
        void *vaddr;
 
-       if (order) {
-               gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
+       /* Don't set the __GFP_COMP flag for higher order allocations.
+        * Mapping pages directly into a userspace process and calling
+        * put_page() on a TTM allocated page is illegal.
+        */
+       if (order)
+               gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
                        __GFP_KSWAPD_RECLAIM;
-               gfp_flags &= ~__GFP_MOVABLE;
-               gfp_flags &= ~__GFP_COMP;
-       }
 
        if (!pool->use_dma_alloc) {
                p = alloc_pages(gfp_flags, order);
index 55510622057826ee3376fc8552cb492b6ed87136..98cab0bbe92d881086f09e1b916d8d27c5c320e4 100644 (file)
@@ -1267,6 +1267,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
        card->dai_link = dai_link;
        card->num_links = 1;
        card->name = vc4_hdmi->variant->card_name;
+       card->driver_name = "vc4-hdmi";
        card->dev = dev;
        card->owner = THIS_MODULE;
 
index cccd341e5d6707858141dc60278a5051e9f77e9d..3b722252d1fbe1090ecba3d4059a2fd7765c476e 100644 (file)
@@ -620,11 +620,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
         * for now we just allocate globally.
         */
        if (!hvs->hvs5)
-               /* 96kB */
-               drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024);
+               /* 48k words of 2x12-bit pixels */
+               drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
        else
-               /* 70k words */
-               drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024);
+               /* 60k words of 4x12-bit pixels */
+               drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
 
        /* Upload filter kernels.  We only have the one for now, so we
         * keep it around for the lifetime of the driver.
index 6b39cc2ca18d0953bebf8b4c3d0aeacf74d500cb..5612cab552270d68f35e31b6828293c37bca72d4 100644 (file)
@@ -437,6 +437,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst)
 static u32 vc4_lbm_size(struct drm_plane_state *state)
 {
        struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+       struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
        u32 pix_per_line;
        u32 lbm;
 
@@ -472,7 +473,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state)
                lbm = pix_per_line * 16;
        }
 
-       lbm = roundup(lbm, 32);
+       /* Align it to 64 or 128 (hvs5) bytes */
+       lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64);
+
+       /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
+       lbm /= vc4->hvs->hvs5 ? 4 : 2;
 
        return lbm;
 }
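The new tail of vc4_lbm_size() mirrors two facts stated in the added comments: the allocation must be aligned to 64 bytes on HVS4 or 128 bytes on HVS5, and each LBM word then carries 2 or 4 pixels respectively. Below is a standalone sketch of just that rounding-and-division arithmetic; roundup_u32() is assumed to behave like the kernel's roundup() macro for positive values, and the inputs in main() are made-up numbers, not values from a real plane state.

#include <stdio.h>

typedef unsigned int u32;

/* Assumed equivalent of the kernel roundup() macro for positive values. */
static u32 roundup_u32(u32 x, u32 to)
{
	return ((x + to - 1) / to) * to;
}

/* The two steps added in the hunk: align, then divide by pixels per word. */
static u32 lbm_words(u32 lbm, int is_hvs5)
{
	/* Align it to 64 or 128 (hvs5) bytes */
	lbm = roundup_u32(lbm, is_hvs5 ? 128 : 64);

	/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
	return lbm / (is_hvs5 ? 4 : 2);
}

int main(void)
{
	printf("HVS4: 1000 -> %u\n", lbm_words(1000, 0)); /* rounds to 1024, /2 = 512 */
	printf("HVS5: 1000 -> %u\n", lbm_words(1000, 1)); /* rounds to 1024, /4 = 256 */
	return 0;
}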
@@ -912,9 +917,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
                if (!vc4_state->is_unity) {
                        vc4_dlist_write(vc4_state,
                                        VC4_SET_FIELD(vc4_state->crtc_w,
-                                                     SCALER_POS1_SCL_WIDTH) |
+                                                     SCALER5_POS1_SCL_WIDTH) |
                                        VC4_SET_FIELD(vc4_state->crtc_h,
-                                                     SCALER_POS1_SCL_HEIGHT));
+                                                     SCALER5_POS1_SCL_HEIGHT));
                }
 
                /* Position Word 2: Source Image Size */
index 0743ef51d3b246ab2086ca3dda387a842b79e19e..8429ebe7097e4c8f9235c35a22e5ab2ace6eb6f6 100644 (file)
@@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                        MT_STORE_FIELD(inrange_state);
                        return 1;
                case HID_DG_CONFIDENCE:
-                       if (cls->name == MT_CLS_WIN_8 &&
+                       if ((cls->name == MT_CLS_WIN_8 ||
+                            cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) &&
                                (field->application == HID_DG_TOUCHPAD ||
                                 field->application == HID_DG_TOUCHSCREEN))
                                app->quirks |= MT_QUIRK_CONFIDENCE;
index e8acd235db2a90f156ec516d37990921a759e0db..aa9e48876cedaaa6b4b3abbbef924b27a9c913d1 100644 (file)
@@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev,
        }
 
        if (flush)
-               wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo);
+               wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo);
        else if (insert)
-               wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo,
+               wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo,
                                       raw_data, report_size);
 
        return insert && !flush;
@@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res)
 static int wacom_devm_kfifo_alloc(struct wacom *wacom)
 {
        struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-       struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo;
+       struct kfifo_rec_ptr_2 *pen_fifo;
        int error;
 
        pen_fifo = devres_alloc(wacom_devm_kfifo_release,
@@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom)
        }
 
        devres_add(&wacom->hdev->dev, pen_fifo);
+       wacom_wac->pen_fifo = pen_fifo;
 
        return 0;
 }
index da612b6e9c7796f32202c37e7b2a0b56b6db4eec..195910dd2154e2e5471df45a17f4454ffa219787 100644 (file)
@@ -342,7 +342,7 @@ struct wacom_wac {
        struct input_dev *pen_input;
        struct input_dev *touch_input;
        struct input_dev *pad_input;
-       struct kfifo_rec_ptr_2 pen_fifo;
+       struct kfifo_rec_ptr_2 *pen_fifo;
        int pid;
        int num_contacts_left;
        u8 bt_features;
index 52acd77438ede98280f0ab6e3364f4e28c8becc7..251e75c9ba9d0dda26e17d2558715a00a7a923bb 100644 (file)
@@ -268,6 +268,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7aa6),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
+       {
+               /* Alder Lake-P */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
        {
                /* Alder Lake CPU */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
index 3e7df1c0477f75a6e75680c0e55816e672202dce..81d7b21d31ec27312ab39b853f28593781d005d8 100644 (file)
@@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
 
 static int stm_heartbeat_init(void)
 {
-       int i, ret = -ENOMEM;
+       int i, ret;
 
        if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
                return -EINVAL;
@@ -72,8 +72,10 @@ static int stm_heartbeat_init(void)
        for (i = 0; i < nr_devs; i++) {
                stm_heartbeat[i].data.name =
                        kasprintf(GFP_KERNEL, "heartbeat.%d", i);
-               if (!stm_heartbeat[i].data.name)
+               if (!stm_heartbeat[i].data.name) {
+                       ret = -ENOMEM;
                        goto fail_unregister;
+               }
 
                stm_heartbeat[i].data.nr_chans  = 1;
                stm_heartbeat[i].data.link      = stm_heartbeat_link;
index d4d60ad0eda0b04746ba2664071efae6e1745dc3..ab1f39ac39f4f081fb943c877efa0195770c508f 100644 (file)
@@ -1013,6 +1013,7 @@ config I2C_SIRF
 config I2C_SPRD
        tristate "Spreadtrum I2C interface"
        depends on I2C=y && (ARCH_SPRD || COMPILE_TEST)
+       depends on COMMON_CLK
        help
          If you say yes to this option, support will be included for the
          Spreadtrum I2C interface.
index b444fbf1a26255988ef94407583ebd6d08aef9c7..a8e8af57e33f466ae463d230feeef0ebddf7e59c 100644 (file)
@@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = {
 
 };
 
+static const struct platform_device_id imx_i2c_devtype[] = {
+       {
+               .name = "imx1-i2c",
+               .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
+       }, {
+               .name = "imx21-i2c",
+               .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
+       }, {
+               /* sentinel */
+       }
+};
+MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
+
 static const struct of_device_id i2c_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
        { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
@@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        match = device_get_match_data(&pdev->dev);
-       i2c_imx->hwdata = match;
+       if (match)
+               i2c_imx->hwdata = match;
+       else
+               i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+                               platform_get_device_id(pdev)->driver_data;
 
        /* Setup i2c_imx driver structure */
        strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = {
                .of_match_table = i2c_imx_dt_ids,
                .acpi_match_table = i2c_imx_acpi_ids,
        },
+       .id_table = imx_i2c_devtype,
 };
 
 static int __init i2c_adap_imx_init(void)
index 0818d3e50734771b854b875b78bcc60c37ded121..2ffd2f354d0ae3e3edbcd552e191d72626a21fa4 100644 (file)
@@ -1275,7 +1275,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
        mtk_i2c_clock_disable(i2c);
 
        ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
-                              IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
+                              IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
+                              I2C_DRV_NAME, i2c);
        if (ret < 0) {
                dev_err(&pdev->dev,
                        "Request I2C IRQ %d fail\n", irq);
@@ -1302,7 +1303,16 @@ static int mtk_i2c_remove(struct platform_device *pdev)
 }
 
 #ifdef CONFIG_PM_SLEEP
-static int mtk_i2c_resume(struct device *dev)
+static int mtk_i2c_suspend_noirq(struct device *dev)
+{
+       struct mtk_i2c *i2c = dev_get_drvdata(dev);
+
+       i2c_mark_adapter_suspended(&i2c->adap);
+
+       return 0;
+}
+
+static int mtk_i2c_resume_noirq(struct device *dev)
 {
        int ret;
        struct mtk_i2c *i2c = dev_get_drvdata(dev);
@@ -1317,12 +1327,15 @@ static int mtk_i2c_resume(struct device *dev)
 
        mtk_i2c_clock_disable(i2c);
 
+       i2c_mark_adapter_resumed(&i2c->adap);
+
        return 0;
 }
 #endif
 
 static const struct dev_pm_ops mtk_i2c_pm = {
-       SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
+       SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
+                                     mtk_i2c_resume_noirq)
 };
 
 static struct platform_driver mtk_i2c_driver = {
index d9607905dc2f1dd0db15c6ca457ebc445921d829..845eda70b8cab52a0453c9f4cb545010fba4305d 100644 (file)
@@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target,
                if (result)
                        return result;
                if (recv_len && i == 0) {
-                       if (data[i] > I2C_SMBUS_BLOCK_MAX + 1)
+                       if (data[i] > I2C_SMBUS_BLOCK_MAX)
                                return -EPROTO;
                        length += data[i];
                }
index ec7a7e917eddb2ce94b65f97f4c2b813a64ee45f..c0c7d01473f2ba110553c0349e10ca6b03dbb7ca 100644 (file)
@@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out)
                flags &= ~I2C_M_RECV_LEN;
        }
 
-       return (flags != 0) ? -EINVAL : 0;
+       return 0;
 }
 
 /**
index 6f08c0c3238d5ade9a25f4c9900ba79d23fe2782..8b113ae32dc713f26d28726a934a30c9f3b55863 100644 (file)
@@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg)
        /* read back register to make sure that register writes completed */
        if (reg != I2C_TX_FIFO)
                readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg));
+       else if (i2c_dev->is_vi)
+               readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS));
 }
 
 static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg)
@@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data,
        writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len);
 }
 
+static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data,
+                          unsigned int reg, unsigned int len)
+{
+       u32 *data32 = data;
+
+       /*
+        * The VI I2C controller has a known hardware bug where back-to-back
+        * writes to the TX_FIFO register can get stuck. The recommended
+        * software workaround is to read an I2C register after each write
+        * to the TX_FIFO register to flush out the data.
+        */
+       while (len--)
+               i2c_writel(i2c_dev, *data32++, reg);
+}
+
 static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data,
                       unsigned int reg, unsigned int len)
 {
@@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev,
        void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg);
        u32 val;
 
-       if (!i2c_dev->atomic_mode)
+       if (!i2c_dev->atomic_mode && !in_irq())
                return readl_relaxed_poll_timeout(addr, val, !(val & mask),
                                                  delay_us, timeout_us);
 
@@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
                i2c_dev->msg_buf_remaining = buf_remaining;
                i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD;
 
-               i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+               if (i2c_dev->is_vi)
+                       i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
+               else
+                       i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer);
 
                buf += words_to_transfer * BYTES_PER_FIFO_WORD;
        }
index b11c8c47ba2aaee658e6d68709062910736e0cf4..e946903b099367b02faafbc46e5e15da646071ba 100644 (file)
@@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev,
        ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
                                flags, indio_dev->name, indio_dev);
        if (ret)
-               goto error_kfifo_free;
+               return ret;
 
        indio_dev->setup_ops = setup_ops;
        indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
        return 0;
-
-error_kfifo_free:
-       iio_kfifo_free(indio_dev->buffer);
-       return ret;
 }
 
 static const char * const chan_name_ain[] = {
index 0507283bd4c1dedd78780f9c20dcca39beef0748..2dbd2646e44e97f3edd899143b5380245156ebe7 100644 (file)
  * @sdata: Sensor data.
  *
  * returns:
- * 0 - no new samples available
- * 1 - new samples available
- * negative - error or unknown
+ * false - no new samples available or read error
+ * true - new samples available
  */
-static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
-                                           struct st_sensor_data *sdata)
+static bool st_sensors_new_samples_available(struct iio_dev *indio_dev,
+                                            struct st_sensor_data *sdata)
 {
        int ret, status;
 
        /* How would I know if I can't check it? */
        if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr)
-               return -EINVAL;
+               return true;
 
        /* No scan mask, no interrupt */
        if (!indio_dev->active_scan_mask)
-               return 0;
+               return false;
 
        ret = regmap_read(sdata->regmap,
                          sdata->sensor_settings->drdy_irq.stat_drdy.addr,
                          &status);
        if (ret < 0) {
                dev_err(sdata->dev, "error checking samples available\n");
-               return ret;
+               return false;
        }
 
-       if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask)
-               return 1;
-
-       return 0;
+       return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask);
 }
 
 /**
@@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
 
        /* Tell the interrupt handler that we're dealing with edges */
        if (irq_trig == IRQF_TRIGGER_FALLING ||
-           irq_trig == IRQF_TRIGGER_RISING)
+           irq_trig == IRQF_TRIGGER_RISING) {
+               if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) {
+                       dev_err(&indio_dev->dev,
+                               "edge IRQ not supported w/o stat register.\n");
+                       err = -EOPNOTSUPP;
+                       goto iio_trigger_free;
+               }
                sdata->edge_irq = true;
-       else
+       } else {
                /*
                 * If we're not using edges (i.e. level interrupts) we
                 * just mask off the IRQ, handle one interrupt, then
@@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                 * interrupt handler top half again and start over.
                 */
                irq_trig |= IRQF_ONESHOT;
+       }
 
        /*
         * If the interrupt pin is Open Drain, by definition this
index 28921b62e64203427a79272798ac9cea4f3c93ce..e9297c25d4ef63b38c5407baadf47a10d8b0e716 100644 (file)
@@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev,
                return ret;
 
        if (pwr_down)
-               st->pwr_down_mask |= (1 << chan->channel);
-       else
                st->pwr_down_mask &= ~(1 << chan->channel);
+       else
+               st->pwr_down_mask |= (1 << chan->channel);
 
        ret = ad5504_spi_write(st, AD5504_ADDR_CTRL,
                                AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) |
index a2f820997afc2a13f7960093e0d3c3570c2076b4..37fd0b65a0140085a9672269bd02adf8da391d6f 100644 (file)
@@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data,
                return ret;
 
        regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
-       if (regval > ARRAY_SIZE(sx9310_pthresh_codes))
+       if (regval >= ARRAY_SIZE(sx9310_pthresh_codes))
                return -EINVAL;
 
        *val = sx9310_pthresh_codes[regval];
@@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i,
                if (ret)
                        break;
 
-               pos = min(max(ilog2(pos), 3), 10) - 3;
+               /* Powers of 2, except for a gap between 16 and 64 */
+               pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3);
                reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK;
                reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK,
                                           pos);
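The reworked AVGPOSFILT mapping clamps ilog2(pos) to 3..11 and subtracts 4 instead of 3 once pos reaches 32, matching the comment that the register encoding is powers of two with a gap between 16 and 64. Here is a tiny self-contained version of that expression which prints the field value produced for a few power-of-two filter settings; the helper names are ours, not the driver's.

#include <stdio.h>

/* Integer log2 of a positive value, like the kernel's ilog2() at run time. */
static int ilog2_u32(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Same expression as the hunk: powers of 2, except for a gap between 16 and 64. */
static int avgposfilt_code(unsigned int pos)
{
	return clamp_int(ilog2_u32(pos), 3, 11) - (pos >= 32 ? 4 : 3);
}

int main(void)
{
	unsigned int vals[] = { 8, 16, 64, 128, 256, 512, 1024 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("pos=%4u -> field value %d\n", vals[i], avgposfilt_code(vals[i]));
	return 0;
}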
index 503fe54a0bb937ad9684b001cf3dd59dbf2741b1..608ccb1d8bc82feb28eef57a1a31f58f27b1851a 100644 (file)
@@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type)
        if (ret < 0)
                return ret;
 
+       /*
+        * Give the mlx90632 some time to reset properly before sending a new I2C command;
+        * if this is not done, the following I2C command(s) will not be accepted.
+        */
+       usleep_range(150, 200);
+
        ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL,
                                 (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK),
                                 (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT));
index a7401398cb344a0dc128d50787697ecf3b941f55..d109bb3822a5f862235d18ba004f9ba530df3a89 100644 (file)
@@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
        init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
        init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
-       init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges;
+       init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
        init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
index 55d538625e36139c40ab93122944699816b0d95d..ad8253245a85fd73963c4c9d78034d5de2344884 100644 (file)
@@ -532,7 +532,7 @@ struct hns_roce_qp_table {
        struct hns_roce_hem_table       sccc_table;
        struct mutex                    scc_mutex;
        struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM];
-       spinlock_t bank_lock;
+       struct mutex bank_mutex;
 };
 
 struct hns_roce_cq_table {
index d8e2fe5558d29d371ac31776a564f8638e21c17c..1116371adf74f4bc267aa5b7e2c8085a5c54356e 100644 (file)
@@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
                hr_qp->doorbell_qpn = 1;
        } else {
-               spin_lock(&qp_table->bank_lock);
+               mutex_lock(&qp_table->bank_mutex);
                bankid = get_least_load_bankid_for_qp(qp_table->bank);
 
                ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
@@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
                if (ret) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "failed to alloc QPN, ret = %d\n", ret);
-                       spin_unlock(&qp_table->bank_lock);
+                       mutex_unlock(&qp_table->bank_mutex);
                        return ret;
                }
 
                qp_table->bank[bankid].inuse++;
-               spin_unlock(&qp_table->bank_lock);
+               mutex_unlock(&qp_table->bank_mutex);
 
                hr_qp->doorbell_qpn = (u32)num;
        }
@@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 
        ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);
 
-       spin_lock(&hr_dev->qp_table.bank_lock);
+       mutex_lock(&hr_dev->qp_table.bank_mutex);
        hr_dev->qp_table.bank[bankid].inuse--;
-       spin_unlock(&hr_dev->qp_table.bank_lock);
+       mutex_unlock(&hr_dev->qp_table.bank_mutex);
 }
 
 static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
@@ -1371,6 +1371,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
        unsigned int i;
 
        mutex_init(&qp_table->scc_mutex);
+       mutex_init(&qp_table->bank_mutex);
        xa_init(&hr_dev->qp_table_xa);
 
        reserved_from_bot = hr_dev->caps.reserved_qps;
index d26f3f3e0462a03e8cc38058124add5bcb0f86d8..aabdc07e475374fbe591fa15a42a0e20b931c497 100644 (file)
@@ -3311,8 +3311,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
        int err;
 
        dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
-       err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev),
-                                             &dev->port[port_num].roce.nb);
+       err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
        if (err) {
                dev->port[port_num].roce.nb.notifier_call = NULL;
                return err;
@@ -3324,8 +3323,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num)
 {
        if (dev->port[port_num].roce.nb.notifier_call) {
-               unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev),
-                                                 &dev->port[port_num].roce.nb);
+               unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
                dev->port[port_num].roce.nb.notifier_call = NULL;
        }
 }
index e59615a4c9d98e2451e25b790d605b057967f33b..586b0e52ba7f8eb804d09711a7392a6a090351b4 100644 (file)
@@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
        struct usnic_vnic_res *vnic_res;
        int len;
 
-       len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ",
+       len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu",
                         qp_grp->ibqp.qp_num,
                         usnic_ib_qp_grp_state_to_string(qp_grp->state),
                         qp_grp->owner_pid,
@@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf)
                res_chunk = qp_grp->res_chunk_list[i];
                for (j = 0; j < res_chunk->cnt; j++) {
                        vnic_res = res_chunk->res[j];
-                       len += sysfs_emit_at(
-                               buf, len, "%s[%d] ",
+                       len += sysfs_emit_at(buf, len, " %s[%d]",
                                usnic_vnic_res_type_to_str(vnic_res->type),
                                vnic_res->vnic_idx);
                }
        }
 
-       len = sysfs_emit_at(buf, len, "\n");
+       len += sysfs_emit_at(buf, len, "\n");
 
        return len;
 }
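The final hunk in this file fixes an accumulation bug: sysfs_emit_at() returns only the number of bytes written at the given offset, so assigning its return value instead of adding it would make summary_show() report a length of 1 and truncate everything emitted before the trailing newline. The append-at-offset pattern looks like this with plain snprintf() (purely illustrative, not the sysfs helper itself):

#include <stdio.h>

#define BUF_SZ 128

int main(void)
{
	char buf[BUF_SZ];
	int len = 0;

	/* Append pieces at the current offset and accumulate the length. */
	len += snprintf(buf + len, BUF_SZ - len, "QPN: %d State: (%s)", 7, "INIT");
	len += snprintf(buf + len, BUF_SZ - len, " %s[%d]", "VF", 0);

	/*
	 * Keep accumulating here as well; writing "len = ..." instead would
	 * discard the count of everything above and return just 1.
	 */
	len += snprintf(buf + len, BUF_SZ - len, "\n");

	printf("len=%d content=%s", len, buf);
	return 0;
}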
index c142f5e7f25f82d0d163570c5eb5663e1172e54c..de57f2fed743755b8d29ca4b7ae75563cd1c81ba 100644 (file)
@@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags)
        return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX);
 }
 
+static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type)
+{
+       switch (type) {
+       case PVRDMA_NETWORK_ROCE_V1:
+               return RDMA_NETWORK_ROCE_V1;
+       case PVRDMA_NETWORK_IPV4:
+               return RDMA_NETWORK_IPV4;
+       case PVRDMA_NETWORK_IPV6:
+               return RDMA_NETWORK_IPV6;
+       default:
+               return RDMA_NETWORK_IPV6;
+       }
+}
+
 void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst,
                         const struct pvrdma_qp_cap *src);
 void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst,
index a119ac3e103c8d6aebbaf945750a9c26cefc106e..6aa40bd2fd52d3dd921b32b7425adf338e1b0188 100644 (file)
@@ -367,7 +367,7 @@ retry:
        wc->dlid_path_bits = cqe->dlid_path_bits;
        wc->port_num = cqe->port_num;
        wc->vendor_err = cqe->vendor_err;
-       wc->network_hdr_type = cqe->network_hdr_type;
+       wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type);
 
        /* Update shared ring state */
        pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
index c4b06ced30a75b1b7441d130adfaa3a6d7bc1d8d..943914c2a50c70bd18e19306b743cc6709040fb5 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/if_arp.h>
 #include <linux/netdevice.h>
 #include <linux/if.h>
+#include <linux/if_vlan.h>
 #include <net/udp_tunnel.h>
 #include <net/sch_generic.h>
 #include <linux/netfilter.h>
@@ -153,9 +154,14 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct udphdr *udph;
        struct net_device *ndev = skb->dev;
+       struct net_device *rdev = ndev;
        struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 
+       if (!rxe && is_vlan_dev(rdev)) {
+               rdev = vlan_dev_real_dev(ndev);
+               rxe = rxe_get_dev_from_net(rdev);
+       }
        if (!rxe)
                goto drop;
 
index 5a098083a9d225517de7b02dfeaf10587060076d..c7e3b6a4af38f929e9e9018465705cd75b069f34 100644 (file)
@@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                        else
                                wc->network_hdr_type = RDMA_NETWORK_IPV6;
 
+                       if (is_vlan_dev(skb->dev)) {
+                               wc->wc_flags |= IB_WC_WITH_VLAN;
+                               wc->vlan_id = vlan_dev_vlan_id(skb->dev);
+                       }
+
                        if (pkt->mask & RXE_IMMDT_MASK) {
                                wc->wc_flags |= IB_WC_WITH_IMM;
                                wc->ex.imm_data = immdt_imm(pkt);
index 0687f0ed60b838a96a00f6d015c192467fd0696a..8cc8ca4a9ac01d68dc8b82f753feb8f18f7cbac7 100644 (file)
@@ -215,9 +215,17 @@ static const struct xpad_device {
        { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
-       { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+       { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
        { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
        { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -296,6 +304,9 @@ static const struct xpad_device {
        { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
        { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
        { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+       { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
+       { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+       { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
        { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
        { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x162e),            /* Joytech X-Box 360 controllers */
        XPAD_XBOX360_VENDOR(0x1689),            /* Razer Onza */
        XPAD_XBOX360_VENDOR(0x1bad),            /* Harminix Rock Band Guitar and Drums */
+       XPAD_XBOX360_VENDOR(0x20d6),            /* PowerA Controllers */
+       XPAD_XBOXONE_VENDOR(0x20d6),            /* PowerA Controllers */
        XPAD_XBOX360_VENDOR(0x24c6),            /* PowerA Controllers */
        XPAD_XBOXONE_VENDOR(0x24c6),            /* PowerA Controllers */
+       XPAD_XBOXONE_VENDOR(0x2e24),            /* Hyperkin Duke X-Box One pad */
+       XPAD_XBOX360_VENDOR(0x2f24),            /* GameSir Controllers */
        { }
 };
 
index eda86ab552b9cea04e49afffd9f63bfd05a39a69..17bbaac8b80c892692264cc63772f0a5f25879d3 100644 (file)
@@ -149,12 +149,6 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match);
 
-static const struct spi_device_id ariel_pwrbutton_id_table[] = {
-       { "wyse-ariel-ec-input", 0 },
-       {}
-};
-MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table);
-
 static struct spi_driver ariel_pwrbutton_driver = {
        .driver = {
                .name = "dell-wyse-ariel-ec-input",
index 3a2dcf0805f12386116d46a99a65d1ab5b0c60c7..c74b020796a94134434d79014c48e562136144d1 100644 (file)
@@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
                },
+       },
+       {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"),
index 19765f1c04f7e3bad7b0e8357b5332545a01c89e..c682b028f0a29d2edb623030c790ebea4540e574 100644 (file)
@@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = {
        { .id = "5663", .data = &gt1x_chip_data },
        { .id = "5688", .data = &gt1x_chip_data },
        { .id = "917S", .data = &gt1x_chip_data },
+       { .id = "9286", .data = &gt1x_chip_data },
 
        { .id = "911", .data = &gt911_chip_data },
        { .id = "9271", .data = &gt911_chip_data },
@@ -1448,6 +1449,7 @@ static const struct of_device_id goodix_of_match[] = {
        { .compatible = "goodix,gt927" },
        { .compatible = "goodix,gt9271" },
        { .compatible = "goodix,gt928" },
+       { .compatible = "goodix,gt9286" },
        { .compatible = "goodix,gt967" },
        { }
 };
index 199cf3daec10661d13e248cec654b3fddd3b4ae6..d8fccf048bf449b465422922e96deaaf71a2a40f 100644 (file)
@@ -29,11 +29,13 @@ struct ili2xxx_chip {
                        void *buf, size_t len);
        int (*get_touch_data)(struct i2c_client *client, u8 *data);
        bool (*parse_touch_data)(const u8 *data, unsigned int finger,
-                                unsigned int *x, unsigned int *y);
+                                unsigned int *x, unsigned int *y,
+                                unsigned int *z);
        bool (*continue_polling)(const u8 *data, bool touch);
        unsigned int max_touches;
        unsigned int resolution;
        bool has_calibrate_reg;
+       bool has_pressure_reg;
 };
 
 struct ili210x {
@@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data)
 
 static bool ili210x_touchdata_to_coords(const u8 *touchdata,
                                        unsigned int finger,
-                                       unsigned int *x, unsigned int *y)
+                                       unsigned int *x, unsigned int *y,
+                                       unsigned int *z)
 {
        if (touchdata[0] & BIT(finger))
                return false;
@@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data)
 
 static bool ili211x_touchdata_to_coords(const u8 *touchdata,
                                        unsigned int finger,
-                                       unsigned int *x, unsigned int *y)
+                                       unsigned int *x, unsigned int *y,
+                                       unsigned int *z)
 {
        u32 data;
 
@@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = {
 
 static bool ili212x_touchdata_to_coords(const u8 *touchdata,
                                        unsigned int finger,
-                                       unsigned int *x, unsigned int *y)
+                                       unsigned int *x, unsigned int *y,
+                                       unsigned int *z)
 {
        u16 val;
 
@@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data)
 
 static bool ili251x_touchdata_to_coords(const u8 *touchdata,
                                        unsigned int finger,
-                                       unsigned int *x, unsigned int *y)
+                                       unsigned int *x, unsigned int *y,
+                                       unsigned int *z)
 {
        u16 val;
 
@@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata,
 
        *x = val & 0x3fff;
        *y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2);
+       *z = touchdata[1 + (finger * 5) + 4];
 
        return true;
 }
@@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = {
        .continue_polling       = ili251x_check_continue_polling,
        .max_touches            = 10,
        .has_calibrate_reg      = true,
+       .has_pressure_reg       = true,
 };
 
 static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
@@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata)
        struct input_dev *input = priv->input;
        int i;
        bool contact = false, touch;
-       unsigned int x = 0, y = 0;
+       unsigned int x = 0, y = 0, z = 0;
 
        for (i = 0; i < priv->chip->max_touches; i++) {
-               touch = priv->chip->parse_touch_data(touchdata, i, &x, &y);
+               touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z);
 
                input_mt_slot(input, i);
                if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) {
                        touchscreen_report_pos(input, &priv->prop, x, y, true);
+                       if (priv->chip->has_pressure_reg)
+                               input_report_abs(input, ABS_MT_PRESSURE, z);
                        contact = true;
                }
        }
@@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client,
        max_xy = (chip->resolution ?: SZ_64K) - 1;
        input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0);
        input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0);
+       if (priv->chip->has_pressure_reg)
+               input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0);
        touchscreen_parse_properties(input, true, &priv->prop);
 
        error = input_mt_init_slots(input, priv->chip->max_touches,
index bda96762744e6da3e9c3d9a1549a5fc54c9c35b8..b4e7bcbe9b91d9c5e527ccb4d773e20ae4b8d847 100644 (file)
 #define ST1232_TS_NAME "st1232-ts"
 #define ST1633_TS_NAME "st1633-ts"
 
+#define REG_STATUS             0x01    /* Device Status | Error Code */
+
+#define STATUS_NORMAL          0x00
+#define STATUS_INIT            0x01
+#define STATUS_ERROR           0x02
+#define STATUS_AUTO_TUNING     0x03
+#define STATUS_IDLE            0x04
+#define STATUS_POWER_DOWN      0x05
+
+#define ERROR_NONE             0x00
+#define ERROR_INVALID_ADDRESS  0x10
+#define ERROR_INVALID_VALUE    0x20
+#define ERROR_INVALID_PLATFORM 0x30
+
 #define REG_XY_RESOLUTION      0x04
 #define REG_XY_COORDINATES     0x12
 #define ST_TS_MAX_FINGERS      10
@@ -47,7 +61,8 @@ struct st1232_ts_data {
        u8 *read_buf;
 };
 
-static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
+static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg,
+                              unsigned int n)
 {
        struct i2c_client *client = ts->client;
        struct i2c_msg msg[] = {
@@ -59,7 +74,7 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
                {
                        .addr   = client->addr,
                        .flags  = I2C_M_RD | I2C_M_DMA_SAFE,
-                       .len    = ts->read_buf_len,
+                       .len    = n,
                        .buf    = ts->read_buf,
                }
        };
@@ -72,6 +87,22 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg)
        return 0;
 }
 
+static int st1232_ts_wait_ready(struct st1232_ts_data *ts)
+{
+       unsigned int retries;
+       int error;
+
+       for (retries = 10; retries; retries--) {
+               error = st1232_ts_read_data(ts, REG_STATUS, 1);
+               if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE))
+                       return 0;
+
+               usleep_range(1000, 2000);
+       }
+
+       return -ENXIO;
+}
+
 static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
                                     u16 *max_y)
 {
@@ -79,14 +110,14 @@ static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x,
        int error;
 
        /* select resolution register */
-       error = st1232_ts_read_data(ts, REG_XY_RESOLUTION);
+       error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3);
        if (error)
                return error;
 
        buf = ts->read_buf;
 
-       *max_x = ((buf[0] & 0x0070) << 4) | buf[1];
-       *max_y = ((buf[0] & 0x0007) << 8) | buf[2];
+       *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1;
+       *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1;
 
        return 0;
 }
@@ -140,7 +171,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
        int count;
        int error;
 
-       error = st1232_ts_read_data(ts, REG_XY_COORDINATES);
+       error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len);
        if (error)
                goto out;
 
@@ -251,6 +282,11 @@ static int st1232_ts_probe(struct i2c_client *client,
        input_dev->name = "st1232-touchscreen";
        input_dev->id.bustype = BUS_I2C;
 
+       /* Wait until device is ready */
+       error = st1232_ts_wait_ready(ts);
+       if (error)
+               return error;
+
        /* Read resolution from the chip */
        error = st1232_ts_read_resolution(ts, &max_x, &max_y);
        if (error) {
index 6b8cbdf7171407bfbe8f6114cf7cc40a8b02b897..b4adab69856323b3f9158720dac5dc673249974e 100644 (file)
@@ -84,12 +84,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
               (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 f)
+static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
 {
-       if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
-               return false;
-
-       return !!(iommu->features & f);
+       return !!(iommu->features & mask);
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
index 5535878277710e4dbf9abe666a9b34254d6287b3..1a0495dd5fcbc1a3568a93cda4eb799fb5baf92e 100644 (file)
 #define IOMMU_CAP_NPCACHE 26
 #define IOMMU_CAP_EFR     27
 
+/* IOMMU IVINFO */
+#define IOMMU_IVINFO_OFFSET     36
+#define IOMMU_IVINFO_EFRSUP     BIT(0)
+
 /* IOMMU Feature Reporting Field (for IVHD type 10h) */
 #define IOMMU_FEAT_GASUP_SHIFT 6
 
index 6a1f7048dacc6642ddca950bbdd497eafb9463e6..83d8ab2aed9f45e32e505350a511ca10d7a41d80 100644 (file)
@@ -257,6 +257,8 @@ static void init_device_table_dma(void);
 
 static bool amd_iommu_pre_enabled = true;
 
+static u32 amd_iommu_ivinfo __initdata;
+
 bool translation_pre_enabled(struct amd_iommu *iommu)
 {
        return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
@@ -296,6 +298,18 @@ int amd_iommu_get_num_iommus(void)
        return amd_iommus_present;
 }
 
+/*
+ * For IVHD type 0x11/0x40, EFR is also available via IVHD.
+ * Default to IVHD EFR since it is available sooner
+ * (i.e. before PCI init).
+ */
+static void __init early_iommu_features_init(struct amd_iommu *iommu,
+                                            struct ivhd_header *h)
+{
+       if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP)
+               iommu->features = h->efr_reg;
+}
+
 /* Access to l1 and l2 indexed register spaces */
 
 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
@@ -1577,6 +1591,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 
                if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
                        amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
+
+               early_iommu_features_init(iommu, h);
+
                break;
        default:
                return -EINVAL;
@@ -1770,6 +1787,35 @@ static const struct attribute_group *amd_iommu_groups[] = {
        NULL,
 };
 
+/*
+ * Note: IVHD types 0x11 and 0x40 also contain an exact copy
+ * of the IOMMU Extended Feature Register [MMIO Offset 0030h].
+ * Default to EFR in IVHD since it is available sooner (i.e. before PCI init).
+ */
+static void __init late_iommu_features_init(struct amd_iommu *iommu)
+{
+       u64 features;
+
+       if (!(iommu->cap & (1 << IOMMU_CAP_EFR)))
+               return;
+
+       /* read extended feature bits */
+       features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+
+       if (!iommu->features) {
+               iommu->features = features;
+               return;
+       }
+
+       /*
+        * Sanity check and warn if EFR values from
+        * IVHD and MMIO conflict.
+        */
+       if (features != iommu->features)
+               pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
+                       features, iommu->features);
+}
+
 static int __init iommu_init_pci(struct amd_iommu *iommu)
 {
        int cap_ptr = iommu->cap_ptr;
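Taken together with early_iommu_features_init() above, this hunk implements a prefer-early, verify-late policy for the Extended Feature Register: the value is captured from the IVHD ACPI table before PCI init, and the MMIO copy read later is used as a fallback when no IVHD value was recorded, or only as a cross-check that warns on disagreement. A compact sketch of that reconciliation idea follows, with plain integers standing in for the kernel structures, arbitrary example values, and none of the driver's IOMMU_CAP_EFR gating:

#include <stdio.h>
#include <stdint.h>

/*
 * Reconcile a feature word reported by two sources: 'early' (firmware
 * table, available first) and 'late' (MMIO, readable only later).
 * Prefer the early value and warn when the late value disagrees.
 */
static uint64_t reconcile_efr(uint64_t early, uint64_t late)
{
	if (!early)
		return late;	/* nothing recorded early: use the MMIO value */

	if (late && late != early)
		fprintf(stderr, "EFR mismatch: early %#llx vs late %#llx, keeping early\n",
			(unsigned long long)early, (unsigned long long)late);

	return early;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)reconcile_efr(0, 0x4f77));	/* MMIO only */
	printf("%#llx\n", (unsigned long long)reconcile_efr(0x4f77, 0x4f77));	/* agree */
	printf("%#llx\n", (unsigned long long)reconcile_efr(0x4f77, 0x4f33));	/* mismatch */
	return 0;
}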
@@ -1789,8 +1835,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
                amd_iommu_iotlb_sup = false;
 
-       /* read extended feature bits */
-       iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
+       late_iommu_features_init(iommu);
 
        if (iommu_feature(iommu, FEATURE_GT)) {
                int glxval;
@@ -2607,6 +2652,11 @@ static void __init free_dma_resources(void)
        free_unity_maps();
 }
 
+static void __init ivinfo_init(void *ivrs)
+{
+       amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET));
+}
+
 /*
  * This is the hardware init function for AMD IOMMU in the system.
  * This function is called either from amd_iommu_init or from the interrupt
@@ -2661,6 +2711,8 @@ static int __init early_amd_iommu_init(void)
        if (ret)
                goto out;
 
+       ivinfo_init(ivrs_base);
+
        amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
        DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
 
index 004feaed3c72cf0304b3392f51955143926e7f94..02e7c10a4224b0b351965ff4bd760c55b46885a5 100644 (file)
@@ -1496,7 +1496,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
         * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
         * ECAP.
         */
-       if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0))
+       if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
                pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
                                    addr, size_order);
 
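The replaced check masked the address with GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0), which covers one bit too many: an address correctly aligned to PAGE_SIZE << size_order may legitimately have the bit at position size_order + VTD_PAGE_SHIFT set, so the warning could fire on valid addresses. IS_ALIGNED() only tests the bits below that position. A tiny comparison of the two predicates, assuming VTD_PAGE_SHIFT is 12 as in the driver:

#include <stdio.h>
#include <stdint.h>

#define VTD_PAGE_SHIFT 12
#define VTD_PAGE_SIZE  (1ULL << VTD_PAGE_SHIFT)

/* GENMASK_ULL(h, 0): bits 0..h inclusive. */
static uint64_t genmask_ull(unsigned int h)
{
	return h >= 63 ? ~0ULL : (1ULL << (h + 1)) - 1;
}

static int old_check_warns(uint64_t addr, unsigned int size_order)
{
	return (addr & genmask_ull(size_order + VTD_PAGE_SHIFT)) != 0;
}

static int new_check_warns(uint64_t addr, unsigned int size_order)
{
	/* Equivalent of !IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order) */
	return (addr & ((VTD_PAGE_SIZE << size_order) - 1)) != 0;
}

int main(void)
{
	/* 0x2000 is correctly aligned for an order-1 (8 KiB) invalidation. */
	uint64_t addr = 0x2000;
	unsigned int order = 1;

	printf("old check warns: %d\n", old_check_warns(addr, order)); /* 1: false positive */
	printf("new check warns: %d\n", new_check_warns(addr, order)); /* 0: accepted */
	return 0;
}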
index f665322a09919fbaea5dd3eae59c7ac87a7fe04d..06b00b5363d8630ebe7421fc78c127f517f2074d 100644 (file)
@@ -5440,6 +5440,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
        return ret;
 }
 
+static bool domain_use_flush_queue(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       bool r = true;
+
+       if (intel_iommu_strict)
+               return false;
+
+       /*
+        * The flush queue implementation does not perform page-selective
+        * invalidations that are required for efficient TLB flushes in virtual
+        * environments. The benefit of batching is likely to be much lower than
+        * the overhead of synchronizing the virtual and physical IOMMU
+        * page-tables.
+        */
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               if (!cap_caching_mode(iommu->cap))
+                       continue;
+
+               pr_warn_once("IOMMU batching is disabled due to virtualization");
+               r = false;
+               break;
+       }
+       rcu_read_unlock();
+
+       return r;
+}
+
 static int
 intel_iommu_domain_get_attr(struct iommu_domain *domain,
                            enum iommu_attr attr, void *data)
@@ -5450,7 +5480,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain,
        case IOMMU_DOMAIN_DMA:
                switch (attr) {
                case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
-                       *(int *)data = !intel_iommu_strict;
+                       *(int *)data = domain_use_flush_queue();
                        return 0;
                default:
                        return -ENODEV;
index 94920a51c6286375ea055d74b13c1c11073782c2..b147f22a78f484509f4205ab9fb50715387f40cb 100644 (file)
@@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP
          TI System Controller, say Y here. Otherwise, say N.
 
 config TI_PRUSS_INTC
-       tristate "TI PRU-ICSS Interrupt Controller"
-       depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3
+       tristate
+       depends on TI_PRUSS
+       default TI_PRUSS
        select IRQ_DOMAIN
        help
          This enables support for the PRU-ICSS Local Interrupt Controller
index 5f5eb8877c4134849d1c484a723274dfd467cb74..25c9a9c06e4100d727e8a192e69b3c31387181b8 100644 (file)
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
 {
        int cpu = smp_processor_id();
 
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
        .name           = "IPI",
        .irq_mask       = bcm2836_arm_irqchip_dummy_op,
        .irq_unmask     = bcm2836_arm_irqchip_dummy_op,
-       .irq_eoi        = bcm2836_arm_irqchip_ipi_eoi,
+       .irq_ack        = bcm2836_arm_irqchip_ipi_ack,
        .ipi_send_mask  = bcm2836_arm_irqchip_ipi_send_mask,
 };
 
index 9ed1bc47366348b5b9066bc4873a317d82c011af..09b91b81851cca97df7c3a7784b0e683152c2e42 100644 (file)
@@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
 
 static const char * const parent_names[] = {"int0", "int1", "int2", "int3"};
 
-int __init liointc_of_init(struct device_node *node,
-                               struct device_node *parent)
+static int __init liointc_of_init(struct device_node *node,
+                                 struct device_node *parent)
 {
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
index 95d4fd8f7a96818e2b9687503e3521317a939da8..0bbb0b2d0dd5f72f79e078061744af225bd83aa7 100644 (file)
@@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
                if (ret)
                        return ret;
 
+               ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq,
+                                                   &mips_mt_cpu_irq_controller,
+                                                   NULL);
+
+               if (ret)
+                       return ret;
+
                ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
                if (ret)
                        return ret;
index 0aa50d025ef6c2f877750b7d7b981bdc1d7fd8f3..fbb354413ffa139a31844559a0fb0db56c1390f1 100644 (file)
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
        irqchip->chip.num_regs = 1;
        irqchip->chip.status_base = base + INTC_IP;
        irqchip->chip.mask_base = base + INTC_IE;
-       irqchip->chip.mask_invert = true,
+       irqchip->chip.mask_invert = true;
        irqchip->chip.ack_base = base + INTC_IP;
 
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
index 849d3c5f908e4d597f4cf0225d596549ccdee770..6c1d8b69a465a12bd8d19a0e77e3a1b3747cc1eb 100644 (file)
@@ -928,6 +928,9 @@ config LEDS_ACER_A500
          This option enables support for the Power Button LED of
          Acer Iconia Tab A500.
 
+comment "Flash and Torch LED drivers"
+source "drivers/leds/flash/Kconfig"
+
 comment "LED Triggers"
 source "drivers/leds/trigger/Kconfig"
 
index 73e603e1727e7440ae1d43d072ff4a2ee6096ea0..156c0b4e60d95db595bc806780e11efc0c5e3050 100644 (file)
@@ -103,5 +103,8 @@ obj-$(CONFIG_LEDS_SPI_BYTE)         += leds-spi-byte.o
 # LED Userspace Drivers
 obj-$(CONFIG_LEDS_USER)                        += uleds.o
 
+# Flash and Torch LED Drivers
+obj-$(CONFIG_LEDS_CLASS_FLASH)         += flash/
+
 # LED Triggers
 obj-$(CONFIG_LEDS_TRIGGERS)            += trigger/
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
new file mode 100644 (file)
index 0000000..d21d273
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if LEDS_CLASS_FLASH
+
+config LEDS_RT8515
+       tristate "LED support for Richtek RT8515 flash/torch LED"
+       depends on GPIOLIB
+       help
+         This option enables support for the Richtek RT8515 flash
+         and torch LEDs found on some mobile phones.
+
+         To compile this driver as a module, choose M here: the module
+         will be called leds-rt8515.
+
+endif # LEDS_CLASS_FLASH
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
new file mode 100644 (file)
index 0000000..e990e25
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEDS_RT8515)      += leds-rt8515.o
diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
new file mode 100644 (file)
index 0000000..590bfa1
--- /dev/null
@@ -0,0 +1,397 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * LED driver for Richtek RT8515 flash/torch white LEDs
+ * found on some Samsung mobile phones.
+ *
+ * This is a 1.5A Boost dual channel driver produced around 2011.
+ *
+ * The component lacks a datasheet, but in the schematic picture
+ * from the LG P970 service manual you can see the connections
+ * from the RT8515 to the LED, with two resistors connected
+ * from the pins "RFS" and "RTS" to ground.
+ *
+ * On the LG P970:
+ * RFS (resistance flash setting?) is 20 kOhm
+ * RTS (resistance torch setting?) is 39 kOhm
+ *
+ * Some sleuthing finds us the RT9387A which we have a datasheet for:
+ * https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf
+ * This apparently works the same way so in theory this driver
+ * should cover RT9387A as well. This has not been tested, please
+ * update the compatibles if you add RT9387A support.
+ *
+ * Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/led-class-flash.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <media/v4l2-flash-led-class.h>
+
+/* We can provide 15-700 mA out to the LED */
+#define RT8515_MIN_IOUT_MA     15
+#define RT8515_MAX_IOUT_MA     700
+/* The maximum intensity is 1-16 for flash and 1-100 for torch */
+#define RT8515_FLASH_MAX       16
+#define RT8515_TORCH_MAX       100
+
+#define RT8515_TIMEOUT_US      250000U
+#define RT8515_MAX_TIMEOUT_US  300000U
+
+struct rt8515 {
+       struct led_classdev_flash fled;
+       struct device *dev;
+       struct v4l2_flash *v4l2_flash;
+       struct mutex lock;
+       struct regulator *reg;
+       struct gpio_desc *enable_torch;
+       struct gpio_desc *enable_flash;
+       struct timer_list powerdown_timer;
+       u32 max_timeout; /* Flash max timeout */
+       int flash_max_intensity;
+       int torch_max_intensity;
+};
+
+static struct rt8515 *to_rt8515(struct led_classdev_flash *fled)
+{
+       return container_of(fled, struct rt8515, fled);
+}
+
+static void rt8515_gpio_led_off(struct rt8515 *rt)
+{
+       gpiod_set_value(rt->enable_flash, 0);
+       gpiod_set_value(rt->enable_torch, 0);
+}
+
+static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod,
+                                         int brightness)
+{
+       int i;
+
+       /*
+        * Toggling a GPIO line with a small delay increases the
+        * brightness one step at a time.
+        */
+       for (i = 0; i < brightness; i++) {
+               gpiod_set_value(gpiod, 0);
+               udelay(1);
+               gpiod_set_value(gpiod, 1);
+               udelay(1);
+       }
+}
+
+/* This is setting the torch light level */
+static int rt8515_led_brightness_set(struct led_classdev *led,
+                                    enum led_brightness brightness)
+{
+       struct led_classdev_flash *fled = lcdev_to_flcdev(led);
+       struct rt8515 *rt = to_rt8515(fled);
+
+       mutex_lock(&rt->lock);
+
+       if (brightness == LED_OFF) {
+               /* Off */
+               rt8515_gpio_led_off(rt);
+       } else if (brightness < RT8515_TORCH_MAX) {
+               /* Step it up to movie mode brightness using the torch pin */
+               rt8515_gpio_brightness_commit(rt->enable_torch, brightness);
+       } else {
+               /* Max torch brightness requested */
+               gpiod_set_value(rt->enable_torch, 1);
+       }
+
+       mutex_unlock(&rt->lock);
+
+       return 0;
+}
+
+static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled,
+                                      bool state)
+{
+       struct rt8515 *rt = to_rt8515(fled);
+       struct led_flash_setting *timeout = &fled->timeout;
+       int brightness = rt->flash_max_intensity;
+
+       mutex_lock(&rt->lock);
+
+       if (state) {
+               /* Enable LED flash mode and set brightness */
+               rt8515_gpio_brightness_commit(rt->enable_flash, brightness);
+               /* Set timeout */
+               mod_timer(&rt->powerdown_timer,
+                         jiffies + usecs_to_jiffies(timeout->val));
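+               /*
+                * With the default 250000 us timeout this forces the LED
+                * off after roughly 250 ms.
+                */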
+       } else {
+               del_timer_sync(&rt->powerdown_timer);
+               /* Turn the LED off */
+               rt8515_gpio_led_off(rt);
+       }
+
+       fled->led_cdev.brightness = LED_OFF;
+       /* After this the torch LED will be disabled */
+
+       mutex_unlock(&rt->lock);
+
+       return 0;
+}
+
+static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled,
+                                      bool *state)
+{
+       struct rt8515 *rt = to_rt8515(fled);
+
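+       /* Strobing is reported while the powerdown timer is still pending */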
+       *state = timer_pending(&rt->powerdown_timer);
+
+       return 0;
+}
+
+static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled,
+                                       u32 timeout)
+{
+       /* The timeout is stored in the led-class-flash core */
+       return 0;
+}
+
+static const struct led_flash_ops rt8515_flash_ops = {
+       .strobe_set = rt8515_led_flash_strobe_set,
+       .strobe_get = rt8515_led_flash_strobe_get,
+       .timeout_set = rt8515_led_flash_timeout_set,
+};
+
+static void rt8515_powerdown_timer(struct timer_list *t)
+{
+       struct rt8515 *rt = from_timer(rt, t, powerdown_timer);
+
+       /* Turn the LED off */
+       rt8515_gpio_led_off(rt);
+}
+
+static void rt8515_init_flash_timeout(struct rt8515 *rt)
+{
+       struct led_classdev_flash *fled = &rt->fled;
+       struct led_flash_setting *s;
+
+       /* Init flash timeout setting */
+       s = &fled->timeout;
+       s->min = 1;
+       s->max = rt->max_timeout;
+       s->step = 1;
+       /*
+        * Set default timeout to RT8515_TIMEOUT_US except if
+        * max_timeout from DT is lower.
+        */
+       s->val = min(rt->max_timeout, RT8515_TIMEOUT_US);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+/* Configure the V4L2 flash subdevice */
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+                                         struct v4l2_flash_config *v4l2_sd_cfg)
+{
+       struct led_classdev *led = &rt->fled.led_cdev;
+       struct led_flash_setting *s;
+
+       strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name,
+               sizeof(v4l2_sd_cfg->dev_name));
+
+       /*
+        * Init the flash intensity setting: this is a linear scale
+        * capped by the device tree max intensity setting,
+        * i.e. 1..flash_max_intensity
+        */
+       s = &v4l2_sd_cfg->intensity;
+       s->min = 1;
+       s->max = rt->flash_max_intensity;
+       s->step = 1;
+       s->val = s->max;
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+       v4l2_flash_release(rt->v4l2_flash);
+}
+
+#else
+static void rt8515_init_v4l2_flash_config(struct rt8515 *rt,
+                                         struct v4l2_flash_config *v4l2_sd_cfg)
+{
+}
+
+static void rt8515_v4l2_flash_release(struct rt8515 *rt)
+{
+}
+#endif
+
+static void rt8515_determine_max_intensity(struct rt8515 *rt,
+                                          struct fwnode_handle *led,
+                                          const char *resistance,
+                                          const char *max_ua_prop, int hw_max,
+                                          int *max_intensity_setting)
+{
+       u32 res = 0; /* Can't be 0 so 0 is undefined */
+       u32 ua;
+       u32 max_ma;
+       int max_intensity;
+       int ret;
+
+       fwnode_property_read_u32(rt->dev->fwnode, resistance, &res);
+       ret = fwnode_property_read_u32(led, max_ua_prop, &ua);
+
+       /* Missing info in DT, fall back to the hardware maxima */
+       if (ret || res == 0) {
+               dev_err(rt->dev,
+                       "either %s or %s missing from DT, using HW max\n",
+                       resistance, max_ua_prop);
+               max_ma = RT8515_MAX_IOUT_MA;
+               max_intensity = hw_max;
+               goto out_assign_max;
+       }
+
+       /*
+        * Formula from the datasheet: this is the maximum current
+        * defined by the hardware.
+        */
+       max_ma = (5500 * 1000) / res;
+       /*
+        * Calculate max intensity (linear scaling)
+        * Formula is ((ua / 1000) / max_ma) * 100, then simplified
+        */
+       max_intensity = (ua / 10) / max_ma;
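+       /*
+        * Illustrative example: a 10000 ohm resistor gives a hardware
+        * limit of 5500000 / 10000 = 550 mA, and a DT maximum of
+        * 220000 uA (220 mA) then yields (220000 / 10) / 550 = 40,
+        * i.e. 40/100 on the linear scale.
+        */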
+
+       dev_info(rt->dev,
+                "current restricted from %u to %u mA, max intensity %d/100\n",
+                max_ma, (ua / 1000), max_intensity);
+
+out_assign_max:
+       dev_info(rt->dev, "max intensity %d/%d = %d mA\n",
+                max_intensity, hw_max, max_ma);
+       *max_intensity_setting = max_intensity;
+}
+
+static int rt8515_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct fwnode_handle *child;
+       struct rt8515 *rt;
+       struct led_classdev *led;
+       struct led_classdev_flash *fled;
+       struct led_init_data init_data = {};
+       struct v4l2_flash_config v4l2_sd_cfg = {};
+       int ret;
+
+       rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL);
+       if (!rt)
+               return -ENOMEM;
+
+       rt->dev = dev;
+       fled = &rt->fled;
+       led = &fled->led_cdev;
+
+       /* ENF - Enable Flash line */
+       rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW);
+       if (IS_ERR(rt->enable_flash))
+               return dev_err_probe(dev, PTR_ERR(rt->enable_flash),
+                                    "cannot get ENF (enable flash) GPIO\n");
+
+       /* ENT - Enable Torch line */
+       rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW);
+       if (IS_ERR(rt->enable_torch))
+               return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
+                                    "cannot get ENT (enable torch) GPIO\n");
+
+       child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+       if (!child) {
+               dev_err(dev,
+                       "No fwnode child node found for connected LED.\n");
+               return -EINVAL;
+       }
+       init_data.fwnode = child;
+
+       rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms",
+                                      "flash-max-microamp",
+                                      RT8515_FLASH_MAX,
+                                      &rt->flash_max_intensity);
+       rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms",
+                                      "led-max-microamp",
+                                      RT8515_TORCH_MAX,
+                                      &rt->torch_max_intensity);
+
+       ret = fwnode_property_read_u32(child, "flash-max-timeout-us",
+                                      &rt->max_timeout);
+       if (ret) {
+               rt->max_timeout = RT8515_MAX_TIMEOUT_US;
+               dev_warn(dev,
+                        "flash-max-timeout-us property missing\n");
+       }
+       timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0);
+       rt8515_init_flash_timeout(rt);
+
+       fled->ops = &rt8515_flash_ops;
+
+       led->max_brightness = rt->torch_max_intensity;
+       led->brightness_set_blocking = rt8515_led_brightness_set;
+       led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH;
+
+       mutex_init(&rt->lock);
+
+       platform_set_drvdata(pdev, rt);
+
+       ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
+       if (ret) {
+               dev_err(dev, "can't register LED %s\n", led->name);
+               mutex_destroy(&rt->lock);
+               return ret;
+       }
+
+       rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg);
+
+       /* Create a V4L2 Flash device if V4L2 flash is enabled */
+       rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg);
+       if (IS_ERR(rt->v4l2_flash)) {
+               ret = PTR_ERR(rt->v4l2_flash);
+               dev_err(dev, "failed to register V4L2 flash device (%d)\n",
+                       ret);
+               /*
+                * Continue without the V4L2 flash
+                * (we still have the classdev)
+                */
+       }
+
+       return 0;
+}
+
+static int rt8515_remove(struct platform_device *pdev)
+{
+       struct rt8515 *rt = platform_get_drvdata(pdev);
+
+       rt8515_v4l2_flash_release(rt);
+       del_timer_sync(&rt->powerdown_timer);
+       mutex_destroy(&rt->lock);
+
+       return 0;
+}
+
+static const struct of_device_id rt8515_match[] = {
+       { .compatible = "richtek,rt8515", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt8515_match);
+
+static struct platform_driver rt8515_driver = {
+       .driver = {
+               .name  = "rt8515",
+               .of_match_table = rt8515_match,
+       },
+       .probe  = rt8515_probe,
+       .remove = rt8515_remove,
+};
+module_platform_driver(rt8515_driver);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
+MODULE_DESCRIPTION("Richtek RT8515 LED driver");
+MODULE_LICENSE("GPL");
index 91da90cfb11d9f60b9ff564eb387a49419a90a1e..4e7b78a84149be077e10bc4d0119350c2fabf810 100644 (file)
@@ -378,14 +378,15 @@ void led_trigger_event(struct led_trigger *trig,
                        enum led_brightness brightness)
 {
        struct led_classdev *led_cdev;
+       unsigned long flags;
 
        if (!trig)
                return;
 
-       read_lock(&trig->leddev_list_lock);
+       read_lock_irqsave(&trig->leddev_list_lock, flags);
        list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
                led_set_brightness(led_cdev, brightness);
-       read_unlock(&trig->leddev_list_lock);
+       read_unlock_irqrestore(&trig->leddev_list_lock, flags);
 }
 EXPORT_SYMBOL_GPL(led_trigger_event);
 
@@ -396,11 +397,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
                             int invert)
 {
        struct led_classdev *led_cdev;
+       unsigned long flags;
 
        if (!trig)
                return;
 
-       read_lock(&trig->leddev_list_lock);
+       read_lock_irqsave(&trig->leddev_list_lock, flags);
        list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
                if (oneshot)
                        led_blink_set_oneshot(led_cdev, delay_on, delay_off,
@@ -408,7 +410,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
                else
                        led_blink_set(led_cdev, delay_on, delay_off);
        }
-       read_unlock(&trig->leddev_list_lock);
+       read_unlock_irqrestore(&trig->leddev_list_lock, flags);
 }
 
 void led_trigger_blink(struct led_trigger *trig,
index bb68ba23a7d448e32a8a97f93356131305559db6..49e1bddaa15e0f6a845c76468b827e91c778c4e7 100644 (file)
@@ -96,14 +96,14 @@ static int ariel_led_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        leds[0].ec_index = EC_BLUE_LED;
-       leds[0].led_cdev.name = "blue:power",
+       leds[0].led_cdev.name = "blue:power";
        leds[0].led_cdev.default_trigger = "default-on";
 
        leds[1].ec_index = EC_AMBER_LED;
-       leds[1].led_cdev.name = "amber:status",
+       leds[1].led_cdev.name = "amber:status";
 
        leds[2].ec_index = EC_GREEN_LED;
-       leds[2].led_cdev.name = "green:status",
+       leds[2].led_cdev.name = "green:status";
        leds[2].led_cdev.default_trigger = "default-on";
 
        for (i = 0; i < NLEDS; i++) {
index b3edee7031931eae380a762f30d866ac3af36da3..9dd205870525c4f753d656b169e94d2754fbb768 100644 (file)
@@ -679,7 +679,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
        led->cdev.brightness_get = lm3533_led_get;
        led->cdev.blink_set = lm3533_led_blink_set;
        led->cdev.brightness = LED_OFF;
-       led->cdev.groups = lm3533_led_attribute_groups,
+       led->cdev.groups = lm3533_led_attribute_groups;
        led->id = pdev->id;
 
        mutex_init(&led->mutex);
index c1bcac71008c67374ad76e9ea455cc220f8bba31..28ddcaa5358b141db53af65dc595b08ef80044e8 100644 (file)
@@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
        rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
 
        ret = nvm_submit_io_sync_raw(dev, &rqd);
+       __free_page(page);
        if (ret)
                return ret;
 
-       __free_page(page);
-
        return rqd.error;
 }
 
index 84fc2c0f01015b280ed86ba702c99ef29db95465..d1c8fd3977fc64461215673c18e67ef9f937d03d 100644 (file)
@@ -33,6 +33,8 @@
 #define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \
 static inline int bch_has_feature_##name(struct cache_sb *sb) \
 { \
+       if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+               return 0; \
        return (((sb)->feature_compat & \
                BCH##_FEATURE_COMPAT_##flagname) != 0); \
 } \
@@ -50,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
 #define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \
 static inline int bch_has_feature_##name(struct cache_sb *sb) \
 { \
+       if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+               return 0; \
        return (((sb)->feature_ro_compat & \
                BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \
 } \
@@ -67,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
 #define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \
 static inline int bch_has_feature_##name(struct cache_sb *sb) \
 { \
+       if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \
+               return 0; \
        return (((sb)->feature_incompat & \
                BCH##_FEATURE_INCOMPAT_##flagname) != 0); \
 } \
index 8c874710f0bcfdca9c741018fca1ebd53f7ebfa2..5a55617a08e687c0ef2f454d6b8754f8af0bfade 100644 (file)
@@ -1481,9 +1481,9 @@ static int crypt_alloc_req_skcipher(struct crypt_config *cc,
 static int crypt_alloc_req_aead(struct crypt_config *cc,
                                 struct convert_context *ctx)
 {
-       if (!ctx->r.req) {
-               ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
-               if (!ctx->r.req)
+       if (!ctx->r.req_aead) {
+               ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
+               if (!ctx->r.req_aead)
                        return -ENOMEM;
        }
 
index 81df019ab284a031218f66fcbbe881be82fef634..b64fede032dc5314eda9ccf353738ef7617fb89a 100644 (file)
@@ -257,8 +257,9 @@ struct dm_integrity_c {
        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
-       bool fix_padding;
        bool discard;
+       bool fix_padding;
+       bool legacy_recalculate;
 
        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
@@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic)
        return READ_ONCE(ic->failed);
 }
 
+static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
+{
+       if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) &&
+           !ic->legacy_recalculate)
+               return true;
+       return false;
+}
+
 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
 {
@@ -3140,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
                arg_count += !!ic->journal_crypt_alg.alg_string;
                arg_count += !!ic->journal_mac_alg.alg_string;
                arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
+               arg_count += ic->legacy_recalculate;
                DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
                       ic->tag_size, ic->mode, arg_count);
                if (ic->meta_dev)
@@ -3163,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
                }
                if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
                        DMEMIT(" fix_padding");
+               if (ic->legacy_recalculate)
+                       DMEMIT(" legacy_recalculate");
 
 #define EMIT_ALG(a, n)                                                 \
                do {                                                    \
@@ -3792,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
        unsigned extra_args;
        struct dm_arg_set as;
        static const struct dm_arg _args[] = {
-               {0, 15, "Invalid number of feature args"},
+               {0, 16, "Invalid number of feature args"},
        };
        unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
        bool should_write_sb;
@@ -3940,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                        ic->discard = true;
                } else if (!strcmp(opt_string, "fix_padding")) {
                        ic->fix_padding = true;
+               } else if (!strcmp(opt_string, "legacy_recalculate")) {
+                       ic->legacy_recalculate = true;
                } else {
                        r = -EINVAL;
                        ti->error = "Invalid argument";
@@ -4235,6 +4249,20 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
+       } else {
+               if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
+                       ti->error = "Recalculate can only be specified with internal_hash";
+                       r = -EINVAL;
+                       goto bad;
+               }
+       }
+
+       if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+           le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
+           dm_integrity_disable_recalculate(ic)) {
+               ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
+               r = -EOPNOTSUPP;
+               goto bad;
        }
 
        ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
index 188f41287f180baf5a8ea7bc8defb02717a1da5f..4acf2342f7adf0896077835eef7b1332965db9bb 100644 (file)
@@ -363,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
 {
        int r;
        dev_t dev;
+       unsigned int major, minor;
+       char dummy;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;
 
        BUG_ON(!t);
 
-       dev = dm_get_dev_t(path);
-       if (!dev)
-               return -ENODEV;
+       if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) {
+               /* Extract the major/minor numbers */
+               dev = MKDEV(major, minor);
+               if (MAJOR(dev) != major || MINOR(dev) != minor)
+                       return -EOVERFLOW;
+       } else {
+               dev = dm_get_dev_t(path);
+               if (!dev)
+                       return -ENODEV;
+       }
 
        dd = find_device(&t->devices, dev);
        if (!dd) {
index ca409428b4fcb9404a208e920bacbdb997b5ec52..04384452a7abd408baf177a20f114e85d3d5b016 100644 (file)
@@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws)
         * could wait for this and below md_handle_request could wait for those
         * bios because of suspend check
         */
+       spin_lock_irq(&mddev->lock);
        mddev->prev_flush_start = mddev->start_flush;
        mddev->flush_bio = NULL;
+       spin_unlock_irq(&mddev->lock);
        wake_up(&mddev->sb_wait);
 
        if (bio->bi_iter.bi_size == 0) {
index 3a947159b25ac95e2804c8a6d890408840a48427..ea6f8ee8161c963ccab70588871ab0519af1d974 100644 (file)
@@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO)    += meson/
 obj-$(CONFIG_CEC_SAMSUNG_S5P)  += s5p/
 obj-$(CONFIG_CEC_SECO)         += seco/
 obj-$(CONFIG_CEC_STI)          += sti/
+obj-$(CONFIG_CEC_STM32)                += stm32/
 obj-$(CONFIG_CEC_TEGRA)                += tegra/
 
index 96d3b2b2aa31882869448b83ae8a0f66cb46d90a..3f61f5863bf7748caa0855eb0bfe02f71a2fe8ec 100644 (file)
@@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
                                return -EINVAL;
                }
        } else {
-               length = (b->memory == VB2_MEMORY_USERPTR ||
-                         b->memory == VB2_MEMORY_DMABUF)
+               length = (b->memory == VB2_MEMORY_USERPTR)
                        ? b->length : vb->planes[0].length;
 
                if (b->bytesused > length)
index eb7b6f01f6231ccbdb1d32f463be79ab34e9b3af..58ca47e904a1496ad3837ba509ee1ed3a789fcd6 100644 (file)
@@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
 
        switch (pll->bus_type) {
        case CCS_PLL_BUS_TYPE_CSI2_DPHY:
-               /* CSI transfers 2 bits per clock per lane; thus times 2 */
-               op_sys_clk_freq_hz_sdr = pll->link_freq * 2
-                       * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
-                          1 : pll->csi2.lanes);
-               break;
        case CCS_PLL_BUS_TYPE_CSI2_CPHY:
-               op_sys_clk_freq_hz_sdr =
-                       pll->link_freq
+               op_sys_clk_freq_hz_sdr = pll->link_freq * 2
                        * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ?
                           1 : pll->csi2.lanes);
                break;
index 9a6097b088bdfb69d69f9f1d26da62149e0a6a7a..6555bd4b325a61c4dd93218010cdb1df83e8ac64 100644 (file)
@@ -152,7 +152,7 @@ static int ccs_data_parse_version(struct bin_container *bin,
        vv->version_major = ((u16)v->static_data_version_major[0] << 8) +
                v->static_data_version_major[1];
        vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) +
-               v->static_data_version_major[1];
+               v->static_data_version_minor[1];
        vv->date_year =  ((u16)v->year[0] << 8) + v->year[1];
        vv->date_month = v->month;
        vv->date_day = v->day;
index 36e354ecf71ecdcae1e9b378c26cd184088c95e8..6cada8a6e50cc21989ac7be9359da9a47f537ea2 100644 (file)
@@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
        if (!q->sensor)
                return -ENODEV;
 
-       freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes);
+       freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes);
        if (freq < 0) {
                dev_err(dev, "error %lld, invalid link_freq\n", freq);
                return freq;
index bdd293faaad0f5e2441754e7faaf93cd6dafb143..7233a731175774001148411217efb8690a1f3461 100644 (file)
@@ -349,8 +349,10 @@ static void venus_core_shutdown(struct platform_device *pdev)
 {
        struct venus_core *core = platform_get_drvdata(pdev);
 
+       pm_runtime_get_sync(core->dev);
        venus_shutdown(core);
        venus_firmware_deinit(core);
+       pm_runtime_put_sync(core->dev);
 }
 
 static __maybe_unused int venus_runtime_suspend(struct device *dev)
index 98bff765b02e67d9c5c4cba841ddd506eec4de87..e48d666f2c63abefe1746ac7bb4ac4bef64d006f 100644 (file)
@@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
 out:
        fwnode_handle_put(fwnode);
 
-       return 0;
+       return ret;
 }
 
 static int rvin_parallel_init(struct rvin_dev *vin)
index 68da1eed753dc87bb0565ae8af658467d530942f..f7e9fd3055485fe897c694ff2599af08ee51d1e3 100644 (file)
 struct rkisp1_match_data {
        const char * const *clks;
        unsigned int size;
+       enum rkisp1_cif_isp_version isp_ver;
 };
 
 /* ----------------------------------------------------------------------------
@@ -411,15 +412,16 @@ static const char * const rk3399_isp_clks[] = {
        "hclk",
 };
 
-static const struct rkisp1_match_data rk3399_isp_clk_data = {
+static const struct rkisp1_match_data rk3399_isp_match_data = {
        .clks = rk3399_isp_clks,
        .size = ARRAY_SIZE(rk3399_isp_clks),
+       .isp_ver = RKISP1_V10,
 };
 
 static const struct of_device_id rkisp1_of_match[] = {
        {
                .compatible = "rockchip,rk3399-cif-isp",
-               .data = &rk3399_isp_clk_data,
+               .data = &rk3399_isp_match_data,
        },
        {},
 };
@@ -457,15 +459,15 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1)
 
 static int rkisp1_probe(struct platform_device *pdev)
 {
-       const struct rkisp1_match_data *clk_data;
+       const struct rkisp1_match_data *match_data;
        struct device *dev = &pdev->dev;
        struct rkisp1_device *rkisp1;
        struct v4l2_device *v4l2_dev;
        unsigned int i;
        int ret, irq;
 
-       clk_data = of_device_get_match_data(&pdev->dev);
-       if (!clk_data)
+       match_data = of_device_get_match_data(&pdev->dev);
+       if (!match_data)
                return -ENODEV;
 
        rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
@@ -494,15 +496,16 @@ static int rkisp1_probe(struct platform_device *pdev)
 
        rkisp1->irq = irq;
 
-       for (i = 0; i < clk_data->size; i++)
-               rkisp1->clks[i].id = clk_data->clks[i];
-       ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks);
+       for (i = 0; i < match_data->size; i++)
+               rkisp1->clks[i].id = match_data->clks[i];
+       ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
        if (ret)
                return ret;
-       rkisp1->clk_size = clk_data->size;
+       rkisp1->clk_size = match_data->size;
 
        pm_runtime_enable(&pdev->dev);
 
+       rkisp1->media_dev.hw_revision = match_data->isp_ver;
        strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
                sizeof(rkisp1->media_dev.model));
        rkisp1->media_dev.dev = &pdev->dev;
index 6af4d551ffb54714123335718ca4b46db1bb6b30..aa5f45749543b3cc0282f3180aca8d6157762070 100644 (file)
@@ -391,7 +391,7 @@ static void rkisp1_goc_config(struct rkisp1_params *params,
                                RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
        rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
 
-       for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++)
+       for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
                rkisp1_write(params->rkisp1, arg->gamma_y[i],
                             RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
 }
@@ -589,7 +589,6 @@ static void rkisp1_hst_config(struct rkisp1_params *params,
                RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
                RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
                RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
-               RKISP1_CIF_ISP_HIST_WEIGHT_44,
        };
        const u8 *weight;
        unsigned int i;
@@ -622,6 +621,8 @@ static void rkisp1_hst_config(struct rkisp1_params *params,
                                                            weight[2],
                                                            weight[3]),
                                 hist_weight_regs[i]);
+
+       rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
 }
 
 static void
index 8a8d960a679c25d0c0c56c15c12ed4846524268b..fa33080f51db542a2990f8560bd2b2df1992d09d 100644 (file)
 #define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER             0x0000007F
 #define RKISP1_CIF_ISP_HIST_ROW_NUM                    5
 #define RKISP1_CIF_ISP_HIST_COLUMN_NUM                 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN(x)                 ((x) & 0x000FFFFF)
 
 /* AUTO FOCUS MEASUREMENT:  ISP_AFM_CTRL */
 #define RKISP1_ISP_AFM_CTRL_ENABLE                     BIT(0)
index 3ddab8fa8f2d32bbbddfccd71c7c9ce91cd0175c..c1d07a2e8839fb78a93b7e0f6a1eb9e19402655f 100644 (file)
@@ -203,7 +203,7 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
        unsigned int i;
 
        pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
-       for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++)
+       for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
                pbuf->params.ae.exp_mean[i] =
                        (u8)rkisp1_read(rkisp1,
                                        RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
@@ -233,10 +233,11 @@ static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
        unsigned int i;
 
        pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
-       for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++)
-               pbuf->params.hist.hist_bins[i] =
-                       (u8)rkisp1_read(rkisp1,
-                                       RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+       for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
+               u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+
+               pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+       }
 }
 
 static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
index be8f2756a444e725579f95e4f8dd59eab88a71dd..1524dc0fc566e25530b6969201ed5a289619f225 100644 (file)
@@ -320,7 +320,7 @@ again:
                                data->body);
                        spin_lock(&data->keylock);
                        if (scancode) {
-                               delay = nsecs_to_jiffies(dev->timeout) +
+                               delay = usecs_to_jiffies(dev->timeout) +
                                        msecs_to_jiffies(100);
                                mod_timer(&data->rx_timeout, jiffies + delay);
                        } else {
index a905113fef6eae0a68fa4b5bb09111068c22856c..0c6229592e132de9046b7230f99a749ef5163729 100644 (file)
@@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        rdev->s_rx_carrier_range = ite_set_rx_carrier_range;
        /* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */
        rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR *
-                           itdev->params.sample_period;
+                           itdev->params.sample_period / 1000;
        rdev->timeout = IR_DEFAULT_TIMEOUT;
        rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
        rdev->rx_resolution = ITE_BAUDRATE_DIVISOR *
index 1d811e5ffb557fd5eb6837a68abefa4ec721e900..1fd62c1dac768d705cffa725492d54d477e41f58 100644 (file)
@@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol)
 void rc_repeat(struct rc_dev *dev)
 {
        unsigned long flags;
-       unsigned int timeout = nsecs_to_jiffies(dev->timeout) +
+       unsigned int timeout = usecs_to_jiffies(dev->timeout) +
                msecs_to_jiffies(repeat_period(dev->last_protocol));
        struct lirc_scancode sc = {
                .scancode = dev->last_scancode, .rc_proto = dev->last_protocol,
@@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
        ir_do_keydown(dev, protocol, scancode, keycode, toggle);
 
        if (dev->keypressed) {
-               dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) +
+               dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) +
                        msecs_to_jiffies(repeat_period(protocol));
                mod_timer(&dev->timer_keyup, dev->keyup_jiffies);
        }
@@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev)
                        goto out_raw;
        }
 
+       dev->registered = true;
+
        rc = device_add(&dev->dev);
        if (rc)
                goto out_rx_free;
@@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev)
                 dev->device_name ?: "Unspecified device", path ?: "N/A");
        kfree(path);
 
-       dev->registered = true;
-
        /*
         * once the input device is registered in rc_setup_rx_device,
         * userspace can open the input device and rc_open() will be called
index 8cc28c92d05d66623c3b4cbf808313941c158e90..96ae0294ac102a9238b5f9453c6185485343ebed 100644 (file)
@@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah)
        } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */
 
        mod_timer(&serial_ir.timeout_timer,
-                 jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout));
+                 jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout));
 
        ir_raw_event_handle(serial_ir.rcdev);
 
index 78007dba4677cb54ae44bf268494d5d787dada70..133d20e40f82a5f44d5bd37802c33b28cb80c08d 100644 (file)
@@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
 }
 EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt);
 
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
                       unsigned int div)
 {
        struct v4l2_ctrl *ctrl;
@@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
 
        return freq > 0 ? freq : -EINVAL;
 }
-EXPORT_SYMBOL_GPL(v4l2_get_link_rate);
+EXPORT_SYMBOL_GPL(v4l2_get_link_freq);
index 2aa6648fa41f953051e839f37664ad08dbb9734e..5a491d2cd1ae61f930583e2fb968f74c06ddb14d 100644 (file)
@@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        struct pcr_handle *handle;
        u32 base, len;
        int ret, i, bar = 0;
+       u8 val;
 
        dev_dbg(&(pcidev->dev),
                ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
@@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
        pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
        pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
        pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
-
+       rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
+       if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
+               pcr->aspm_enabled = false;
+       else
+               pcr->aspm_enabled = true;
        pcr->card_inserted = 0;
        pcr->card_removed = 0;
        INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
index 1456eabf96010f94584e17db22dfda4263195801..69d04eca767f5ff61ed709e6910d3f411e2cbb44 100644 (file)
@@ -1037,7 +1037,7 @@ kill_processes:
 
        if (hard_reset) {
                /* Release kernel context */
-               if (hl_ctx_put(hdev->kernel_ctx) == 1)
+               if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
                        hdev->kernel_ctx = NULL;
                hl_vm_fini(hdev);
                hl_mmu_fini(hdev);
@@ -1487,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev)
                }
        }
 
+       /* Disable PCI access from device F/W so it won't send us additional
+        * interrupts. We disable MSI/MSI-X at the halt_engines function and we
+        * can't have the F/W sending us interrupts after that. We need to
+        * disable the access here because if the device is marked disabled, the
+        * message won't be sent. Also, in case of heartbeat, the device CPU is
+        * marked as disabled so this message won't be sent
+        */
+       hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+
        /* Mark device as disabled */
        hdev->disabled = true;
 
index 20f77f58edef5573577592110289f3937202208f..c9a12980218ac55545c5f6a31684d85a039f419f 100644 (file)
@@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
        }
        counters->rx_throughput = result;
 
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET <<
+                       CPUCP_PKT_CTL_OPCODE_SHIFT);
+
        /* Fetch PCI tx counter */
        pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx);
        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
        counters->tx_throughput = result;
 
        /* Fetch PCI replay counter */
+       memset(&pkt, 0, sizeof(pkt));
        pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET <<
                        CPUCP_PKT_CTL_OPCODE_SHIFT);
 
index e0d7f5fbaa5c3f2950068d4618b2d5fb5b540a9c..60e16dc4bcac33b0f2bd4dc073d2f483e3197d00 100644 (file)
@@ -2182,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
                        struct hl_mmu_hop_info *hops);
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
 
 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
                                void __iomem *dst, u32 src_offset, u32 size);
index 12efbd9d2e3a0b7459dcb8298e69248bef1e1750..d25892d61ec9ddc2ceda987b39d45d0ec6ec75d9 100644 (file)
@@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
 
        hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
                                        &hw_idle.busy_engines_mask_ext, NULL);
+       hw_idle.busy_engines_mask =
+                       lower_32_bits(hw_idle.busy_engines_mask_ext);
 
        return copy_to_user(out, &hw_idle,
                min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
index cbe9da4e0211b5118b112b2bdf92f91cfef46360..5d4fbdcaefe3f2bb0c4522d47e68372720c95c41 100644 (file)
@@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
 {
        struct hl_device *hdev = ctx->hdev;
        u64 next_vaddr, i;
+       bool is_host_addr;
        u32 page_size;
 
+       is_host_addr = !hl_is_dram_va(hdev, vaddr);
        page_size = phys_pg_pack->page_size;
        next_vaddr = vaddr;
 
@@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
                /*
                 * unmapping on Palladium can be really long, so avoid a CPU
                 * soft lockup bug by sleeping a little between unmapping pages
+                *
+                * In addition, when unmapping host memory we pass through
+                * the Linux kernel to unpin the pages and that takes a long
+                * time. Therefore, sleep every 32K pages to avoid soft lockup
                 */
-               if (hdev->pldm)
-                       usleep_range(500, 1000);
+               if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
+                       usleep_range(50, 200);
        }
 }
 
index 33ae953d3a3680126cbe42090ea677fb25b9f582..28a4638741d8881d4589a3023297e6e0718ece2f 100644 (file)
@@ -9,7 +9,7 @@
 
 #include "habanalabs.h"
 
-static bool is_dram_va(struct hl_device *hdev, u64 virt_addr)
+bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
 {
        struct asic_fixed_properties *prop = &hdev->asic_prop;
 
@@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
        if (!hdev->mmu_enable)
                return 0;
 
-       is_dram_addr = is_dram_va(hdev, virt_addr);
+       is_dram_addr = hl_is_dram_va(hdev, virt_addr);
 
        if (is_dram_addr)
                mmu_prop = &prop->dmmu;
@@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
        if (!hdev->mmu_enable)
                return 0;
 
-       is_dram_addr = is_dram_va(hdev, virt_addr);
+       is_dram_addr = hl_is_dram_va(hdev, virt_addr);
 
        if (is_dram_addr)
                mmu_prop = &prop->dmmu;
index 2ce6ea89d4fa22930aef8e325e2db9ce1b193d49..06d8a44dd5d428e0bfdbb12a5571afef8c7163c8 100644 (file)
@@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
 {
        /* MMU H/W fini was already done in device hw_fini() */
 
-       kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
-       gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+       if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
+               kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
+               gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);
+       }
+
+       /* Make sure that if we arrive here again without init having been
+        * called we won't cause a kernel panic. This can happen, for example,
+        * if we fail at certain points during the hard reset code
+        */
+       hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
 }
 
 /**
index 8c09e4466af8ce72eff7787b1986389f68bdb595..b328ddaa64ee51efde1a58188c2dfd965c87be46 100644 (file)
@@ -4002,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
                        VM_DONTCOPY | VM_NORESERVE;
 
-       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+                               (dma_addr - HOST_PHYS_BASE), size);
        if (rc)
                dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
 
index b8b4aa636b7cb652a4e7e5790ef26cfb6792fee6..63679a747d2cd285e74f8f5681e3688873efbf32 100644 (file)
@@ -2719,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
                        VM_DONTCOPY | VM_NORESERVE;
 
-       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
+       rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
+                               (dma_addr - HOST_PHYS_BASE), size);
        if (rc)
                dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
 
index de7cb0369c308f9fdd664552c9f14c985f3f7c1e..002426e3cf76c9d2f0fec4c35f757a7153717710 100644 (file)
@@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
                     "merging was advertised but not possible");
        blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
 
-       if (mmc_card_mmc(card))
+       if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
                block_size = card->ext_csd.data_sector_size;
+               WARN_ON(block_size != 512 && block_size != 4096);
+       }
 
        blk_queue_logical_block_size(mq->queue, block_size);
        /*
index 44bea5e4aeda19b05c8e5c8149f2276f7e4e4ae9..b23773583179df8071bce6f64772c0a6e2560159 100644 (file)
@@ -20,6 +20,8 @@
 #include "sdio_cis.h"
 #include "sdio_ops.h"
 
+#define SDIO_READ_CIS_TIMEOUT_MS  (10 * 1000) /* 10s */
+
 static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
                         const unsigned char *buf, unsigned size)
 {
@@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
 
        do {
                unsigned char tpl_code, tpl_link;
+               unsigned long timeout = jiffies +
+                       msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS);
 
                ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code);
                if (ret)
@@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
                        prev = &this->next;
 
                        if (ret == -ENOENT) {
+                               if (time_after(jiffies, timeout))
+                                       break;
                                /* warn about unknown tuples */
                                pr_warn_ratelimited("%s: queuing unknown"
                                       " CIS tuple 0x%02x (%u bytes)\n",
index bbf3496f44955f333ed2085499d99999403542ab..f9780c65ebe98a76d94fe014b3299ccde99ddd23 100644 (file)
@@ -314,11 +314,7 @@ err_clk:
 
 static void sdhci_brcmstb_shutdown(struct platform_device *pdev)
 {
-       int ret;
-
-       ret = sdhci_pltfm_unregister(pdev);
-       if (ret)
-               dev_err(&pdev->dev, "failed to shutdown\n");
+       sdhci_pltfm_suspend(&pdev->dev);
 }
 
 MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match);
index 4b673792b5a42ef9b2a5a20ee8152393e93087ad..d90020ed362273a47bdbabde4423fb283a4098be 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "sdhci-pltfm.h"
 
+#define SDHCI_DWCMSHC_ARG2_STUFF       GENMASK(31, 16)
+
 /* DWCMSHC specific Mode Select value */
 #define DWCMSHC_CTRL_HS400             0x7
 
@@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc,
        sdhci_adma_write_desc(host, desc, addr, len, cmd);
 }
 
+static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc,
+                                    struct mmc_request *mrq)
+{
+       struct sdhci_host *host = mmc_priv(mmc);
+
+       /*
+        * Whether or not V4 mode is enabled, the ARGUMENT2 register is a
+        * 32-bit block count register that does not support the stuff bits
+        * of the CMD23 argument on the dwcmshc host controller.
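+        * For example, a CMD23 issued for an eMMC reliable write sets bit 31
+        * of the argument, which cannot be conveyed through ARGUMENT2.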
+        */
+       if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF))
+               host->flags &= ~SDHCI_AUTO_CMD23;
+       else
+               host->flags |= SDHCI_AUTO_CMD23;
+}
+
+static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+       dwcmshc_check_auto_cmd23(mmc, mrq);
+
+       sdhci_request(mmc, mrq);
+}
+
 static void dwcmshc_set_uhs_signaling(struct sdhci_host *host,
                                      unsigned int timing)
 {
@@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev)
 
        sdhci_get_of_property(pdev);
 
+       host->mmc_host_ops.request = dwcmshc_request;
+
        err = sdhci_add_host(host);
        if (err)
                goto err_clk;
index 6301b81cf5731b871e1c7d1523e18583d20f0c63..9bd717ff784be7775fdd31dd9e412799ee7e68f0 100644 (file)
@@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host)
        return host->private;
 }
 
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#ifdef CONFIG_PM_SLEEP
 int sdhci_pltfm_suspend(struct device *dev);
 int sdhci_pltfm_resume(struct device *dev);
-extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#else
+static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; }
+static inline int sdhci_pltfm_resume(struct device *dev) { return 0; }
+#endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
index c67611fdaa8aa13d5cb82b7dc345ce5ce486a3ae..d19eef5f725f88d2df7847e6cf97e3669d52cbfa 100644 (file)
@@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host,
        /* Disable tuning request and auto-retuning again */
        xenon_retune_setup(host);
 
-       xenon_set_acg(host, true);
+       /*
+        * The ACG should be turned off at early init time in order
+        * to avoid possible issues with 1.8V regulator stabilization.
+        * The feature is enabled at a later stage.
+        */
+       xenon_set_acg(host, false);
 
        xenon_set_sdclk_off_idle(host, sdhc_id, false);
 
index 5cdf05bcbf8faacfb24ee71758ff0182a3517836..3fa8c22d3f36af1da132b9f3932f48101af0ccd7 100644 (file)
@@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
        /* Extract interleaved payload data and ECC bits */
        for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
                if (buf)
-                       nand_extract_bits(buf, step * eccsize, tmp_buf,
+                       nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
                                          src_bit_off, eccsize * 8);
                src_bit_off += eccsize * 8;
 
index fdb112e8a90d294e64632782eacad0010184d2cb..a304fda5d1fa5684be2b0a8a48caaa5cd9eaaa95 100644 (file)
@@ -579,7 +579,7 @@ static int ebu_nand_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ebu_nand_controller *ebu_host;
        struct nand_chip *nand;
-       struct mtd_info *mtd = NULL;
+       struct mtd_info *mtd;
        struct resource *res;
        char *resname;
        int ret;
@@ -647,12 +647,13 @@ static int ebu_nand_probe(struct platform_device *pdev)
               ebu_host->ebu + EBU_ADDR_SEL(cs));
 
        nand_set_flash_node(&ebu_host->chip, dev->of_node);
+
+       mtd = nand_to_mtd(&ebu_host->chip);
        if (!mtd->name) {
                dev_err(ebu_host->dev, "NAND label property is mandatory\n");
                return -EINVAL;
        }
 
-       mtd = nand_to_mtd(&ebu_host->chip);
        mtd->dev.parent = dev;
        ebu_host->dev = dev;
 
index f2b9250c0ea830b4096c2011e4442498fca95944..0750121ac371c73081160bf11a5548dcc38b8f50 100644 (file)
@@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip)
 {
        unsigned int eccsteps, eccbytes;
 
+       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
+       chip->ecc.algo = bch ? NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING;
+
        if (!bch)
                return 0;
 
@@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip)
                return -EINVAL;
        }
 
-       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
-       chip->ecc.algo = NAND_ECC_ALGO_BCH;
        chip->ecc.size = 512;
        chip->ecc.strength = bch;
        chip->ecc.bytes = eccbytes;
@@ -2273,8 +2274,6 @@ static int __init ns_init_module(void)
        nsmtd       = nand_to_mtd(chip);
        nand_set_controller_data(chip, (void *)ns);
 
-       chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
-       chip->ecc.algo   = NAND_ECC_ALGO_HAMMING;
        /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
        /* and 'badblocks' parameters to work */
        chip->options   |= NAND_SKIP_BBTSCAN;
index fbb9955f2467118355e5f0217859d9cbd8de1a69..2c3e65cb68f33053e3cb1901c4f8be876bd00163 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/jiffies.h>
 #include <linux/sched.h>
 #include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-bch.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
 #include <linux/omap-dma.h>
@@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
 static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
                                 struct mtd_oob_region *oobregion)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
        int off = BADBLOCK_MARKER_LENGTH;
 
-       if (section >= chip->ecc.steps)
+       if (section >= engine_conf->nsteps)
                return -ERANGE;
 
        /*
         * When SW correction is employed, one OMAP specific marker byte is
         * reserved after each ECC step.
         */
-       oobregion->offset = off + (section * (chip->ecc.bytes + 1));
-       oobregion->length = chip->ecc.bytes;
+       oobregion->offset = off + (section * (engine_conf->code_size + 1));
+       oobregion->length = engine_conf->code_size;
 
        return 0;
 }
@@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
 static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
                                  struct mtd_oob_region *oobregion)
 {
-       struct nand_chip *chip = mtd_to_nand(mtd);
+       struct nand_device *nand = mtd_to_nanddev(mtd);
+       const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv;
        int off = BADBLOCK_MARKER_LENGTH;
 
        if (section)
@@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
         * When SW correction is employed, one OMAP specific marker byte is
         * reserved after each ECC step.
         */
-       off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+       off += ((engine_conf->code_size + 1) * engine_conf->nsteps);
        if (off >= mtd->oobsize)
                return -ERANGE;
 
index 8ea545bb924d2b2ee83edfc8629b397d12f5bdf4..61d932c1b71806166c089a173182c33a7d4fcc1a 100644 (file)
@@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                                      const struct nand_page_io_req *req)
 {
        struct nand_device *nand = spinand_to_nand(spinand);
+       struct mtd_info *mtd = spinand_to_mtd(spinand);
        struct spi_mem_dirmap_desc *rdesc;
        unsigned int nbytes = 0;
        void *buf = NULL;
@@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
                memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
                       req->datalen);
 
-       if (req->ooblen)
-               memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
-                      req->ooblen);
+       if (req->ooblen) {
+               if (req->mode == MTD_OPS_AUTO_OOB)
+                       mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
+                                                   spinand->oobbuf,
+                                                   req->ooboffs,
+                                                   req->ooblen);
+               else
+                       memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
+                              req->ooblen);
+       }
 
        return 0;
 }
index 98df38fe553ced05b714767d4ad628dcba45819a..12d085405bd052c3621280d97bd3b92575466379 100644 (file)
@@ -332,7 +332,7 @@ static int __init arc_rimi_init(void)
                dev->irq = 9;
 
        if (arcrimi_probe(dev)) {
-               free_netdev(dev);
+               free_arcdev(dev);
                return -EIO;
        }
 
@@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void)
        iounmap(lp->mem_start);
        release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
        free_irq(dev->irq, dev);
-       free_netdev(dev);
+       free_arcdev(dev);
 }
 
 #ifndef MODULE
index 22a49c6d7ae6ef590b3b0e2d8b6bcf2a10f8b0b7..5d4a4c7efbbff3763193844e2052ca32dcb27490 100644 (file)
@@ -298,6 +298,10 @@ struct arcnet_local {
 
        int excnak_pending;    /* We just got an excessive nak interrupt */
 
+       /* RESET flag handling */
+       int reset_in_progress;
+       struct work_struct reset_work;
+
        struct {
                uint16_t sequence;      /* sequence number (incs with each packet) */
                __be16 aborted_seq;
@@ -350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc)
 
 void arcnet_unregister_proto(struct ArcProto *proto);
 irqreturn_t arcnet_interrupt(int irq, void *dev_id);
+
 struct net_device *alloc_arcdev(const char *name);
+void free_arcdev(struct net_device *dev);
 
 int arcnet_open(struct net_device *dev);
 int arcnet_close(struct net_device *dev);
index e04efc0a5c977ee8b33a0ff77fe15c85ee322119..d76dd7d14299e198f305f4effed8611a8c9b480f 100644 (file)
@@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t)
        struct arcnet_local *lp = from_timer(lp, t, timer);
        struct net_device *dev = lp->dev;
 
-       if (!netif_carrier_ok(dev)) {
+       spin_lock_irq(&lp->lock);
+
+       if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
                netif_carrier_on(dev);
                netdev_info(dev, "link up\n");
        }
+
+       spin_unlock_irq(&lp->lock);
+}
+
+static void reset_device_work(struct work_struct *work)
+{
+       struct arcnet_local *lp;
+       struct net_device *dev;
+
+       lp = container_of(work, struct arcnet_local, reset_work);
+       dev = lp->dev;
+
+       /* Do not bring the network interface back up if an ifdown
+        * was already done.
+        */
+       if (!netif_running(dev) || !lp->reset_in_progress)
+               return;
+
+       rtnl_lock();
+
+       /* Do another check, in case of an ifdown that was triggered in
+        * the small race window between the exit condition above and
+        * acquiring RTNL.
+        */
+       if (!netif_running(dev) || !lp->reset_in_progress)
+               goto out;
+
+       dev_close(dev);
+       dev_open(dev, NULL);
+
+out:
+       rtnl_unlock();
 }
 
 static void arcnet_reply_tasklet(unsigned long data)
@@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name)
                lp->dev = dev;
                spin_lock_init(&lp->lock);
                timer_setup(&lp->timer, arcnet_timer, 0);
+               INIT_WORK(&lp->reset_work, reset_device_work);
        }
 
        return dev;
 }
 EXPORT_SYMBOL(alloc_arcdev);
 
+void free_arcdev(struct net_device *dev)
+{
+       struct arcnet_local *lp = netdev_priv(dev);
+
+       /* Do not cancel this at ->ndo_close(), as the workqueue itself
+        * indirectly calls the ifdown path through dev_close().
+        */
+       cancel_work_sync(&lp->reset_work);
+       free_netdev(dev);
+}
+EXPORT_SYMBOL(free_arcdev);
+
 /* Open/initialize the board.  This is called sometime after booting when
  * the 'ifconfig' program is run.
  *
@@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev)
 
        /* shut down the card */
        lp->hw.close(dev);
+
+       /* clear the reset-in-progress flag */
+       lp->reset_in_progress = 0;
+
        module_put(lp->hw.owner);
        return 0;
 }
@@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 
        spin_lock_irqsave(&lp->lock, flags);
 
+       if (lp->reset_in_progress)
+               goto out;
+
        /* RESET flag was enabled - if device is not running, we must
         * clear it right away (but nothing else).
         */
@@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
                if (status & RESETflag) {
                        arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
                                   status);
-                       arcnet_close(dev);
-                       arcnet_open(dev);
+
+                       lp->reset_in_progress = 1;
+                       netif_stop_queue(dev);
+                       netif_carrier_off(dev);
+                       schedule_work(&lp->reset_work);
 
                        /* get out of the interrupt handler! */
-                       break;
+                       goto out;
                }
                /* RX is inhibited - we must have received something.
                 * Prepare to receive into the next buffer.
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
        udelay(1);
        lp->hw.intmask(dev, lp->intmask);
 
+out:
        spin_unlock_irqrestore(&lp->lock, flags);
        return retval;
 }
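
The arcnet hunks above follow a common recovery pattern: the hard-IRQ handler only records the fault (flag, stop queue, carrier off) and schedules work, the heavy dev_close()/dev_open() sequence runs later in process context under RTNL, and teardown flushes that work before free_netdev(). A condensed, hedged sketch of the pattern; the struct and function names here are illustrative, not the driver's own, and demo_saw_fatal_reset() is a hypothetical helper:

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_priv {
	spinlock_t lock;
	int reset_in_progress;
	struct work_struct reset_work;
	struct net_device *dev;
};

static bool demo_saw_fatal_reset(struct net_device *dev);	/* hypothetical */

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct demo_priv *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	if (demo_saw_fatal_reset(dev)) {
		lp->reset_in_progress = 1;
		netif_stop_queue(dev);
		netif_carrier_off(dev);
		schedule_work(&lp->reset_work);	/* too heavy for IRQ context */
	}
	spin_unlock_irqrestore(&lp->lock, flags);
	return IRQ_HANDLED;
}

static void demo_reset_work(struct work_struct *work)
{
	struct demo_priv *lp = container_of(work, struct demo_priv, reset_work);

	rtnl_lock();				/* dev_close()/dev_open() need RTNL */
	if (netif_running(lp->dev) && lp->reset_in_progress) {
		dev_close(lp->dev);		/* driver's ->ndo_stop() clears the flag */
		dev_open(lp->dev, NULL);
	}
	rtnl_unlock();
}

static void demo_free_dev(struct net_device *dev)
{
	struct demo_priv *lp = netdev_priv(dev);

	cancel_work_sync(&lp->reset_work);	/* never from ->ndo_stop() itself */
	free_netdev(dev);
}
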
index f983c4ce6b07f186fd2b6cc136df9eabfa7c8df4..be618e4b9ed5e0aa20f949ea23abf416ce5239b6 100644 (file)
@@ -169,7 +169,7 @@ static int __init com20020_init(void)
                dev->irq = 9;
 
        if (com20020isa_probe(dev)) {
-               free_netdev(dev);
+               free_arcdev(dev);
                return -EIO;
        }
 
@@ -182,7 +182,7 @@ static void __exit com20020_exit(void)
        unregister_netdev(my_dev);
        free_irq(my_dev->irq, my_dev);
        release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
-       free_netdev(my_dev);
+       free_arcdev(my_dev);
 }
 
 #ifndef MODULE
index eb7f76753c9c0dbdd2ca07985d9c407b433ab506..8bdc44b7e09a188e678eaade1faf5fe035fd0434 100644 (file)
@@ -291,7 +291,7 @@ static void com20020pci_remove(struct pci_dev *pdev)
 
                unregister_netdev(dev);
                free_irq(dev->irq, dev);
-               free_netdev(dev);
+               free_arcdev(dev);
        }
 }
 
index cf607ffcf358e95eab6a63c3c7bb63382cd57c49..9cc5eb6a8e9051aaaf16e876b34a3803c94d571a 100644 (file)
@@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link)
                dev = info->dev;
                if (dev) {
                        dev_dbg(&link->dev, "kfree...\n");
-                       free_netdev(dev);
+                       free_arcdev(dev);
                }
                dev_dbg(&link->dev, "kfree2...\n");
                kfree(info);
index cf214b7306715487b84cd2301fca4de1bada6f48..3856b447d38ed27ca5ef98a1d0b4191f92152d31 100644 (file)
@@ -396,7 +396,7 @@ static int __init com90io_init(void)
        err = com90io_probe(dev);
 
        if (err) {
-               free_netdev(dev);
+               free_arcdev(dev);
                return err;
        }
 
@@ -419,7 +419,7 @@ static void __exit com90io_exit(void)
 
        free_irq(dev->irq, dev);
        release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
-       free_netdev(dev);
+       free_arcdev(dev);
 }
 
 module_init(com90io_init)
index 3dc3d533cb19a258e72f57d46443b9b52d1b8df7..d8dfb9ea0de89bf9929ee334d9f332c9156cef18 100644 (file)
@@ -554,7 +554,7 @@ err_free_irq:
 err_release_mem:
        release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
 err_free_dev:
-       free_netdev(dev);
+       free_arcdev(dev);
        return -EIO;
 }
 
@@ -672,7 +672,7 @@ static void __exit com90xx_exit(void)
                release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
                release_mem_region(dev->mem_start,
                                   dev->mem_end - dev->mem_start + 1);
-               free_netdev(dev);
+               free_arcdev(dev);
        }
 }
 
index 3486704c8a95736fe3b917ddbe85da6c9639dc9c..c73e2a65c90443fe73e8522bca9d9a4e54f62868 100644 (file)
@@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev)
 
        cf->can_id |= CAN_ERR_RESTARTED;
 
-       netif_rx_ni(skb);
-
        stats->rx_packets++;
        stats->rx_bytes += cf->len;
 
+       netif_rx_ni(skb);
+
 restart:
        netdev_dbg(dev, "restarted\n");
        priv->can_stats.restarts++;
@@ -1163,7 +1163,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
        struct can_ctrlmode cm = {.flags = priv->ctrlmode};
-       struct can_berr_counter bec;
+       struct can_berr_counter bec = { };
        enum can_state state = priv->state;
 
        if (priv->do_get_state)
index 61631f4fd92a1d2dea864873431d60beca8b4318..f347ecc79aef2d5395ecc44167fbdb999eb4edef 100644 (file)
@@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
        else
                memcpy(cfd->data, rm->d, cfd->len);
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
-
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cfd->len;
 
+       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+
        return 0;
 }
 
@@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
        if (!skb)
                return -ENOMEM;
 
-       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
-
        netdev->stats.rx_packets++;
        netdev->stats.rx_bytes += cf->len;
 
+       peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+
        return 0;
 }
 
index fa47bab510bb90cb35bcc56fe9d0275dccc57cf4..f9a524c5f6d62710722b920b615154670dfc8992 100644 (file)
@@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        struct net_device *peer;
        struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
        struct net_device_stats *peerstats, *srcstats = &dev->stats;
+       u8 len;
 
        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;
@@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
        skb->dev        = peer;
        skb->ip_summed  = CHECKSUM_UNNECESSARY;
 
+       len = cfd->len;
        if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
                srcstats->tx_packets++;
-               srcstats->tx_bytes += cfd->len;
+               srcstats->tx_bytes += len;
                peerstats = &peer->stats;
                peerstats->rx_packets++;
-               peerstats->rx_bytes += cfd->len;
+               peerstats->rx_bytes += len;
        }
 
 out_unlock:
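
The can/dev.c, peak_usb and vxcan hunks above all make the same correction: once an skb has been handed to netif_rx_ni() it may already have been delivered and freed, so any field of the frame it carries must be read, and the counters updated, before the hand-off. A minimal hedged sketch of the safe ordering (names are illustrative):

#include <linux/can.h>
#include <linux/netdevice.h>

/* Read what you need from the frame and bump the counters first; the skb
 * must not be touched after netif_rx_ni() returns. Some call sites instead
 * cache the length and only update stats when netif_rx_ni() succeeds.
 */
static void demo_deliver_can_frame(struct net_device *dev, struct sk_buff *skb)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u8 len = cfd->len;			/* cache before the hand-off */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;

	netif_rx_ni(skb);
}
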
index 288b5a5c3e0dbcae40788d27f9843cd06bf3624e..95c7fa171e35acd7a04886ca9e03408b77f3ce16 100644 (file)
@@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
            !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED))
                return -EINVAL;
 
-       if (vlan->vid_end > dev->num_vlans)
+       if (vlan->vid_end >= dev->num_vlans)
                return -ERANGE;
 
        b53_enable_vlan(dev, true, ds->vlan_filtering);
index 1e9a0adda2d6928fe5c46cdecac25af05af8a0db..445226720ff299899a4ab8b3bc2f85645aec4525 100644 (file)
@@ -509,15 +509,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
        /* Find our integrated MDIO bus node */
        dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
        priv->master_mii_bus = of_mdio_find_bus(dn);
-       if (!priv->master_mii_bus)
+       if (!priv->master_mii_bus) {
+               of_node_put(dn);
                return -EPROBE_DEFER;
+       }
 
        get_device(&priv->master_mii_bus->dev);
        priv->master_mii_dn = dn;
 
        priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
-       if (!priv->slave_mii_bus)
+       if (!priv->slave_mii_bus) {
+               of_node_put(dn);
                return -ENOMEM;
+       }
 
        priv->slave_mii_bus->priv = priv;
        priv->slave_mii_bus->name = "sf2 slave mii";
index c973db101b7291782be7960984833bffce8d8267..a4570ba29c831c95f07b9395e75863cacd5422f0 100644 (file)
@@ -1187,6 +1187,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = {
                .port_cnt = 5,          /* total cpu and user ports */
        },
        {
+               /*
+                * WARNING
+                * =======
+                * KSZ8794 is similar to KSZ8795, except that its port map
+                * contains a gap between the external and CPU ports, so the
+                * port map is NOT contiguous. The per-port register
+                * map is shifted accordingly too, i.e. registers at
+                * offset 0x40 are NOT used on KSZ8794 and they ARE
+                * used on KSZ8795 for external port 3.
+                *           external  cpu
+                * KSZ8794   0,1,2      4
+                * KSZ8795   0,1,2,3    4
+                * KSZ8765   0,1,2,3    4
+                */
                .chip_id = 0x8794,
                .dev_name = "KSZ8794",
                .num_vlans = 4096,
@@ -1220,9 +1234,13 @@ static int ksz8795_switch_init(struct ksz_device *dev)
                        dev->num_vlans = chip->num_vlans;
                        dev->num_alus = chip->num_alus;
                        dev->num_statics = chip->num_statics;
-                       dev->port_cnt = chip->port_cnt;
+                       dev->port_cnt = fls(chip->cpu_ports);
+                       dev->cpu_port = fls(chip->cpu_ports) - 1;
+                       dev->phy_port_cnt = dev->port_cnt - 1;
                        dev->cpu_ports = chip->cpu_ports;
-
+                       dev->host_mask = chip->cpu_ports;
+                       dev->port_mask = (BIT(dev->phy_port_cnt) - 1) |
+                                        chip->cpu_ports;
                        break;
                }
        }
@@ -1231,17 +1249,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
        if (!dev->cpu_ports)
                return -ENODEV;
 
-       dev->port_mask = BIT(dev->port_cnt) - 1;
-       dev->port_mask |= dev->host_mask;
-
        dev->reg_mib_cnt = KSZ8795_COUNTER_NUM;
        dev->mib_cnt = ARRAY_SIZE(mib_names);
 
-       dev->phy_port_cnt = dev->port_cnt - 1;
-
-       dev->cpu_port = dev->port_cnt - 1;
-       dev->host_mask = BIT(dev->cpu_port);
-
        dev->ports = devm_kzalloc(dev->dev,
                                  dev->port_cnt * sizeof(struct ksz_port),
                                  GFP_KERNEL);
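
The ksz8795 hunk above derives the port counts and masks from the chip's CPU-port bitmap instead of a flat port count, which is what makes the gap described in the WARNING comment harmless. A worked hedged example of the arithmetic, assuming purely for illustration a KSZ8795-style layout with the CPU attached to port 4 (cpu_ports == BIT(4)):

#include <linux/bitops.h>

static u32 demo_ksz_port_mask(void)
{
	u32 cpu_ports = BIT(4);			/* assumed: CPU on port 4        */
	int port_cnt = fls(cpu_ports);		/* 5: highest port index + 1     */
	int phy_port_cnt = port_cnt - 1;	/* 4: external ports 0..3        */

	/* the CPU port index itself would be fls(cpu_ports) - 1 == 4 */
	return (BIT(phy_port_cnt) - 1) | cpu_ports;	/* 0x0f | 0x10 == 0x1f  */
}
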
index cf743133b0b93a0af95b0fa25b368def9184840e..389abfd277702968558d435f482c21c2ec4b30ac 100644 (file)
@@ -400,7 +400,7 @@ int ksz_switch_register(struct ksz_device *dev,
                gpiod_set_value_cansleep(dev->reset_gpio, 1);
                usleep_range(10000, 12000);
                gpiod_set_value_cansleep(dev->reset_gpio, 0);
-               usleep_range(100, 1000);
+               msleep(100);
        }
 
        mutex_init(&dev->dev_mutex);
@@ -434,7 +434,7 @@ int ksz_switch_register(struct ksz_device *dev,
                                if (of_property_read_u32(port, "reg",
                                                         &port_num))
                                        continue;
-                               if (port_num >= dev->port_cnt)
+                               if (!(dev->port_mask & BIT(port_num)))
                                        return -EINVAL;
                                of_get_phy_mode(port,
                                                &dev->ports[port_num].interface);
index eafe6bedc692e283fcf3a125f00d634c3e2c5500..54aa942eedaa6c28d15db3acb95b59520726ee40 100644 (file)
@@ -1676,7 +1676,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port,
                if (!entry.portvec)
                        entry.state = 0;
        } else {
-               entry.portvec |= BIT(port);
+               if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)
+                       entry.portvec = BIT(port);
+               else
+                       entry.portvec |= BIT(port);
+
                entry.state = state;
        }
 
index 66ddf67b87371d0a9c12667b700a6996710023f9..7b96396be609e7bb1331d63c6fd11a403922e0b7 100644 (file)
@@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip,
                if (err)
                        return err;
 
+               err = mv88e6185_g1_stu_data_read(chip, entry);
+               if (err)
+                       return err;
+
                /* VTU DBNum[3:0] are located in VTU Operation 3:0
                 * VTU DBNum[5:4] are located in VTU Operation 9:8
                 */
index b1ae9eb8f24793a56089926fe78fedbbfa3399db..0404aafd5ce56ddc1c1ecac402d24dd2f9c51455 100644 (file)
@@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
 
        priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
-       if (IS_ERR(priv->clk))
-               return PTR_ERR(priv->clk);
+       if (IS_ERR(priv->clk)) {
+               ret = PTR_ERR(priv->clk);
+               goto err_free_netdev;
+       }
 
        /* Allocate number of TX rings */
        priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
index e5cfbe196ba664decb2a32216a47a1803373d947..19dc7dc054a29075fa42e05a92824e9ced2ee7c2 100644 (file)
@@ -1158,11 +1158,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 #endif
        }
        if (!n || !n->dev)
-               goto free_sk;
+               goto free_dst;
 
        ndev = n->dev;
-       if (!ndev)
-               goto free_dst;
        if (is_vlan_dev(ndev))
                ndev = vlan_dev_real_dev(ndev);
 
@@ -1250,7 +1248,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
 free_csk:
        chtls_sock_release(&csk->kref);
 free_dst:
-       neigh_release(n);
+       if (n)
+               neigh_release(n);
        dst_release(dst);
 free_sk:
        inet_csk_prepare_forced_close(newsk);
index c527f4ee1d3aed313124d2befbb6a536801e5594..0602d5d5d2eee25cdb1169ea430b18a1a76b3468 100644 (file)
@@ -462,6 +462,11 @@ struct bufdesc_ex {
  */
 #define FEC_QUIRK_CLEAR_SETUP_MII      (1 << 17)
 
+/* Some link partners do not tolerate the momentary reset of the REF_CLK
+ * frequency when the RNCTL register is cleared by hardware reset.
+ */
+#define FEC_QUIRK_NO_HARD_RESET                (1 << 18)
+
 struct bufdesc_prop {
        int qid;
        /* Address of Rx and Tx buffers */
index 04f24c66cf3668113014208d8a2dedc76180f131..9ebdb0e54291b204d5eacb60c3defbc2a8a669b6 100644 (file)
@@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = {
 static const struct fec_devinfo fec_imx28_info = {
        .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
                  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
-                 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+                 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
+                 FEC_QUIRK_NO_HARD_RESET,
 };
 
 static const struct fec_devinfo fec_imx6q_info = {
@@ -953,7 +954,8 @@ fec_restart(struct net_device *ndev)
         * For i.MX6SX SOC, enet use AXI bus, we use disable MAC
         * instead of reset MAC itself.
         */
-       if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+       if (fep->quirks & FEC_QUIRK_HAS_AVB ||
+           ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
                writel(0, fep->hwp + FEC_ECNTRL);
        } else {
                writel(1, fep->hwp + FEC_ECNTRL);
@@ -2165,9 +2167,9 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->parent = &pdev->dev;
 
        err = of_mdiobus_register(fep->mii_bus, node);
-       of_node_put(node);
        if (err)
                goto err_out_free_mdiobus;
+       of_node_put(node);
 
        mii_cnt++;
 
@@ -2180,6 +2182,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
 err_out_free_mdiobus:
        mdiobus_free(fep->mii_bus);
 err_out:
+       of_node_put(node);
        return err;
 }
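
The fec_main.c hunk above rearranges of_node_put() so the child-node reference taken earlier is dropped exactly once on both the success path and every error path. A small hedged sketch of that balancing (the surrounding helper is illustrative, not the FEC code itself):

#include <linux/of.h>
#include <linux/of_mdio.h>

static int demo_register_child_mdio(struct device_node *parent,
				    struct mii_bus *bus)
{
	struct device_node *node;
	int err;

	node = of_get_child_by_name(parent, "mdio");	/* takes a reference */
	err = of_mdiobus_register(bus, node);
	if (err)
		goto err_put;

	of_node_put(node);		/* reference no longer needed */
	return 0;

err_put:
	of_node_put(node);		/* of_node_put(NULL) is a no-op */
	return err;
}
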
 
index 9778c83150f1cc4508864d593bb5d33487c416db..f79034c786c847427fbf8bc9ad6921049a9d84e7 100644 (file)
@@ -5084,6 +5084,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+                       /* This barrier makes sure ibmvnic_next_crq()'s
+                        * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+                        * before ibmvnic_handle_crq()'s
+                        * switch(gen_crq->first) and switch(gen_crq->cmd).
+                        */
+                       dma_rmb();
                        ibmvnic_handle_crq(crq, adapter);
                        crq->generic.first = 0;
                }
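
The dma_rmb() added above enforces the usual descriptor-ring rule: the response/valid flag the hypervisor sets last must be read before the rest of the descriptor it filled in. A generic, hedged consumer-side sketch of that pairing (the descriptor layout is invented for illustration):

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_desc {
	u8  valid;		/* set last by the producer, behind dma_wmb() */
	u8  cmd;
	__le16 len;
	__le64 payload;
};

static bool demo_desc_ready(const struct demo_desc *desc, u8 *cmd_out)
{
	if (!READ_ONCE(desc->valid))
		return false;

	/* Order the 'valid' load before the payload loads below, so the CPU
	 * cannot hand us speculatively loaded, stale descriptor contents.
	 */
	dma_rmb();

	*cmd_out = desc->cmd;
	return true;
}
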
@@ -5438,11 +5444,6 @@ static int ibmvnic_remove(struct vio_dev *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&adapter->state_lock, flags);
-       if (test_bit(0, &adapter->resetting)) {
-               spin_unlock_irqrestore(&adapter->state_lock, flags);
-               return -EBUSY;
-       }
-
        adapter->state = VNIC_REMOVING;
        spin_unlock_irqrestore(&adapter->state_lock, flags);
 
index 21ee56420c3aee60f1428ebf2566611addd34c04..1b6ec9be155a6352eb1a330172c50b70b1bad98a 100644 (file)
@@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
 
        pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = PF_EVENT_SEVERITY_INFO;
-
-       /* Always report link is down if the VF queues aren't enabled */
-       if (!vf->queues_enabled) {
-               pfe.event_data.link_event.link_status = false;
-               pfe.event_data.link_event.link_speed = 0;
-       } else if (vf->link_forced) {
+       if (vf->link_forced) {
                pfe.event_data.link_event.link_status = vf->link_up;
                pfe.event_data.link_event.link_speed =
                        (vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
@@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
                pfe.event_data.link_event.link_speed =
                        i40e_virtchnl_link_speed(ls->link_speed);
        }
-
        i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT,
                               0, (u8 *)&pfe, sizeof(pfe), NULL);
 }
@@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
                }
        }
 
-       vf->queues_enabled = true;
-
 error_param:
        /* send the response to the VF */
        return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
        struct i40e_pf *pf = vf->pf;
        i40e_status aq_ret = 0;
 
-       /* Immediately mark queues as disabled */
-       vf->queues_enabled = false;
-
        if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
@@ -4046,20 +4035,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                goto error_param;
 
        vf = &pf->vf[vf_id];
-       vsi = pf->vsi[vf->lan_vsi_idx];
 
        /* When the VF is resetting wait until it is done.
         * It can take up to 200 milliseconds,
         * but wait for up to 300 milliseconds to be safe.
-        * If the VF is indeed in reset, the vsi pointer has
-        * to show on the newly loaded vsi under pf->vsi[id].
+        * Acquire the VSI pointer only after the VF has been
+        * properly initialized.
         */
        for (i = 0; i < 15; i++) {
-               if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
-                       if (i > 0)
-                               vsi = pf->vsi[vf->lan_vsi_idx];
+               if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states))
                        break;
-               }
                msleep(20);
        }
        if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) {
@@ -4068,6 +4053,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                ret = -EAGAIN;
                goto error_param;
        }
+       vsi = pf->vsi[vf->lan_vsi_idx];
 
        if (is_multicast_ether_addr(mac)) {
                dev_err(&pf->pdev->dev,
index 5491215d81debee4f1a9371d3eab7f015da1c0b2..091e32c1bb46fa12dc4a91afa10293ccac535cfe 100644 (file)
@@ -98,7 +98,6 @@ struct i40e_vf {
        unsigned int tx_rate;   /* Tx bandwidth limit in Mbps */
        bool link_forced;
        bool link_up;           /* only valid if VF link is forced */
-       bool queues_enabled;    /* true if the VF queues are enabled */
        bool spoofchk;
        u16 num_vlan;
 
index 56725356a17b80e3bdd0e6ae3a116206c66e43f6..fa1e128c24ecac69b4abb8023b4c4f3b8cee2bf8 100644 (file)
@@ -68,7 +68,9 @@
 #define ICE_INT_NAME_STR_LEN   (IFNAMSIZ + 16)
 #define ICE_AQ_LEN             64
 #define ICE_MBXSQ_LEN          64
-#define ICE_MIN_MSIX           2
+#define ICE_MIN_LAN_TXRX_MSIX  1
+#define ICE_MIN_LAN_OICR_MSIX  1
+#define ICE_MIN_MSIX           (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
 #define ICE_FDIR_MSIX          1
 #define ICE_NO_VSI             0xffff
 #define ICE_VSI_MAP_CONTIG     0
index 9e8e9531cd87185adf80870e70ce473dea460cb4..69c113a4de7e6a57d7e915e48d46c0e7ad659f55 100644 (file)
@@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
  */
 static int ice_get_max_txq(struct ice_pf *pf)
 {
-       return min_t(int, num_online_cpus(),
-                    pf->hw.func_caps.common_cap.num_txq);
+       return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+                   (u16)pf->hw.func_caps.common_cap.num_txq);
 }
 
 /**
@@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
  */
 static int ice_get_max_rxq(struct ice_pf *pf)
 {
-       return min_t(int, num_online_cpus(),
-                    pf->hw.func_caps.common_cap.num_rxq);
+       return min3(pf->num_lan_msix, (u16)num_online_cpus(),
+                   (u16)pf->hw.func_caps.common_cap.num_rxq);
 }
 
 /**
index 2d27f66ac8534c422e72885d5f3d7593bc51a622..192729546bbfc4e67757846d598f719669f72af5 100644 (file)
@@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
                       sizeof(struct in6_addr));
                input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
                input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass;
-               input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
+               /* if no protocol requested, use IPPROTO_NONE */
+               if (!fsp->m_u.usr_ip6_spec.l4_proto)
+                       input->ip.v6.proto = IPPROTO_NONE;
+               else
+                       input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto;
+
                memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
                       sizeof(struct in6_addr));
                memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
index 3df67486d42d981d829d0989b0a4f9d461e1e8af..ad9c22a1b97a0796983afa989d663fd4e5c619b0 100644 (file)
@@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 
        switch (vsi->type) {
        case ICE_VSI_PF:
-               vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
-                                      num_online_cpus());
+               vsi->alloc_txq = min3(pf->num_lan_msix,
+                                     ice_get_avail_txq_count(pf),
+                                     (u16)num_online_cpus());
                if (vsi->req_txq) {
                        vsi->alloc_txq = vsi->req_txq;
                        vsi->num_txq = vsi->req_txq;
@@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
                if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
                        vsi->alloc_rxq = 1;
                } else {
-                       vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
-                                              num_online_cpus());
+                       vsi->alloc_rxq = min3(pf->num_lan_msix,
+                                             ice_get_avail_rxq_count(pf),
+                                             (u16)num_online_cpus());
                        if (vsi->req_rxq) {
                                vsi->alloc_rxq = vsi->req_rxq;
                                vsi->num_rxq = vsi->req_rxq;
@@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
 
                pf->num_lan_rx = vsi->alloc_rxq;
 
-               vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
+               vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
+                                          max_t(int, vsi->alloc_rxq,
+                                                vsi->alloc_txq));
                break;
        case ICE_VSI_VF:
                vf = &pf->vf[vsi->vf_id];
index c52b9bb0e3ab173ba387b2e889b58036b10ee18d..e10ca8929f85e2f5aeda3810326b063d811bebb6 100644 (file)
@@ -3430,18 +3430,14 @@ static int ice_ena_msix_range(struct ice_pf *pf)
        if (v_actual < v_budget) {
                dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
                         v_budget, v_actual);
-/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */
-#define ICE_MIN_LAN_VECS 2
-#define ICE_MIN_RDMA_VECS 2
-#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1)
 
-               if (v_actual < ICE_MIN_LAN_VECS) {
+               if (v_actual < ICE_MIN_MSIX) {
                        /* error if we can't get minimum vectors */
                        pci_disable_msix(pf->pdev);
                        err = -ERANGE;
                        goto msix_err;
                } else {
-                       pf->num_lan_msix = ICE_MIN_LAN_VECS;
+                       pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
                }
        }
 
@@ -4884,9 +4880,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
                goto err_update_filters;
        }
 
-       /* Add filter for new MAC. If filter exists, just return success */
+       /* Add filter for new MAC. If filter exists, return success */
        status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
        if (status == ICE_ERR_ALREADY_EXISTS) {
+               /* Although this MAC filter is already present in hardware it's
+                * possible in some cases (e.g. bonding) that dev_addr was
+                * modified outside of the driver and needs to be restored back
+                * to this value.
+                */
+               memcpy(netdev->dev_addr, mac, netdev->addr_len);
                netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
                return 0;
        }
index a2d0aad8cfdd765decb8e089bb2660f94e3442e2..b6fa83c619dd723424c540943bb91f2772fe868a 100644 (file)
@@ -1923,12 +1923,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
                                  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
                        l4_proto = ip.v4->protocol;
                } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
+                       int ret;
+
                        tunnel |= ICE_TX_CTX_EIPT_IPV6;
                        exthdr = ip.hdr + sizeof(*ip.v6);
                        l4_proto = ip.v6->nexthdr;
-                       if (l4.hdr != exthdr)
-                               ipv6_skip_exthdr(skb, exthdr - skb->data,
-                                                &l4_proto, &frag_off);
+                       ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
+                                              &l4_proto, &frag_off);
+                       if (ret < 0)
+                               return -1;
                }
 
                /* define outer transport */
index 61d331ce38cddc2fb0ddb83cdd42caf133cc3465..ec8cd69d49928e81ac8792d090ac0c5b11991e66 100644 (file)
@@ -1675,12 +1675,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
        cmd->base.phy_address = hw->phy.addr;
 
        /* advertising link modes */
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half);
+       if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half);
+       if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);
+       if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full);
 
        /* set autoneg settings */
        if (hw->mac.autoneg == 1) {
@@ -1708,7 +1714,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
                                                     Asym_Pause);
        }
 
-       status = rd32(IGC_STATUS);
+       status = pm_runtime_suspended(&adapter->pdev->dev) ?
+                0 : rd32(IGC_STATUS);
 
        if (status & IGC_STATUS_LU) {
                if (status & IGC_STATUS_SPEED_1000) {
@@ -1792,6 +1799,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
 
        ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                cmd->link_modes.advertising);
+       /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
+        * We have to check this and convert it to ADVERTISE_2500_FULL
+        * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
+        */
+       if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
+               advertising |= ADVERTISE_2500_FULL;
 
        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
index 8b67d9b49a83a61d76b3fcf44c65b46f33f5dd4f..7ec04e48860c6a52e1c6a09f35a31d3ad3507473 100644 (file)
@@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
                              u16 *data)
 {
        struct igc_nvm_info *nvm = &hw->nvm;
+       s32 ret_val = -IGC_ERR_NVM;
        u32 attempts = 100000;
        u32 i, k, eewr = 0;
-       s32 ret_val = 0;
 
        /* A check for invalid values:  offset too large, too many words,
         * too many words for the offset, and not enough words.
@@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words,
        if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) ||
            words == 0) {
                hw_dbg("nvm parameter(s) out of bounds\n");
-               ret_val = -IGC_ERR_NVM;
                goto out;
        }
 
index 09cd0ec7ee87d8231acef20d6c9a468177057c69..67b8ffd21d8af39d543605ba98a9b70cf06051c4 100644 (file)
@@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
        }
 
 out:
-       return 0;
+       return ret_val;
 }
 
 /**
index a30eb90ba3d28a04e57a6a822745bfe232144f48..dd590086fe6a51ef356a8899bfc7a1338410161e 100644 (file)
@@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
        /* Clear entry invalidation bit */
        pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
 
-       /* Write tcam index - indirect access */
-       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
-       for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
-               mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
-
        /* Write sram index - indirect access */
        mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
        for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
                mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]);
 
+       /* Write tcam index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+               mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]);
+
        return 0;
 }
 
index d298b935717784e5ba3bdf3344a0898e624a5759..6c6b411e78fd87c66da58761a2f55cb765189a8b 100644 (file)
@@ -469,6 +469,9 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
        int pf = rvu_get_pf(req->hdr.pcifunc);
        u8 cgx_id, lmac_id;
 
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
        cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);
@@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
        int rc = 0, i;
        u64 cfg;
 
+       if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+               return -EPERM;
+
        rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
 
        rsp->hdr.rc = rc;
index 73fb94dd5fbcc54cd277635c7ebda39e7c9af531..e6869435e1f323041dd73c40e0ddf27aef27ba93 100644 (file)
@@ -478,10 +478,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
        dma_addr_t iova;
        u8 *buf;
 
-       buf = napi_alloc_frag(pool->rbsize);
+       buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
        if (unlikely(!buf))
                return -ENOMEM;
 
+       buf = PTR_ALIGN(buf, OTX2_ALIGN);
        iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
                                    DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
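
The otx2 hunk above fixes receive-buffer alignment by over-allocating by the alignment amount and rounding the returned pointer up, since napi_alloc_frag() by itself does not guarantee the stricter alignment the hardware wants here. A hedged sketch of the idiom, with a hypothetical 128-byte requirement:

#include <linux/kernel.h>
#include <linux/skbuff.h>

#define DEMO_ALIGN 128	/* hypothetical hardware alignment requirement */

/* Allocate DEMO_ALIGN extra bytes so that rounding the pointer up with
 * PTR_ALIGN() still leaves 'size' usable bytes behind it.
 */
static void *demo_alloc_rx_buf(unsigned int size)
{
	void *buf = napi_alloc_frag(size + DEMO_ALIGN);

	if (unlikely(!buf))
		return NULL;

	return PTR_ALIGN(buf, DEMO_ALIGN);	/* e.g. ...1234 -> ...1280 */
}
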
index 718f8c0a4f6b7cc695ccac7aac245e2776a01fca..84e501e057b4f97aa736a7ee64fff10c34d0e7bc 100644 (file)
@@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
 
        err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
        if (err)
-               return err;
+               goto free_page;
 
        cmd = mlx5_rsc_dump_cmd_create(mdev, key);
        if (IS_ERR(cmd)) {
index 072363e73f1cec36da973533bee411d6a0cd7ee0..6bc6b48a56dc79de2a8c0ef818932f4abb3bdedc 100644 (file)
@@ -167,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = {
        .min_size = 16 * 1024,
 };
 
+static bool
+mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+{
+       return !!(entry->tuple_nat_node.next);
+}
+
 static int
 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
 {
@@ -911,13 +917,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 err_insert:
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 err_rules:
-       rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
-                              &entry->tuple_nat_node, tuples_nat_ht_params);
+       if (mlx5_tc_ct_entry_has_nat(entry))
+               rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+                                      &entry->tuple_nat_node, tuples_nat_ht_params);
 err_tuple_nat:
-       if (entry->tuple_node.next)
-               rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
-                                      &entry->tuple_node,
-                                      tuples_ht_params);
+       rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+                              &entry->tuple_node,
+                              tuples_ht_params);
 err_tuple:
 err_set:
        kfree(entry);
@@ -932,7 +938,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
 {
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
        mutex_lock(&ct_priv->shared_counter_lock);
-       if (entry->tuple_node.next)
+       if (mlx5_tc_ct_entry_has_nat(entry))
                rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
                                       &entry->tuple_nat_node,
                                       tuples_nat_ht_params);
index 6c5c54bcd9be089ff091e6ad80f3adf58b45eb90..5cb936541b9e9e5da717c441121c7ca6f18e7009 100644 (file)
@@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
 {
-       return NUM_IPSEC_SW_COUNTERS;
+       return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
 }
 
 static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {}
@@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
 
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
 {
-       return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
+       return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0;
 }
 
 static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw)
index d20243d6a0326000643b6411d185354bf7bb0655..f23c67575073a5b0a9e58eac93f4594721435747 100644 (file)
@@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
 {
        struct mlx5e_channels new_channels = {};
        bool reset_channels = true;
+       bool opened;
        int err = 0;
 
        mutex_lock(&priv->state_lock);
@@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
        mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params,
                                                   trust_state);
 
-       if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-               priv->channels.params = new_channels.params;
+       opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+       if (!opened)
                reset_channels = false;
-       }
 
        /* Skip if tx_min_inline is the same */
        if (new_channels.params.tx_min_inline_mode ==
            priv->channels.params.tx_min_inline_mode)
                reset_channels = false;
 
-       if (reset_channels)
+       if (reset_channels) {
                err = mlx5e_safe_switch_channels(priv, &new_channels,
                                                 mlx5e_update_trust_state_hw,
                                                 &trust_state);
-       else
+       } else {
                err = mlx5e_update_trust_state_hw(priv, &trust_state);
+               if (!err && !opened)
+                       priv->channels.params = new_channels.params;
+       }
 
        mutex_unlock(&priv->state_lock);
 
index 2d37742a888c1514a8506a1adf6a87ca0b6ec8f3..302001d6661ea341de628fc9ef1f7dafcc2534f6 100644 (file)
@@ -447,12 +447,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
                goto out;
        }
 
-       new_channels.params = priv->channels.params;
+       new_channels.params = *cur_params;
        new_channels.params.num_channels = count;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               struct mlx5e_params old_params;
+
+               old_params = *cur_params;
                *cur_params = new_channels.params;
                err = mlx5e_num_channels_changed(priv);
+               if (err)
+                       *cur_params = old_params;
+
                goto out;
        }
 
index 6a852b4901aa0a742989d9882b37f21f07108750..3fc7d18ac868b20520dc33b46e496981d430afe2 100644 (file)
@@ -3614,18 +3614,23 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
        new_channels.params.num_tc = tc ? tc : 1;
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               struct mlx5e_params old_params;
+
+               old_params = priv->channels.params;
                priv->channels.params = new_channels.params;
+               err = mlx5e_num_channels_changed(priv);
+               if (err)
+                       priv->channels.params = old_params;
+
                goto out;
        }
 
        err = mlx5e_safe_switch_channels(priv, &new_channels,
                                         mlx5e_num_channels_changed_ctx, NULL);
-       if (err)
-               goto out;
 
-       priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
-                                   new_channels.params.num_tc);
 out:
+       priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+                                   priv->channels.params.num_tc);
        mutex_unlock(&priv->state_lock);
        return err;
 }
@@ -3757,7 +3762,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_channels new_channels = {};
-       struct mlx5e_params *old_params;
+       struct mlx5e_params *cur_params;
        int err = 0;
        bool reset;
 
@@ -3770,8 +3775,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
                goto out;
        }
 
-       old_params = &priv->channels.params;
-       if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
+       cur_params = &priv->channels.params;
+       if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
                netdev_warn(netdev, "can't set LRO with legacy RQ\n");
                err = -EINVAL;
                goto out;
@@ -3779,18 +3784,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
 
        reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
 
-       new_channels.params = *old_params;
+       new_channels.params = *cur_params;
        new_channels.params.lro_en = enable;
 
-       if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
-               if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) ==
+       if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) {
+               if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
                    mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL))
                        reset = false;
        }
 
        if (!reset) {
-               *old_params = new_channels.params;
+               struct mlx5e_params old_params;
+
+               old_params = *cur_params;
+               *cur_params = new_channels.params;
                err = mlx5e_modify_tirs_lro(priv);
+               if (err)
+                       *cur_params = old_params;
                goto out;
        }
 
@@ -4067,9 +4077,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        }
 
        if (!reset) {
+               unsigned int old_mtu = params->sw_mtu;
+
                params->sw_mtu = new_mtu;
-               if (preactivate)
-                       preactivate(priv, NULL);
+               if (preactivate) {
+                       err = preactivate(priv, NULL);
+                       if (err) {
+                               params->sw_mtu = old_mtu;
+                               goto out;
+                       }
+               }
                netdev->mtu = params->sw_mtu;
                goto out;
        }
@@ -5027,7 +5044,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
            FT_CAP(flow_table_modify)) {
-#ifdef CONFIG_MLX5_ESWITCH
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
                netdev->hw_features      |= NETIF_F_HW_TC;
 #endif
 #ifdef CONFIG_MLX5_EN_ARFS
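
Several of the mlx5e hunks above (set_channels, mqprio, LRO, MTU, DCB trust state) apply the same rollback discipline when the channels are not open: keep a copy of the current parameters, install the new ones, run the update callback, and restore the copy if the callback fails, so a failed request leaves the stored configuration untouched. A condensed, hedged sketch with invented names:

struct demo_params {
	int num_channels;
	int num_tc;
};

static int demo_set_params(struct demo_params *cur,
			   const struct demo_params *new_params,
			   int (*apply)(const struct demo_params *))
{
	struct demo_params old_params = *cur;	/* rollback copy */
	int err;

	*cur = *new_params;
	err = apply(cur);
	if (err)
		*cur = old_params;		/* leave state unchanged on failure */

	return err;
}
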
index 989c70c1eda37f83ac861e6039d5aa93690754ce..f0ceae65f6cfaba59b04b1053c5588c9b0efa71e 100644 (file)
@@ -737,7 +737,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
 
        netdev->features       |= NETIF_F_NETNS_LOCAL;
 
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
        netdev->hw_features    |= NETIF_F_HW_TC;
+#endif
        netdev->hw_features    |= NETIF_F_SG;
        netdev->hw_features    |= NETIF_F_IP_CSUM;
        netdev->hw_features    |= NETIF_F_IPV6_CSUM;
index 7f5851c612181e42489dddbaa2e6450950ce64f9..ca4b55839a8a71622deb2b69b0ef568cfbc85005 100644 (file)
@@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
        if (mlx5e_cqe_regb_chain(cqe))
-               if (!mlx5e_tc_update_skb(cqe, skb))
+               if (!mlx5e_tc_update_skb(cqe, skb)) {
+                       dev_kfree_skb_any(skb);
                        goto free_wqe;
+               }
 
        napi_gro_receive(rq->cq.napi, skb);
 
@@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        if (rep->vlan && skb_vlan_tag_present(skb))
                skb_vlan_pop(skb);
 
-       if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+       if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+               dev_kfree_skb_any(skb);
                goto free_wqe;
+       }
 
        napi_gro_receive(rq->cq.napi, skb);
 
@@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
 
        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
-       if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+       if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+               dev_kfree_skb_any(skb);
                goto mpwrq_cqe_out;
+       }
 
        napi_gro_receive(rq->cq.napi, skb);
 
@@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
        mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
        if (mlx5e_cqe_regb_chain(cqe))
-               if (!mlx5e_tc_update_skb(cqe, skb))
+               if (!mlx5e_tc_update_skb(cqe, skb)) {
+                       dev_kfree_skb_any(skb);
                        goto mpwrq_cqe_out;
+               }
 
        napi_gro_receive(rq->cq.napi, skb);
 
index 4cdf834fa74af679a0e2c8a9eb827f2b6882b06e..dd0bfbacad47417149a99a17ed2c38d02b3351db 100644 (file)
@@ -67,6 +67,7 @@
 #include "lib/geneve.h"
 #include "lib/fs_chains.h"
 #include "diag/en_tc_tracepoint.h"
+#include <asm/div64.h>
 
 #define nic_chains(priv) ((priv)->fs.tc.chains)
 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
        struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
        struct mlx5_flow_handle *rule;
 
+       if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+               return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
        if (flow_flag_test(flow, CT)) {
                mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
 
@@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 {
        flow_flag_clear(flow, OFFLOADED);
 
+       if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
+               goto offload_rule_0;
+
        if (flow_flag_test(flow, CT)) {
                mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
                return;
@@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
        if (attr->esw_attr->split_count)
                mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
 
+offload_rule_0:
        mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 }
 
@@ -2269,8 +2277,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
              BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
              BIT(FLOW_DISSECTOR_KEY_MPLS))) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
-               netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
-                           dissector->used_keys);
+               netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
+                          dissector->used_keys);
                return -EOPNOTSUPP;
        }
 
@@ -5007,13 +5015,13 @@ errout:
        return err;
 }
 
-static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
+static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
                               struct netlink_ext_ack *extack)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch *esw;
+       u32 rate_mbps = 0;
        u16 vport_num;
-       u32 rate_mbps;
        int err;
 
        vport_num = rpriv->rep->vport;
@@ -5030,7 +5038,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
         * Moreover, if rate is non zero we choose to configure to a minimum of
         * 1 mbit/sec.
         */
-       rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
+       if (rate) {
+               rate = (rate * BITS_PER_BYTE) + 500000;
+               rate_mbps = max_t(u32, DIV_ROUND_DOWN_ULL(rate, 1000000), 1);
+       }
+
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
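
The police-rate hunk above widens the rate to 64 bits before converting bytes per second to whole Mbit/s, because a 32-bit byte rate multiplied by eight overflows above roughly 4 Gbit/s. Note that do_div() leaves the quotient in its dividend and returns the remainder, which is why the conversion goes through a quotient helper (DIV_ROUND_DOWN_ULL()/div_u64()) rather than do_div()'s return value. A worked, hedged sketch of the conversion:

#include <linux/kernel.h>
#include <linux/math64.h>

/* Round to the nearest megabit and never report less than 1 for a
 * non-zero rate. Example: 125000 bytes/s -> 1000000 bits/s; adding the
 * 500000 rounding term and dividing by 1000000 gives 1 Mbit/s.
 */
static u32 demo_bytes_to_mbps(u64 rate_bytes)
{
	u64 bits;

	if (!rate_bytes)
		return 0;

	bits = rate_bytes * BITS_PER_BYTE + 500000;
	return max_t(u32, div_u64(bits, 1000000), 1);
}
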
index b899539a07860778fb8810843abbdc17df6ebcf9..ee4d86c1f436032e34799ed154b18bd71e905428 100644 (file)
@@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 destroy_ft:
        root->cmds->destroy_flow_table(root, ft);
 free_ft:
+       rhltable_destroy(&ft->fgs_hash);
        kfree(ft);
 unlock_root:
        mutex_unlock(&root->chain_lock);
@@ -1759,6 +1760,7 @@ search_again_locked:
                if (!fte_tmp)
                        continue;
                rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+               /* No error check needed here, because insert_fte() is not called */
                up_write_ref_node(&fte_tmp->node, false);
                tree_put_node(&fte_tmp->node, false);
                kmem_cache_free(steering->ftes_cache, fte);
@@ -1811,6 +1813,8 @@ skip_search:
                up_write_ref_node(&g->node, false);
                rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
                up_write_ref_node(&fte->node, false);
+               if (IS_ERR(rule))
+                       tree_put_node(&fte->node, false);
                return rule;
        }
        rule = ERR_PTR(-ENOENT);
@@ -1909,6 +1913,8 @@ search_again_locked:
        up_write_ref_node(&g->node, false);
        rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
        up_write_ref_node(&fte->node, false);
+       if (IS_ERR(rule))
+               tree_put_node(&fte->node, false);
        tree_put_node(&g->node, false);
        return rule;
 
index 3a9fa629503f0e0eb889ae0e4b79dc05eafee79e..d046db7bb047d5f9fc9dc59b415bef0a53efb356 100644 (file)
@@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
                               u32 key_type, u32 *p_key_id);
 void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
 
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+       return devlink_net(priv_to_devlink(dev));
+}
+
 #endif
index eb956ce904bc01e1e9ac2057c805048399a6ae4c..c0656d4782e1c3bc3d5b63091fb21eb13f6d63ba 100644 (file)
@@ -58,7 +58,7 @@ struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
-       u16                     func_id;
+       u32                     function;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned                free_count;
@@ -74,12 +74,17 @@ enum {
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
 };
 
-static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id)
+static u32 get_function(u16 func_id, bool ec_function)
+{
+       return (u32)func_id | (ec_function << 16);
+}
+
+static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
 {
        struct rb_root *root;
        int err;
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, function);
        if (root)
                return root;
 
@@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
        if (!root)
                return ERR_PTR(-ENOMEM);
 
-       err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL);
+       err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
        if (err) {
                kfree(root);
                return ERR_PTR(err);
@@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func
        return root;
 }
 
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
 {
        struct rb_node *parent = NULL;
        struct rb_root *root;
@@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
        struct fw_page *tfp;
        int i;
 
-       root = page_root_per_func_id(dev, func_id);
+       root = page_root_per_function(dev, function);
        if (IS_ERR(root))
                return PTR_ERR(root);
 
@@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 
        nfp->addr = addr;
        nfp->page = page;
-       nfp->func_id = func_id;
+       nfp->function = function;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);
@@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
 }
 
 static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
-                                   u32 func_id)
+                                   u32 function)
 {
        struct fw_page *result = NULL;
        struct rb_root *root;
        struct rb_node *tmp;
        struct fw_page *tfp;
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return NULL;
 
@@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
        return err;
 }
 
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id)
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
 {
        struct fw_page *fp = NULL;
        struct fw_page *iter;
        unsigned n;
 
        list_for_each_entry(iter, &dev->priv.free_list, list) {
-               if (iter->func_id != func_id)
+               if (iter->function != function)
                        continue;
                fp = iter;
        }
@@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
 {
        struct rb_root *root;
 
-       root = xa_load(&dev->priv.page_root_xa, fwp->func_id);
+       root = xa_load(&dev->priv.page_root_xa, fwp->function);
        if (WARN_ON_ONCE(!root))
                return;
 
@@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
        kfree(fwp);
 }
 
-static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
+static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
 {
        struct fw_page *fwp;
        int n;
 
-       fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id);
+       fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
        if (!fwp) {
                mlx5_core_warn_rl(dev, "page not found\n");
                return;
@@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
                list_add(&fwp->list, &dev->priv.free_list);
 }
 
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
 {
        struct device *device = mlx5_core_dma_dev(dev);
        int nid = dev_to_node(device);
@@ -291,7 +296,7 @@ map:
                goto map;
        }
 
-       err = insert_page(dev, addr, page, func_id);
+       err = insert_page(dev, addr, page, function);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -328,6 +333,7 @@ static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail, bool ec_function)
 {
+       u32 function = get_function(func_id, ec_function);
        u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
        int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
        u64 addr;
@@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
 
        for (i = 0; i < npages; i++) {
 retry:
-               err = alloc_4k(dev, &addr, func_id);
+               err = alloc_4k(dev, &addr, function);
                if (err) {
                        if (err == -ENOMEM)
-                               err = alloc_system_page(dev, func_id);
+                               err = alloc_system_page(dev, function);
                        if (err)
                                goto out_4k;
 
@@ -384,7 +390,7 @@ retry:
 
 out_4k:
        for (i--; i >= 0; i--)
-               free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id);
+               free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
 out_free:
        kvfree(in);
        if (notify_fail)
@@ -392,14 +398,15 @@ out_free:
        return err;
 }
 
-static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id,
+static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
                              bool ec_function)
 {
+       u32 function = get_function(func_id, ec_function);
        struct rb_root *root;
        struct rb_node *p;
        int npages = 0;
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, function);
        if (WARN_ON_ONCE(!root))
                return;
 
@@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        struct rb_root *root;
        struct fw_page *fwp;
        struct rb_node *p;
+       bool ec_function;
        u32 func_id;
        u32 npages;
        u32 i = 0;
@@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        /* No hard feelings, we want our pages back! */
        npages = MLX5_GET(manage_pages_in, in, input_num_entries);
        func_id = MLX5_GET(manage_pages_in, in, function_id);
+       ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);
 
-       root = xa_load(&dev->priv.page_root_xa, func_id);
+       root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
        if (WARN_ON_ONCE(!root))
                return -EEXIST;
 
@@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
        return 0;
 }
 
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                         int *nclaimed, bool ec_function)
 {
+       u32 function = get_function(func_id, ec_function);
        int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
        u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
        int num_claimed;
@@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        }
 
        for (i = 0; i < num_claimed; i++)
-               free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id);
+               free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);
 
        if (nclaimed)
                *nclaimed = num_claimed;
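The mlx5 hunks above re-key all page tracking on a combined "function" value obtained from get_function(func_id, ec_function), so pages granted to an embedded-CPU (EC) function no longer collide with pages of the host function that carries the same id. The helper itself is not part of these hunks; a minimal sketch of how such a key could be packed, under that assumption:

static u32 get_function(u16 func_id, bool ec_function)
{
        /* assumed packing: function id in the low 16 bits, EC flag just above */
        return (u32)func_id | ((u32)ec_function << 16);
}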
index c6c5826aba41e8c31f1b8daa1672b9e032959e4d..1892cea05ee7cc94d5597206bed0ca8643cb2984 100644 (file)
@@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
 
 static const
 struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+       .is_static = true,
        .can_handle = mlxsw_sp1_span_cpu_can_handle,
        .parms_set = mlxsw_sp1_span_entry_cpu_parms,
        .configure = mlxsw_sp1_span_entry_cpu_configure,
@@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
 
 static const
 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+       .is_static = true,
        .can_handle = mlxsw_sp_port_dev_check,
        .parms_set = mlxsw_sp_span_entry_phys_parms,
        .configure = mlxsw_sp_span_entry_phys_configure,
@@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
 
 static const
 struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+       .is_static = true,
        .can_handle = mlxsw_sp2_span_cpu_can_handle,
        .parms_set = mlxsw_sp2_span_entry_cpu_parms,
        .configure = mlxsw_sp2_span_entry_cpu_configure,
@@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
                if (!refcount_read(&curr->ref_count))
                        continue;
 
+               if (curr->ops->is_static)
+                       continue;
+
                err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
                if (err)
                        continue;
index d907718bc8c584005547c8dcd7c8374e52eebd49..aa1cd409c0e2ea15d585cb10d6cc344bc3dce879 100644 (file)
@@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry {
 };
 
 struct mlxsw_sp_span_entry_ops {
+       bool is_static;
        bool (*can_handle)(const struct net_device *to_dev);
        int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
                         const struct net_device *to_dev,
index 0b9992bd66262935f2c00adefdfe52f6cdd7d603..ff87a0bc089cffc6cbe71c34ea88d86132ae1d53 100644 (file)
@@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
                      const unsigned char mac[ETH_ALEN],
                      unsigned int vid, enum macaccess_entry_type type)
 {
+       u32 cmd = ANA_TABLES_MACACCESS_VALID |
+               ANA_TABLES_MACACCESS_DEST_IDX(port) |
+               ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
+               ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
+       unsigned int mc_ports;
+
+       /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
+       if (type == ENTRYTYPE_MACv4)
+               mc_ports = (mac[1] << 8) | mac[2];
+       else if (type == ENTRYTYPE_MACv6)
+               mc_ports = (mac[0] << 8) | mac[1];
+       else
+               mc_ports = 0;
+
+       if (mc_ports & BIT(ocelot->num_phys_ports))
+               cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+
        ocelot_mact_select(ocelot, mac, vid);
 
        /* Issue a write command */
-       ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID |
-                            ANA_TABLES_MACACCESS_DEST_IDX(port) |
-                            ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
-                            ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN),
-                            ANA_TABLES_MACACCESS);
+       ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
 
        return ocelot_mact_wait_for_completion(ocelot);
 }
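For Ocelot multicast entries (ENTRYTYPE_MACv4/MACv6) the MAC-address bytes carry a destination port mask rather than a single port index, and the learn command now sets ANA_TABLES_MACACCESS_MAC_CPU_COPY whenever that mask contains the CPU port (bit ocelot->num_phys_ports). A worked example with assumed numbers, for a switch with four physical ports:

/*
 * Assumed example: an ENTRYTYPE_MACv4 entry with mac[1] = 0x00 and
 * mac[2] = 0x13 gives mc_ports = 0x0013, i.e. ports 0, 1 and 4.  On a
 * 4-port switch the CPU port is bit 4 (0x10), so mc_ports & BIT(4) is
 * non-zero and MAC_CPU_COPY is added to the learn command, making the
 * hardware copy frames matching this entry to the CPU as well.
 */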
index 2bd2840d88bdc89835f08a56fd7c35581492162c..42230f92ca9c8a142f2212a93b98a3190cbaa6de 100644 (file)
@@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        int ret = 0;
 
-       if (!ocelot_netdevice_dev_check(dev))
-               return 0;
-
        if (event == NETDEV_PRECHANGEUPPER &&
+           ocelot_netdevice_dev_check(dev) &&
            netif_is_lag_master(info->upper_dev)) {
                struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
                struct netlink_ext_ack *extack;
index a569abe7f5ef2e4f2ba1e578908987362d8a3f56..0d78408b4e269fb0b6a21c5fb91f0eff2a28ff37 100644 (file)
@@ -4046,17 +4046,72 @@ err_out:
        return -EIO;
 }
 
-static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp)
+static bool rtl_skb_is_udp(struct sk_buff *skb)
+{
+       int no = skb_network_offset(skb);
+       struct ipv6hdr *i6h, _i6h;
+       struct iphdr *ih, _ih;
+
+       switch (vlan_get_protocol(skb)) {
+       case htons(ETH_P_IP):
+               ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih);
+               return ih && ih->protocol == IPPROTO_UDP;
+       case htons(ETH_P_IPV6):
+               i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h);
+               return i6h && i6h->nexthdr == IPPROTO_UDP;
+       default:
+               return false;
+       }
+}
+
+#define RTL_MIN_PATCH_LEN      47
+
+/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */
+static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp,
+                                           struct sk_buff *skb)
 {
+       unsigned int padto = 0, len = skb->len;
+
+       if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN &&
+           rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) {
+               unsigned int trans_data_len = skb_tail_pointer(skb) -
+                                             skb_transport_header(skb);
+
+               if (trans_data_len >= offsetof(struct udphdr, len) &&
+                   trans_data_len < RTL_MIN_PATCH_LEN) {
+                       u16 dest = ntohs(udp_hdr(skb)->dest);
+
+                       /* dest is a standard PTP port */
+                       if (dest == 319 || dest == 320)
+                               padto = len + RTL_MIN_PATCH_LEN - trans_data_len;
+               }
+
+               if (trans_data_len < sizeof(struct udphdr))
+                       padto = max_t(unsigned int, padto,
+                                     len + sizeof(struct udphdr) - trans_data_len);
+       }
+
+       return padto;
+}
+
+static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
+                                          struct sk_buff *skb)
+{
+       unsigned int padto;
+
+       padto = rtl8125_quirk_udp_padto(tp, skb);
+
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_34:
        case RTL_GIGA_MAC_VER_60:
        case RTL_GIGA_MAC_VER_61:
        case RTL_GIGA_MAC_VER_63:
-               return true;
+               padto = max_t(unsigned int, padto, ETH_ZLEN);
        default:
-               return false;
+               break;
        }
+
+       return padto;
 }
 
 static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts)
@@ -4128,9 +4183,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
 
                opts[1] |= transport_offset << TCPHO_SHIFT;
        } else {
-               if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp)))
-                       /* eth_skb_pad would free the skb on error */
-                       return !__skb_put_padto(skb, ETH_ZLEN, false);
+               unsigned int padto = rtl_quirk_packet_padto(tp, skb);
+
+               /* skb_padto would free the skb on error */
+               return !__skb_put_padto(skb, padto, false);
        }
 
        return true;
@@ -4307,6 +4363,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb,
                if (skb->len < ETH_ZLEN)
                        features &= ~NETIF_F_CSUM_MASK;
 
+               if (rtl_quirk_packet_padto(tp, skb))
+                       features &= ~NETIF_F_CSUM_MASK;
+
                if (transport_offset > TCPHO_MAX &&
                    rtl_chip_supports_csum_v2(tp))
                        features &= ~NETIF_F_CSUM_MASK;
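rtl_quirk_packet_padto() above reports the minimum length a frame must be padded to before the RTL8125 hardware may checksum it; rtl8169_features_check() then drops checksum offload for any frame that needs such padding, so the frame takes the non-offload path where __skb_put_padto() applies the pad. A worked example with assumed numbers:

/*
 * Assumed example on an RTL8125: a UDP/IPv4 frame with skb->len = 60,
 * destination port 319 (a PTP event port) and only trans_data_len = 30
 * bytes present behind the transport header.  Since 30 < RTL_MIN_PATCH_LEN
 * (47), the quirk requests padto = 60 + 47 - 30 = 77; checksum offload is
 * disabled for the frame and it is padded to 77 bytes before transmission.
 */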
@@ -4645,10 +4704,10 @@ static int rtl8169_close(struct net_device *dev)
 
        cancel_work_sync(&tp->wk.work);
 
-       phy_disconnect(tp->phydev);
-
        free_irq(pci_irq_vector(pdev, 0), tp);
 
+       phy_disconnect(tp->phydev);
+
        dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
                          tp->RxPhyAddr);
        dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
index c633046329352601ce76ad7d12913fffd420e9ec..590b088bc4c7f3e2f0d6d79443fcff6a6f4da077 100644 (file)
@@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev)
        /* Free all the skbuffs in the Rx queue and the DMA buffer. */
        sh_eth_ring_free(ndev);
 
-       pm_runtime_put_sync(&mdp->pdev->dev);
-
        mdp->is_opened = 0;
 
+       pm_runtime_put(&mdp->pdev->dev);
+
        return 0;
 }
 
@@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
        return 0;
 }
 
+static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+       int res;
+
+       pm_runtime_get_sync(bus->parent);
+       res = mdiobb_read(bus, phy, reg);
+       pm_runtime_put(bus->parent);
+
+       return res;
+}
+
+static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+       int res;
+
+       pm_runtime_get_sync(bus->parent);
+       res = mdiobb_write(bus, phy, reg, val);
+       pm_runtime_put(bus->parent);
+
+       return res;
+}
+
 /* MDIO bus init function */
 static int sh_mdio_init(struct sh_eth_private *mdp,
                        struct sh_eth_plat_data *pd)
@@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
        if (!mdp->mii_bus)
                return -ENOMEM;
 
+       /* Wrap accessors with Runtime PM-aware ops */
+       mdp->mii_bus->read = sh_mdiobb_read;
+       mdp->mii_bus->write = sh_mdiobb_write;
+
        /* Hook up MII support for ethtool */
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = dev;
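These wrappers pair with the mdio-bitbang change further down in this diff, where mdiobb_read()/mdiobb_write() are exported so drivers can call them from accessors that hold a runtime-PM reference for the duration of each MDIO transfer; phylib can then poll the PHY even while the interface is closed. A generic sketch of the pattern (names assumed, mirroring the sh_eth wrappers above):

static int pm_aware_mdiobb_read(struct mii_bus *bus, int phy, int reg)
{
        int ret;

        /* resume the MDIO controller's parent device for this transfer */
        pm_runtime_get_sync(bus->parent);
        ret = mdiobb_read(bus, phy, reg);
        pm_runtime_put(bus->parent);

        return ret;
}

Such accessors are installed right after the bus is allocated and before it is registered, as the sh_mdio_init() hunk above does.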
index 82b1c7a5a7a948eef7615fcf426c11321971b8c7..ba0e4d2b256a4b158b7bddb7bc5a8235c38b97ac 100644 (file)
@@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
                                if (ret) {
                                        dev_err(&pdev->dev,
                                                "Failed to set tx_clk\n");
-                                       return ret;
+                                       goto err_remove_config_dt;
                                }
                        }
                }
@@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
                        if (ret) {
                                dev_err(&pdev->dev,
                                        "Failed to set clk_ptp_ref\n");
-                               return ret;
+                               goto err_remove_config_dt;
                        }
                }
        }
index 9a6a519426a08ade395e09acdd46fc8b83bcdb19..103d2448e9e0dd82d1b11b53e189073c5a9fe99c 100644 (file)
@@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev,
                                struct plat_stmmacenet_data *plat)
 {
        plat->bus_id = 2;
+       plat->addr64 = 32;
        return ehl_common_data(pdev, plat);
 }
 
@@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev,
                                struct plat_stmmacenet_data *plat)
 {
        plat->bus_id = 3;
+       plat->addr64 = 32;
        return ehl_common_data(pdev, plat);
 }
 
index 14d9a791924bf9b6b16f53cba222ec6ff20cf2bf..34e5f2155d6206463523624b12c8091839ebba1d 100644 (file)
@@ -440,7 +440,7 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
 {
        u32 channel_id = gsi_channel_id(channel);
-       void *virt = channel->gsi->virt;
+       void __iomem *virt = channel->gsi->virt;
        u32 val;
 
        val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
@@ -1373,7 +1373,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
        /* Hardware requires a 2^n ring size, with alignment equal to size */
        ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (ring->virt && addr % size) {
-               dma_free_coherent(dev, size, ring->virt, ring->addr);
+               dma_free_coherent(dev, size, ring->virt, addr);
                dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
                        size);
                return -EINVAL; /* Not a good error value, but distinct */
index 9f4be9812a1f31d48732f96ce27b77624b4f9a2d..612afece303f3519a35fd8fc9c834bc72bd8d7f4 100644 (file)
@@ -588,7 +588,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 
        /* Note that HDR_ENDIANNESS indicates big endian header fields */
        if (endpoint->data->qmap)
-               val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+               val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 
        iowrite32(val, endpoint->ipa->reg_virt + offset);
 }
@@ -1164,8 +1164,8 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
                return true;
        if (!status->pkt_len)
                return true;
-       endpoint_id = u32_get_bits(status->endp_dst_idx,
-                                  IPA_STATUS_DST_IDX_FMASK);
+       endpoint_id = u8_get_bits(status->endp_dst_idx,
+                                 IPA_STATUS_DST_IDX_FMASK);
        if (endpoint_id != endpoint->endpoint_id)
                return true;
 
index 0cc3a3374caa260c6d4401b50b82b47f8d14d5a2..f25029b9ec8573e1b92819d5b9c36abe49633728 100644 (file)
@@ -336,7 +336,7 @@ static void ipa_imem_exit(struct ipa *ipa)
 
                size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
                if (size != ipa->imem_size)
-                       dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+                       dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
                                 size, ipa->imem_size);
        } else {
                dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
@@ -440,7 +440,7 @@ static void ipa_smem_exit(struct ipa *ipa)
 
                size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
                if (size != ipa->smem_size)
-                       dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+                       dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
                                 size, ipa->smem_size);
 
        } else {
index 5136275c8e7399fbbd74d048abb89f50929e5af3..d3915f83185430e9db7e6cfb3903af50ba244d7a 100644 (file)
@@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
        return dev_addr;
 }
 
-static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+int mdiobb_read(struct mii_bus *bus, int phy, int reg)
 {
        struct mdiobb_ctrl *ctrl = bus->priv;
        int ret, i;
@@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
        mdiobb_get_bit(ctrl);
        return ret;
 }
+EXPORT_SYMBOL(mdiobb_read);
 
-static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
        struct mdiobb_ctrl *ctrl = bus->priv;
 
@@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
        mdiobb_get_bit(ctrl);
        return 0;
 }
+EXPORT_SYMBOL(mdiobb_write);
 
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
 {
index c19dac21c468b3e931fbeb05f7a26915817368ea..dd7917cab2b124c2f5e697c221f8308c82235fba 100644 (file)
@@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team)
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;
 
-       list_for_each_entry(port, &team->port_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(port, &team->port_list, list) {
                vlan_features = netdev_increment_features(vlan_features,
                                        port->dev->vlan_features,
                                        TEAM_VLAN_FEATURES);
@@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team)
                if (port->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = port->dev->hard_header_len;
        }
+       rcu_read_unlock();
 
        team->dev->vlan_features = vlan_features;
        team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
@@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team)
 
 static void team_compute_features(struct team *team)
 {
-       mutex_lock(&team->lock);
        __team_compute_features(team);
-       mutex_unlock(&team->lock);
        netdev_change_features(team->dev);
 }
 
index 6aaa0675c28a397c30991375f22256e9209bb2a3..a9b55102865955761eca676caca4149fdb42cb16 100644 (file)
@@ -968,6 +968,12 @@ static const struct usb_device_id  products[] = {
                                      USB_CDC_SUBCLASS_ETHERNET,
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&wwan_info,
+}, {
+       /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
+       USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
                        USB_CDC_PROTO_NONE),
index 5a78848db93fdaba8a6888fad3cd2b13ac03cbd6..291e76d32abe7c4ee928f8af1e669d9720495667 100644 (file)
@@ -1827,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
        uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
        uint32_t tx_speed = le32_to_cpu(data->ULBitRate);
 
+       /* if the speed hasn't changed, don't report it.
+        * RTL8156 shipped before 2021 sends this notification about every 32 ms.
+        */
+       if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+               return;
+
+       dev->rx_speed = rx_speed;
+       dev->tx_speed = tx_speed;
+
        /*
         * Currently the USB-NET API does not support reporting the actual
         * device speed. Do print it instead.
@@ -1867,7 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
                 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
                 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
                 */
-               usbnet_link_change(dev, !!event->wValue, 0);
+               if (netif_carrier_ok(dev->net) != !!event->wValue)
+                       usbnet_link_change(dev, !!event->wValue, 0);
                break;
 
        case USB_CDC_NOTIFY_SPEED_CHANGE:
index af19513a9f75b5a72add34788a7a6134d10b2e09..cc4819282820bf575ae8c8d2f364e8e1c7d898b9 100644 (file)
@@ -1302,6 +1302,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)},    /* Olivetti Olicard 160 */
        {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
+       {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)},   /* Cinterion ALASxx (1 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
index 7220fc8fd9b0dbee1fb2c483fc0394b9f2aab943..8280092066e7706b0f98c561edbac6df052f75bc 100644 (file)
@@ -314,6 +314,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
 const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
 const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
 const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
+const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203";
 const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
 const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
 const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
@@ -340,6 +341,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
+const struct iwl_cfg iwl_qu_b0_hr_b0 = {
+       .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
 const struct iwl_cfg iwl_ax201_cfg_qu_hr = {
        .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
        .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -366,6 +379,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = {
        .num_rbds = IWL_NUM_RBDS_22000_HE,
 };
 
+const struct iwl_cfg iwl_qu_c0_hr_b0 = {
+       .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
+       IWL_DEVICE_22500,
+       /*
+        * This device doesn't support receiving BlockAck with a large bitmap
+        * so we need to restrict the size of transmitted aggregation to the
+        * HT size; mac80211 would otherwise pick the HE max (256) by default.
+        */
+       .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+       .num_rbds = IWL_NUM_RBDS_22000_HE,
+};
+
 const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = {
        .name = "Intel(R) Wi-Fi 6 AX201 160MHz",
        .fw_name_pre = IWL_QU_C_HR_B_FW_PRE,
index 15248b06438036724b07bbca02f65cb6176e7c81..d8b7776a8dde1fe1797b664abfea43c1e545e1ca 100644 (file)
@@ -80,19 +80,45 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
 }
 
 /*
- * Evaluate a DSM with no arguments and a single u8 return value (inside a
- * buffer object), verify and return that value.
+ * Generic function to evaluate a DSM with no arguments
+ * and an integer return value
+ * (either as an integer object or inside a buffer object);
+ * verify the object and assign the result to the "value" parameter.
+ * Return 0 on success or the appropriate errno otherwise.
  */
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func,
+                                   u64 *value, size_t expected_size)
 {
        union acpi_object *obj;
-       int ret;
+       int ret = 0;
 
        obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL);
-       if (IS_ERR(obj))
+       if (IS_ERR(obj)) {
+               IWL_DEBUG_DEV_RADIO(dev,
+                                   "Failed to get DSM object, func=%d\n",
+                                   func);
                return -ENOENT;
+       }
+
+       if (obj->type == ACPI_TYPE_INTEGER) {
+               *value = obj->integer.value;
+       } else if (obj->type == ACPI_TYPE_BUFFER) {
+               __le64 le_value = 0;
 
-       if (obj->type != ACPI_TYPE_BUFFER) {
+               if (WARN_ON_ONCE(expected_size > sizeof(le_value)))
+                       return -EINVAL;
+
+               /* if the buffer size doesn't match the expected size */
+               if (obj->buffer.length != expected_size)
+                       IWL_DEBUG_DEV_RADIO(dev,
+                                           "ACPI: DSM invalid buffer size, padding or truncating (%d)\n",
+                                           obj->buffer.length);
+
+                /* assuming LE from Intel BIOS spec */
+               memcpy(&le_value, obj->buffer.pointer,
+                      min_t(size_t, expected_size, (size_t)obj->buffer.length));
+               *value = le64_to_cpu(le_value);
+       } else {
                IWL_DEBUG_DEV_RADIO(dev,
                                    "ACPI: DSM method did not return a valid object, type=%d\n",
                                    obj->type);
@@ -100,15 +126,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
                goto out;
        }
 
-       if (obj->buffer.length != sizeof(u8)) {
-               IWL_DEBUG_DEV_RADIO(dev,
-                                   "ACPI: DSM method returned invalid buffer, length=%d\n",
-                                   obj->buffer.length);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = obj->buffer.pointer[0];
        IWL_DEBUG_DEV_RADIO(dev,
                            "ACPI: DSM method evaluated: func=%d, ret=%d\n",
                            func, ret);
@@ -116,6 +133,24 @@ out:
        ACPI_FREE(obj);
        return ret;
 }
+
+/*
+ * Evaluate a DSM with no arguments and a u8 return value.
+ */
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
+{
+       int ret;
+       u64 val;
+
+       ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8));
+
+       if (ret < 0)
+               return ret;
+
+       /* cast val (u64) down to a u8 */
+       *value = (u8)val;
+       return 0;
+}
 IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8);
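The reworked helper accepts both integer and buffer DSM return objects and funnels either through a little-endian u64 before the typed wrapper narrows it, keeping the value separate from the negative errno now reserved for errors. A short trace with an assumed BIOS response:

/*
 * Assumed example: the BIOS returns a 1-byte buffer { 0x02 } for a function
 * queried through iwl_acpi_get_dsm_u8().  expected_size = sizeof(u8) = 1
 * matches obj->buffer.length, so the memcpy() fills le_value = 0x02,
 * le64_to_cpu() yields 2, the wrapper stores *value = 2 and returns 0.
 */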
 
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
index 042dd247d387d7dbe047d400898cf638f52fad37..1cce30d1ef5590f6fdb7aed2e20045e846f745a2 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
  * Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
  */
 #ifndef __iwl_fw_acpi__
 #define __iwl_fw_acpi__
@@ -99,7 +99,7 @@ struct iwl_fw_runtime;
 
 void *iwl_acpi_get_object(struct device *dev, acpi_string method);
 
-int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value);
 
 union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
                                         union acpi_object *data,
@@ -159,7 +159,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
        return ERR_PTR(-ENOENT);
 }
 
-static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func)
+static inline
+int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value)
 {
        return -ENOENT;
 }
index 6d8f7bff12432c5b2ef165c92167b53e60079fce..895a907acdf0fb881b667b0025923b7635c71e50 100644 (file)
@@ -224,40 +224,46 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
 int iwl_pnvm_load(struct iwl_trans *trans,
                  struct iwl_notif_wait_data *notif_wait)
 {
-       const struct firmware *pnvm;
        struct iwl_notification_wait pnvm_wait;
        static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
                                                PNVM_INIT_COMPLETE_NTFY) };
-       char pnvm_name[64];
-       int ret;
 
        /* if the SKU_ID is empty, there's nothing to do */
        if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
                return 0;
 
-       /* if we already have it, nothing to do either */
-       if (trans->pnvm_loaded)
-               return 0;
+       /* load from disk only if we haven't done it (or tried) before */
+       if (!trans->pnvm_loaded) {
+               const struct firmware *pnvm;
+               char pnvm_name[64];
+               int ret;
+
+               /*
+                * The prefix unfortunately includes a hyphen at the end, so
+                * don't add the dot here...
+                */
+               snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+                        trans->cfg->fw_name_pre);
+
+               /* ...but replace the hyphen with the dot here. */
+               if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+                       pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+               ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+               if (ret) {
+                       IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+                                    pnvm_name, ret);
+                       /*
+                        * Pretend we've loaded it - at least we've tried and
+                        * couldn't load it at all, so there's no point in
+                        * trying again over and over.
+                        */
+                       trans->pnvm_loaded = true;
+               } else {
+                       iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
 
-       /*
-        * The prefix unfortunately includes a hyphen at the end, so
-        * don't add the dot here...
-        */
-       snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
-                trans->cfg->fw_name_pre);
-
-       /* ...but replace the hyphen with the dot here. */
-       if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
-               pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
-
-       ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
-       if (ret) {
-               IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
-                            pnvm_name, ret);
-       } else {
-               iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
-
-               release_firmware(pnvm);
+                       release_firmware(pnvm);
+               }
        }
 
        iwl_init_notification_wait(notif_wait, &pnvm_wait,
index 27cb0406ba9a216e549da101b92062294a404b6e..86e1d57df65ed54a8dc32f64b78ece02732a255d 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
 #ifndef __IWL_CONFIG_H__
@@ -445,7 +445,7 @@ struct iwl_cfg {
 #define IWL_CFG_CORES_BT_GNSS          0x5
 
 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4)
-#define IWL_SUBDEVICE_NO_160(subdevice)        ((u16)((subdevice) & 0x0100) >> 9)
+#define IWL_SUBDEVICE_NO_160(subdevice)        ((u16)((subdevice) & 0x0200) >> 9)
 #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10)
 
 struct iwl_dev_info {
@@ -491,6 +491,7 @@ extern const char iwl9260_killer_1550_name[];
 extern const char iwl9560_killer_1550i_name[];
 extern const char iwl9560_killer_1550s_name[];
 extern const char iwl_ax200_name[];
+extern const char iwl_ax203_name[];
 extern const char iwl_ax201_name[];
 extern const char iwl_ax101_name[];
 extern const char iwl_ax200_killer_1650w_name[];
@@ -574,6 +575,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
 extern const struct iwl_cfg iwl_qu_b0_hr1_b0;
 extern const struct iwl_cfg iwl_qu_c0_hr1_b0;
 extern const struct iwl_cfg iwl_quz_a0_hr1_b0;
+extern const struct iwl_cfg iwl_qu_b0_hr_b0;
+extern const struct iwl_cfg iwl_qu_c0_hr_b0;
 extern const struct iwl_cfg iwl_ax200_cfg_cc;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
 extern const struct iwl_cfg iwl_ax201_cfg_qu_hr;
index a654147d3cd61a162c9c9cb1a70078cf331e3e6c..a80a35a7740f38927f431d27764669647e6ab9ec 100644 (file)
@@ -180,13 +180,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
        if (le32_to_cpu(tlv->length) < sizeof(*reg))
                return -EINVAL;
 
-       /* For safe using a string from FW make sure we have a
-        * null terminator
-        */
-       reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
-
-       IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
-
        if (id >= IWL_FW_INI_MAX_REGION_ID) {
                IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
                return -EINVAL;
index 2ac20d0a30eb654d07de933610215c133b00d6cc..2b7ef1583e7fd44aa3a3294963233421593f6fea 100644 (file)
@@ -150,16 +150,17 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
 }
 IWL_EXPORT_SYMBOL(iwl_read_prph);
 
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms)
 {
        unsigned long flags;
 
        if (iwl_trans_grab_nic_access(trans, &flags)) {
+               mdelay(delay_ms);
                iwl_write_prph_no_grab(trans, ofs, val);
                iwl_trans_release_nic_access(trans, &flags);
        }
 }
-IWL_EXPORT_SYMBOL(iwl_write_prph);
+IWL_EXPORT_SYMBOL(iwl_write_prph_delay);
 
 int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
                      u32 bits, u32 mask, int timeout)
@@ -219,8 +220,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
 void iwl_force_nmi(struct iwl_trans *trans)
 {
        if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
-               iwl_write_prph(trans, DEVICE_SET_NMI_REG,
-                              DEVICE_SET_NMI_VAL_DRV);
+               iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG,
+                                    DEVICE_SET_NMI_VAL_DRV, 1);
        else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
                iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER,
                                UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER);
index 39bceee4e9e76f64b21d6de3599db52b896253c3..3c21c0e081f8e959f09b99e51b4829215b7b6cad 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
  */
 #ifndef __iwl_io_h__
 #define __iwl_io_h__
@@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
 u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
 void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
 void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
-void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs,
+                         u32 val, u32 delay_ms);
+static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+       iwl_write_prph_delay(trans, ofs, val, 0);
+}
+
 int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
                      u32 bits, u32 mask, int timeout);
 void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
index 0b03fdedc1f70e4861adc4479926400a9d36fa9f..1158e256f6012b1b2f145a0ef5466755f641cf84 100644 (file)
 #define RADIO_RSP_ADDR_POS             (6)
 #define RADIO_RSP_RD_CMD               (3)
 
+/* LTR control (Qu only) */
+#define HPM_MAC_LTR_CSR                        0xa0348c
+#define HPM_MAC_LRT_ENABLE_ALL         0xf
+/* also uses CSR_LTR_* for values */
+#define HPM_UMAC_LTR                   0xa03480
+
 /* FW monitor */
 #define MON_BUFF_SAMPLE_CTL            (0xa03c00)
 #define MON_BUFF_BASE_ADDR             (0xa03c1c)
index c025188fa9bc5017688f5fe7a097f37cda27dd5d..df018972a46bfa914a8478433130ba7b5c25acf9 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -2032,8 +2032,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 
        mutex_lock(&mvm->mutex);
 
-       clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
-
        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
@@ -2148,6 +2146,8 @@ out_iterate:
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 out:
+       clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
        /* no need to reset the device in unified images, if successful */
        if (unified_image && !ret) {
                /* nothing else to do if we already sent D0I3_END_CMD */
index 573e46956c14d6ce9ce80253880c618287aeac58..38d0bfb649ccc7db998bba57e0582e93210ab35a 100644 (file)
@@ -459,7 +459,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
        const size_t bufsz = sizeof(buf);
        int pos = 0;
 
+       mutex_lock(&mvm->mutex);
        iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+       mutex_unlock(&mvm->mutex);
+
        do_div(curr_os, NSEC_PER_USEC);
        diff = curr_os - curr_gp2;
        pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
index 0637eb1cff4e5c7a96f20d4f5cf5fb1bba88687a..313e9f106f4659caae20ab8250540c32ac83a5de 100644 (file)
@@ -1090,20 +1090,22 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
 
 static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
 {
+       u8 value;
+
        int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
-                                     DSM_FUNC_ENABLE_INDONESIA_5G2);
+                                     DSM_FUNC_ENABLE_INDONESIA_5G2, &value);
 
        if (ret < 0)
                IWL_DEBUG_RADIO(mvm,
                                "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
                                ret);
 
-       else if (ret >= DSM_VALUE_INDONESIA_MAX)
+       else if (value >= DSM_VALUE_INDONESIA_MAX)
                IWL_DEBUG_RADIO(mvm,
-                               "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
-                               ret);
+                               "DSM function ENABLE_INDONESIA_5G2 returned an invalid value, value=%d\n",
+                               value);
 
-       else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+       else if (value == DSM_VALUE_INDONESIA_ENABLE) {
                IWL_DEBUG_RADIO(mvm,
                                "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
                return DSM_VALUE_INDONESIA_ENABLE;
@@ -1114,25 +1116,26 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
 
 static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
 {
+       u8 value;
        int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
-                                     DSM_FUNC_DISABLE_SRD);
+                                     DSM_FUNC_DISABLE_SRD, &value);
 
        if (ret < 0)
                IWL_DEBUG_RADIO(mvm,
                                "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
                                ret);
 
-       else if (ret >= DSM_VALUE_SRD_MAX)
+       else if (value >= DSM_VALUE_SRD_MAX)
                IWL_DEBUG_RADIO(mvm,
-                               "DSM function DISABLE_SRD return invalid value, ret=%d\n",
-                               ret);
+                               "DSM function DISABLE_SRD returned an invalid value, value=%d\n",
+                               value);
 
-       else if (ret == DSM_VALUE_SRD_PASSIVE) {
+       else if (value == DSM_VALUE_SRD_PASSIVE) {
                IWL_DEBUG_RADIO(mvm,
                                "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
                return DSM_VALUE_SRD_PASSIVE;
 
-       } else if (ret == DSM_VALUE_SRD_DISABLE) {
+       } else if (value == DSM_VALUE_SRD_DISABLE) {
                IWL_DEBUG_RADIO(mvm,
                                "Evaluated DSM function DISABLE_SRD: disabling SRD\n");
                return DSM_VALUE_SRD_DISABLE;
index da32937ba9a7898a5517d8418e1daba0f95252b0..43ff0407916affb47eecdb7238b629971013232a 100644 (file)
@@ -4194,6 +4194,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
        iwl_mvm_binding_remove_vif(mvm, vif);
 
 out:
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
+           switching_chanctx)
+               return;
        mvmvif->phy_ctxt = NULL;
        iwl_mvm_power_update_mac(mvm);
 }
index 98f62d78cf9ca3da2d782e74ec40005b33172807..61618f607927d82e435183a4fe31c87c5e8d39c9 100644 (file)
@@ -791,6 +791,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        if (!mvm->scan_cmd)
                goto out_free;
 
+       /* invalidate ids to prevent accidental removal of sta_id 0 */
+       mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
+       mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
+
        /* Set EBS as successful as long as not stated otherwise by the FW. */
        mvm->last_ebs_successful = true;
 
@@ -1205,6 +1209,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
        reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
        if (device_reprobe(reprobe->dev))
                dev_err(reprobe->dev, "reprobe failed!\n");
+       put_device(reprobe->dev);
        kfree(reprobe);
        module_put(THIS_MODULE);
 }
@@ -1255,7 +1260,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
                        module_put(THIS_MODULE);
                        return;
                }
-               reprobe->dev = mvm->trans->dev;
+               reprobe->dev = get_device(mvm->trans->dev);
                INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
                schedule_work(&reprobe->work);
        } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
index dc174410bf9c29075f5bdf31ae94ea3beb038021..578c353ae02c9f830ad860b0eb474d59fa8055c5 100644 (file)
@@ -2057,6 +2057,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
+               return -EINVAL;
+
        iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
        if (ret)
@@ -2071,6 +2074,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
 
        lockdep_assert_held(&mvm->mutex);
 
+       if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
+               return -EINVAL;
+
        iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
        ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
        if (ret)
index a983c215df310776ffe67f3b3ffa203eab609bfc..3712adc3ccc2511d46bcc855efbfba41c487d8e6 100644 (file)
@@ -773,6 +773,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 
        next = skb_gso_segment(skb, netdev_flags);
        skb_shinfo(skb)->gso_size = mss;
+       skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
        if (WARN_ON_ONCE(IS_ERR(next)))
                return -EINVAL;
        else if (next)
@@ -795,6 +796,8 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 
                if (tcp_payload_len > mss) {
                        skb_shinfo(tmp)->gso_size = mss;
+                       skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
+                                                          SKB_GSO_TCPV6;
                } else {
                        if (qos) {
                                u8 *qc;
index 36bf414a388af5101060d9b6a2b3189fa82df52f..5b5134dd49af8d5ee36a954412466e480c4e92d3 100644 (file)
@@ -75,6 +75,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                                 const struct fw_img *fw)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
+                     u32_encode_bits(250,
+                                     CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
+                     CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+                     u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
+                                     CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
+                     u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
        struct iwl_context_info_gen3 *ctxt_info_gen3;
        struct iwl_prph_scratch *prph_scratch;
        struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
@@ -189,8 +198,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        /* Allocate IML */
        iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
                                     &trans_pcie->iml_dma_addr, GFP_KERNEL);
-       if (!iml_img)
-               return -ENOMEM;
+       if (!iml_img) {
+               ret = -ENOMEM;
+               goto err_free_ctxt_info;
+       }
 
        memcpy(iml_img, trans->iml, trans->iml_len);
 
@@ -206,23 +217,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL,
                    CSR_AUTO_FUNC_BOOT_ENA);
 
-       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
-               /*
-                * The firmware initializes this again later (to a smaller
-                * value), but for the boot process initialize the LTR to
-                * ~250 usec.
-                */
-               u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
-                         u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                         CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
-                         u32_encode_bits(250,
-                                         CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
-                         CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
-                         u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
-                                         CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
-                         u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
-
-               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val);
+       /*
+        * To work around hardware latency issues during the boot process,
+        * initialize the LTR to ~250 usec (see ltr_val above).
+        * The firmware initializes this again later (to a smaller value).
+        */
+       if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
+            trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
+           !trans->trans_cfg->integrated) {
+               iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
+       } else if (trans->trans_cfg->integrated &&
+                  trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
+               iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
+               iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
        }
 
        if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
@@ -232,6 +239,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        return 0;
 
+err_free_ctxt_info:
+       dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+                         trans_pcie->ctxt_info_gen3,
+                         trans_pcie->ctxt_info_dma_addr);
+       trans_pcie->ctxt_info_gen3 = NULL;
 err_free_prph_info:
        dma_free_coherent(trans->dev,
                          sizeof(*prph_info),
@@ -294,6 +306,9 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
                return ret;
        }
 
+       if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size))
+               return -EBUSY;
+
        prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
                cpu_to_le64(trans_pcie->pnvm_dram.physical);
        prph_sc_ctrl->pnvm_cfg.pnvm_size =
index 965982612e740894f5f27dea7d8fc38d4d654c11..ed3f5b7aa71e9b63914061b175349b4ed17a963d 100644 (file)
@@ -910,6 +910,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY,
                      iwl_qu_b0_hr1_b0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_qu_b0_hr_b0, iwl_ax203_name),
 
        /* Qu C step */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -917,6 +922,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
                      IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
                      IWL_CFG_ANY, IWL_CFG_ANY,
                      iwl_qu_c0_hr1_b0, iwl_ax101_name),
+       _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+                     IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+                     IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
+                     IWL_CFG_ANY, IWL_CFG_ANY,
+                     iwl_qu_c0_hr_b0, iwl_ax203_name),
 
        /* QuZ */
        _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
index 285e0d586021061d46d4f6c7dc5f41802e1013c0..ab93a848a46675d7d92242591f8663e71f7dd7e7 100644 (file)
@@ -2107,7 +2107,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
 
        while (offs < dwords) {
                /* limit the time we spin here under lock to 1/2s */
-               ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
+               unsigned long end = jiffies + HZ / 2;
+               bool resched = false;
 
                if (iwl_trans_grab_nic_access(trans, &flags)) {
                        iwl_write32(trans, HBUS_TARG_MEM_RADDR,
@@ -2118,14 +2119,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
                                                        HBUS_TARG_MEM_RDAT);
                                offs++;
 
-                               /* calling ktime_get is expensive so
-                                * do it once in 128 reads
-                                */
-                               if (offs % 128 == 0 && ktime_after(ktime_get(),
-                                                                  timeout))
+                               if (time_after(jiffies, end)) {
+                                       resched = true;
                                        break;
+                               }
                        }
                        iwl_trans_release_nic_access(trans, &flags);
+
+                       if (resched)
+                               cond_resched();
                } else {
                        return -EBUSY;
                }
index 5dda0015522dd92f53d3df4c36dad16fd2afb45f..83f4964f3cb29d6b43ffddacf3d75c0b5e3806a8 100644 (file)
@@ -201,6 +201,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        struct iwl_txq *txq = trans->txqs.txq[txq_id];
 
+       if (!txq) {
+               IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
+               return;
+       }
+
        spin_lock_bh(&txq->lock);
        while (txq->write_ptr != txq->read_ptr) {
                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
index 27eea909e32da6df2a75a0fa7c9a2da037b075e3..7ff1bb0ccc9cd5aec1bbf077014883180d6c0aee 100644 (file)
@@ -142,26 +142,25 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
         * idx is bounded by n_window
         */
        int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+       struct sk_buff *skb;
 
        lockdep_assert_held(&txq->lock);
 
+       if (!txq->entries)
+               return;
+
        iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
                               iwl_txq_get_tfd(trans, txq, idx));
 
-       /* free SKB */
-       if (txq->entries) {
-               struct sk_buff *skb;
-
-               skb = txq->entries[idx].skb;
+       skb = txq->entries[idx].skb;
 
-               /* Can be called from irqs-disabled context
-                * If skb is not NULL, it means that the whole queue is being
-                * freed and that the queue is not empty - free the skb
-                */
-               if (skb) {
-                       iwl_op_mode_free_skb(trans->op_mode, skb);
-                       txq->entries[idx].skb = NULL;
-               }
+       /* Can be called from irqs-disabled context
+        * If skb is not NULL, it means that the whole queue is being
+        * freed and that the queue is not empty - free the skb
+        */
+       if (skb) {
+               iwl_op_mode_free_skb(trans->op_mode, skb);
+               txq->entries[idx].skb = NULL;
        }
 }
 
@@ -841,10 +840,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
                        int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
                        struct sk_buff *skb = txq->entries[idx].skb;
 
-                       if (WARN_ON_ONCE(!skb))
-                               continue;
-
-                       iwl_txq_free_tso_page(trans, skb);
+                       if (!WARN_ON_ONCE(!skb))
+                               iwl_txq_free_tso_page(trans, skb);
                }
                iwl_txq_gen2_free_tfd(trans, txq);
                txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
@@ -1494,28 +1491,28 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
         */
        int rd_ptr = txq->read_ptr;
        int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
+       struct sk_buff *skb;
 
        lockdep_assert_held(&txq->lock);
 
+       if (!txq->entries)
+               return;
+
        /* We have only q->n_window txq->entries, but we use
         * TFD_QUEUE_SIZE_MAX tfds
         */
        iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
 
        /* free SKB */
-       if (txq->entries) {
-               struct sk_buff *skb;
-
-               skb = txq->entries[idx].skb;
+       skb = txq->entries[idx].skb;
 
-               /* Can be called from irqs-disabled context
-                * If skb is not NULL, it means that the whole queue is being
-                * freed and that the queue is not empty - free the skb
-                */
-               if (skb) {
-                       iwl_op_mode_free_skb(trans->op_mode, skb);
-                       txq->entries[idx].skb = NULL;
-               }
+       /* Can be called from irqs-disabled context
+        * If skb is not NULL, it means that the whole queue is being
+        * freed and that the queue is not empty - free the skb
+        */
+       if (skb) {
+               iwl_op_mode_free_skb(trans->op_mode, skb);
+               txq->entries[idx].skb = NULL;
        }
 }
 
index a44b7766dec6434ba8220853b98102d46b13700b..c13547841a4e9b14d2ac39badf59bd800b564519 100644 (file)
@@ -231,7 +231,7 @@ mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
                        int cmd, int *seq)
 {
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-       enum mt76_txq_id qid;
+       enum mt76_mcuq_id qid;
 
        mt7615_mcu_fill_msg(dev, skb, cmd, seq);
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
index 13d77f8fca86664217c9299319cbea9f1e413ce8..9fb506f2ace6d8d84effb3ba443537d282c697b6 100644 (file)
@@ -83,7 +83,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
 {
        struct mt76_queue *q = &dev->q_rx[qid];
        struct mt76_sdio *sdio = &dev->sdio;
-       int len = 0, err, i, order;
+       int len = 0, err, i;
        struct page *page;
        u8 *buf;
 
@@ -96,8 +96,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
        if (len > sdio->func->cur_blksize)
                len = roundup(len, sdio->func->cur_blksize);
 
-       order = get_order(len);
-       page = __dev_alloc_pages(GFP_KERNEL, order);
+       page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
        if (!page)
                return -ENOMEM;
 
@@ -106,7 +105,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
        err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
        if (err < 0) {
                dev_err(dev->dev, "sdio read data failed:%d\n", err);
-               __free_pages(page, order);
+               put_page(page);
                return err;
        }
 
@@ -123,7 +122,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
                if (q->queued + i + 1 == q->ndesc)
                        break;
        }
-       __free_pages(page, order);
+       put_page(page);
 
        spin_lock_bh(&q->lock);
        q->head = (q->head + i) % q->ndesc;
index 5fdd1a6d32ee1273d84b602ae9d253b908294b65..e211a2bd4d3c00330ba38adc7d2cf4aebf0cda7f 100644 (file)
@@ -256,7 +256,7 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
        struct mt7915_mcu_txd *mcu_txd;
        u8 seq, pkt_fmt, qidx;
-       enum mt76_txq_id txq;
+       enum mt76_mcuq_id qid;
        __le32 *txd;
        u32 val;
 
@@ -268,18 +268,18 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
                seq = ++dev->mt76.mcu.msg_seq & 0xf;
 
        if (cmd == -MCU_CMD_FW_SCATTER) {
-               txq = MT_MCUQ_FWDL;
+               qid = MT_MCUQ_FWDL;
                goto exit;
        }
 
        mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd));
 
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) {
-               txq = MT_MCUQ_WA;
+               qid = MT_MCUQ_WA;
                qidx = MT_TX_MCU_PORT_RX_Q0;
                pkt_fmt = MT_TX_TYPE_CMD;
        } else {
-               txq = MT_MCUQ_WM;
+               qid = MT_MCUQ_WM;
                qidx = MT_TX_MCU_PORT_RX_Q0;
                pkt_fmt = MT_TX_TYPE_CMD;
        }
@@ -326,7 +326,7 @@ exit:
        if (wait_seq)
                *wait_seq = seq;
 
-       return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+       return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
 }
 
 static void
index 5f99054f535b4d6ae3da175462136da2f4eb024b..af7d1ecb777c0a51fccb375179b7847096172bc8 100644 (file)
@@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
 
        if (new_p) {
                /* we have one extra ref from the allocator */
-               __free_pages(e->p, MT_RX_ORDER);
-
+               put_page(e->p);
                e->p = new_p;
        }
 }
@@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
        }
 
        e = &q->e[q->end];
-       e->skb = skb;
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
@@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
 
        q->end = (q->end + 1) % q->entries;
        q->used++;
+       e->skb = skb;
 
        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
index 200bdd672c281ffc54d7d5bc19be654ca2302d48..f13eb4ded95fad0ddf6076ed29f7c63f5ca67984 100644 (file)
@@ -1543,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        }
 
        length = (io.nblocks + 1) << ns->lba_shift;
-       meta_len = (io.nblocks + 1) * ns->ms;
-       metadata = nvme_to_user_ptr(io.metadata);
+
+       if ((io.control & NVME_RW_PRINFO_PRACT) &&
+           ns->ms == sizeof(struct t10_pi_tuple)) {
+               /*
+                * Protection information is stripped/inserted by the
+                * controller.
+                */
+               if (nvme_to_user_ptr(io.metadata))
+                       return -EINVAL;
+               meta_len = 0;
+               metadata = NULL;
+       } else {
+               meta_len = (io.nblocks + 1) * ns->ms;
+               metadata = nvme_to_user_ptr(io.metadata);
+       }
 
        if (ns->features & NVME_NS_EXT_LBAS) {
                length += meta_len;
@@ -3816,7 +3829,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
                }
        }
 
-       list_add_tail(&ns->siblings, &head->list);
+       list_add_tail_rcu(&ns->siblings, &head->list);
        ns->head = head;
        mutex_unlock(&ctrl->subsys->lock);
        return 0;
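
The nvme_submit_io() hunk above makes the ioctl reject a user metadata pointer when PRACT is requested and the namespace formats metadata as the 8-byte T10 PI tuple, since the controller then strips/inserts the protection information itself. A minimal sketch of that rule, using the same fields and macros the hunk uses (the helper name is hypothetical):

	static bool pi_handled_by_controller(__u16 control, unsigned int ms)
	{
		/* PRACT + 8-byte PI: the controller generates/strips PI, so no
		 * host metadata buffer is transferred for this command. */
		return (control & NVME_RW_PRINFO_PRACT) &&
		       ms == sizeof(struct t10_pi_tuple);
	}

When this predicate holds, the hunk sets meta_len to 0 and metadata to NULL, and returns -EINVAL if userspace supplied io.metadata anyway.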
index 9ac762b28811293d0895eba568bbd00fd8848fc0..282b7a4ea9a9a7a85c999dfb8e58e0f438ef6d41 100644 (file)
@@ -221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
        }
 
        for (ns = nvme_next_ns(head, old);
-            ns != old;
+            ns && ns != old;
             ns = nvme_next_ns(head, ns)) {
                if (nvme_path_is_disabled(ns))
                        continue;
index 50d9a20568a28df74abd85584259b246dbe67eac..6bad4d4dcdf073a13c4b71af5a7e5a97cf7b1173 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
 #include <linux/sed-opal.h>
 #include <linux/pci-p2pdma.h>
 
@@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
        return true;
 }
 
-static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
 {
-       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
-       dma_addr_t dma_addr = iod->first_dma, next_dma_addr;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
        int i;
 
-       if (iod->dma_len) {
-               dma_unmap_page(dev->dev, dma_addr, iod->dma_len,
-                              rq_dma_dir(req));
-               return;
+       for (i = 0; i < iod->npages; i++) {
+               __le64 *prp_list = nvme_pci_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+               dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
+               dma_addr = next_dma_addr;
        }
 
-       WARN_ON_ONCE(!iod->nents);
+}
 
-       if (is_pci_p2pdma_page(sg_page(iod->sg)))
-               pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
-                                   rq_dma_dir(req));
-       else
-               dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
+{
+       const int last_sg = SGES_PER_PAGE - 1;
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
+       int i;
 
+       for (i = 0; i < iod->npages; i++) {
+               struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
 
-       if (iod->npages == 0)
-               dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
-                       dma_addr);
+               dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
+               dma_addr = next_dma_addr;
+       }
 
-       for (i = 0; i < iod->npages; i++) {
-               void *addr = nvme_pci_iod_list(req)[i];
+}
 
-               if (iod->use_sgl) {
-                       struct nvme_sgl_desc *sg_list = addr;
+static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-                       next_dma_addr =
-                           le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr);
-               } else {
-                       __le64 *prp_list = addr;
+       if (is_pci_p2pdma_page(sg_page(iod->sg)))
+               pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
+                                   rq_dma_dir(req));
+       else
+               dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
+}
 
-                       next_dma_addr = le64_to_cpu(prp_list[last_prp]);
-               }
+static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
+{
+       struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 
-               dma_pool_free(dev->prp_page_pool, addr, dma_addr);
-               dma_addr = next_dma_addr;
+       if (iod->dma_len) {
+               dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
+                              rq_dma_dir(req));
+               return;
        }
 
+       WARN_ON_ONCE(!iod->nents);
+
+       nvme_unmap_sg(dev, req);
+       if (iod->npages == 0)
+               dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+                             iod->first_dma);
+       else if (iod->use_sgl)
+               nvme_free_sgls(dev, req);
+       else
+               nvme_free_prps(dev, req);
        mempool_free(iod->sg, dev->iod_mempool);
 }
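
The refactor above splits nvme_unmap_data() so that the new nvme_free_prps()/nvme_free_sgls() helpers can also be called from the setup paths' error labels, letting a failed nvme_pci_setup_prps()/_sgls() unwind whatever descriptor pages it had already chained. The chain walk they rely on — the last slot of each descriptor page holds the bus address of the next page — looks roughly like this for the PRP case (a sketch; the helper name and parameters are illustrative, not the driver's own):

	static void free_chained_pages(struct dma_pool *pool, __le64 **pages,
				       dma_addr_t first_dma, int npages)
	{
		const int last = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
		dma_addr_t dma = first_dma;
		int i;

		for (i = 0; i < npages; i++) {
			/* the link to the next page lives in the last slot */
			dma_addr_t next = le64_to_cpu(pages[i][last]);

			dma_pool_free(pool, pages[i], dma);
			dma = next;
		}
	}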
 
@@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
                        __le64 *old_prp_list = prp_list;
                        prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
-                               return BLK_STS_RESOURCE;
+                               goto free_prps;
                        list[iod->npages++] = prp_list;
                        prp_list[0] = old_prp_list[i - 1];
                        old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
                dma_addr = sg_dma_address(sg);
                dma_len = sg_dma_len(sg);
        }
-
 done:
        cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
-
        return BLK_STS_OK;
-
- bad_sgl:
+free_prps:
+       nvme_free_prps(dev, req);
+       return BLK_STS_RESOURCE;
+bad_sgl:
        WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
                        "Invalid SGL for payload:%d nents:%d\n",
                        blk_rq_payload_bytes(req), iod->nents);
@@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
 
                        sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
                        if (!sg_list)
-                               return BLK_STS_RESOURCE;
+                               goto free_sgls;
 
                        i = 0;
                        nvme_pci_iod_list(req)[iod->npages++] = sg_list;
@@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
        } while (--entries > 0);
 
        return BLK_STS_OK;
+free_sgls:
+       nvme_free_sgls(dev, req);
+       return BLK_STS_RESOURCE;
 }
 
 static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
        if (!iod->nents)
-               goto out;
+               goto out_free_sg;
 
        if (is_pci_p2pdma_page(sg_page(iod->sg)))
                nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
@@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
                                             rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (!nr_mapped)
-               goto out;
+               goto out_free_sg;
 
        iod->use_sgl = nvme_pci_use_sgls(dev, req);
        if (iod->use_sgl)
                ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
        else
                ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
-out:
        if (ret != BLK_STS_OK)
-               nvme_unmap_data(dev, req);
+               goto out_unmap_sg;
+       return BLK_STS_OK;
+
+out_unmap_sg:
+       nvme_unmap_sg(dev, req);
+out_free_sg:
+       mempool_free(iod->sg, dev->iod_mempool);
        return ret;
 }
 
@@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        if (dev->cmb_size)
                return;
 
+       if (NVME_CAP_CMBS(dev->ctrl.cap))
+               writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
+
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!dev->cmbsz)
                return;
@@ -1808,6 +1841,16 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        if (offset > bar_size)
                return;
 
+       /*
+        * Tell the controller about the host side address mapping the CMB,
+        * and enable CMB decoding for the NVMe 1.4+ scheme:
+        */
+       if (NVME_CAP_CMBS(dev->ctrl.cap)) {
+               hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
+                            (pci_bus_address(pdev, bar) + offset),
+                            dev->bar + NVME_REG_CMBMSC);
+       }
+
        /*
         * Controllers may support a CMB size larger than their BAR,
         * for example, due to being behind a bridge. Reduce the CMB to
@@ -3199,6 +3242,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE(0x144d, 0xa822),   /* Samsung PM1725a */
                .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1987, 0x5016),   /* Phison E16 */
+               .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x1d1d, 0x1f1f),   /* LighNVM qemu device */
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x1d1d, 0x2807),   /* CNEX WL */
@@ -3214,6 +3259,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x15b7, 0x2001),   /*  Sandisk Skyhawk */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x1d97, 0x2263),   /* SPCC */
+               .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+       { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
                .driver_data = NVME_QUIRK_SINGLE_VECTOR },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
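
For controllers reporting CAP.CMBS (NVMe 1.4+), the hunks above program CMBMSC in two steps: first set CRE so that CMBSZ/CMBLOC become readable, then write the controller-visible bus address of the CMB together with CRE|CMSE to enable decoding. Condensed into one place (a sketch; the function name is hypothetical, the register and bit macros are the ones used in the hunks):

	static void nvme_enable_cmb_decode(struct nvme_dev *dev,
					   struct pci_dev *pdev, int bar, u64 offset)
	{
		if (!NVME_CAP_CMBS(dev->ctrl.cap))
			return;		/* pre-1.4 scheme, nothing to do */

		/* 1) expose CMBSZ/CMBLOC */
		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);

		/* 2) tell the controller where the CMB sits on the host side
		 *    and enable memory-space decoding for that range */
		hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE |
			     (pci_bus_address(pdev, bar) + offset),
			     dev->bar + NVME_REG_CMBMSC);
	}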
index cf6c49d09c820aeddbd02fe49974c7644f7f062e..b7ce4f221d990304a24db7ef0ae16f7db52cc078 100644 (file)
@@ -97,6 +97,7 @@ struct nvme_rdma_queue {
        struct completion       cm_done;
        bool                    pi_support;
        int                     cq_size;
+       struct mutex            queue_lock;
 };
 
 struct nvme_rdma_ctrl {
@@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
        int ret;
 
        queue = &ctrl->queues[idx];
+       mutex_init(&queue->queue_lock);
        queue->ctrl = ctrl;
        if (idx && ctrl->ctrl.max_integrity_segments)
                queue->pi_support = true;
@@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
        if (IS_ERR(queue->cm_id)) {
                dev_info(ctrl->ctrl.device,
                        "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
-               return PTR_ERR(queue->cm_id);
+               ret = PTR_ERR(queue->cm_id);
+               goto out_destroy_mutex;
        }
 
        if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
@@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
 out_destroy_cm_id:
        rdma_destroy_id(queue->cm_id);
        nvme_rdma_destroy_queue_ib(queue);
+out_destroy_mutex:
+       mutex_destroy(&queue->queue_lock);
        return ret;
 }
 
@@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
-       if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
-               return;
-       __nvme_rdma_stop_queue(queue);
+       mutex_lock(&queue->queue_lock);
+       if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
+               __nvme_rdma_stop_queue(queue);
+       mutex_unlock(&queue->queue_lock);
 }
 
 static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
@@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
 
        nvme_rdma_destroy_queue_ib(queue);
        rdma_destroy_id(queue->cm_id);
+       mutex_destroy(&queue->queue_lock);
 }
 
 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
index 216619926563ed2c40373ba6981da58d5481c235..881d28eb15e9d743c3f9a363e18baf1b4beb16f5 100644 (file)
@@ -76,6 +76,7 @@ struct nvme_tcp_queue {
        struct work_struct      io_work;
        int                     io_cpu;
 
+       struct mutex            queue_lock;
        struct mutex            send_mutex;
        struct llist_head       req_list;
        struct list_head        send_list;
@@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
 
        sock_release(queue->sock);
        kfree(queue->pdu);
+       mutex_destroy(&queue->queue_lock);
 }
 
 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
@@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
        int ret, rcv_pdu_size;
 
+       mutex_init(&queue->queue_lock);
        queue->ctrl = ctrl;
        init_llist_head(&queue->req_list);
        INIT_LIST_HEAD(&queue->send_list);
@@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        if (ret) {
                dev_err(nctrl->device,
                        "failed to create socket: %d\n", ret);
-               return ret;
+               goto err_destroy_mutex;
        }
 
        /* Single syn retry */
@@ -1507,6 +1510,8 @@ err_crypto:
 err_sock:
        sock_release(queue->sock);
        queue->sock = NULL;
+err_destroy_mutex:
+       mutex_destroy(&queue->queue_lock);
        return ret;
 }
 
@@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
 
-       if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
-               return;
-       __nvme_tcp_stop_queue(queue);
+       mutex_lock(&queue->queue_lock);
+       if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
+               __nvme_tcp_stop_queue(queue);
+       mutex_unlock(&queue->queue_lock);
 }
 
 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
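
Both the nvme-rdma and nvme-tcp hunks above add a per-queue mutex around queue stop: the LIVE bit is tested and cleared under the lock, so concurrent stop callers serialize and a second caller returns only after the teardown started by the first has finished, and the mutex is destroyed only once the queue resources are freed. The pattern, stripped of transport details (types and names here are simplified placeholders, not the drivers' own):

	struct xport_queue {
		struct mutex	queue_lock;	/* serializes stop vs. free */
		unsigned long	flags;
	};

	#define XPORT_Q_LIVE	0

	static void __xport_stop_queue(struct xport_queue *q)
	{
		/* transport-specific teardown (drain CQ, shut down socket, ...) */
	}

	static void xport_stop_queue(struct xport_queue *q)
	{
		mutex_lock(&q->queue_lock);
		if (test_and_clear_bit(XPORT_Q_LIVE, &q->flags))
			__xport_stop_queue(q);	/* runs at most once per start */
		mutex_unlock(&q->queue_lock);
	}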
index 8d90235e4fcc5a4f1a5de293725e2912aa78c858..dc1ea468b182b608527c3441187d927a4061e585 100644 (file)
@@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
 
        /* return an all zeroed buffer if we can't find an active namespace */
        ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
-       if (!ns)
+       if (!ns) {
+               status = NVME_SC_INVALID_NS;
                goto done;
+       }
 
        nvmet_ns_revalidate(ns);
 
@@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
                id->nsattr |= (1 << 0);
        nvmet_put_namespace(ns);
 done:
-       status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+       if (!status)
+               status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+
        kfree(id);
 out:
        nvmet_req_complete(req, status);
index dc1f0f6471896e3dfd3f38dee4084184bc5d588c..aacf06f0b4312539c0d295d96ce5a2d7542c7c17 100644 (file)
@@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
        length = cmd->pdu_len;
        cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE);
        offset = cmd->rbytes_done;
-       cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE);
+       cmd->sg_idx = offset / PAGE_SIZE;
        sg_offset = offset % PAGE_SIZE;
        sg = &cmd->req.sg[cmd->sg_idx];
 
@@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
                length -= iov_len;
                sg = sg_next(sg);
                iov++;
+               sg_offset = 0;
        }
 
        iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov,
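
The nvmet_tcp_map_pdu_iovec() hunk replaces DIV_ROUND_UP() with a plain division when locating the scatterlist entry where reception resumes, and resets sg_offset to 0 once the first (partial) entry has been consumed. Rounding up points one page past the data whenever the resume offset is not page aligned; a small stand-alone illustration of the arithmetic (the offset value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page = 4096, offset = 5000;	/* rbytes_done */

		unsigned long old_idx = (offset + page - 1) / page;	/* DIV_ROUND_UP -> 2 */
		unsigned long new_idx = offset / page;			/* 1 */
		unsigned long sg_off  = offset % page;			/* 904 */

		printf("old sg_idx=%lu, new sg_idx=%lu, sg_offset=%lu\n",
		       old_idx, new_idx, sg_off);
		return 0;
	}

With an offset of 5000, the data resumes 904 bytes into the second page (index 1); the old rounding skipped to index 2 and applied the 904-byte offset there.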
index aedfaaafd3e7edb73a4468cba26cf2c177ea1164..1122daa8e273648ce1b30e92d5f0d43254e04381 100644 (file)
@@ -162,9 +162,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
        mask = DMA_BIT_MASK(ilog2(end) + 1);
        dev->coherent_dma_mask &= mask;
        *dev->dma_mask &= mask;
-       /* ...but only set bus limit if we found valid dma-ranges earlier */
-       if (!ret)
+       /* ...but only set bus limit and range map if we found valid dma-ranges earlier */
+       if (!ret) {
                dev->bus_dma_limit = end;
+               dev->dma_range_map = map;
+       }
 
        coherent = of_dma_is_coherent(np);
        dev_dbg(dev, "device is%sdma coherent\n",
@@ -172,6 +174,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 
        iommu = of_iommu_configure(dev, np, id);
        if (PTR_ERR(iommu) == -EPROBE_DEFER) {
+               /* Don't touch range map if it wasn't set from a valid dma-ranges */
+               if (!ret)
+                       dev->dma_range_map = NULL;
                kfree(map);
                return -EPROBE_DEFER;
        }
@@ -181,7 +186,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np,
 
        arch_setup_dma_ops(dev, dma_start, size, iommu, coherent);
 
-       dev->dma_range_map = map;
        return 0;
 }
 EXPORT_SYMBOL_GPL(of_dma_configure_id);
index b9fecc25d21316f8d7c1076d2d126305c58db82c..790393d1e31896d9d18f33e2f872c18f3e794e8e 100644 (file)
@@ -1558,7 +1558,6 @@ int pci_save_state(struct pci_dev *dev)
                return i;
 
        pci_save_ltr_state(dev);
-       pci_save_aspm_l1ss_state(dev);
        pci_save_dpc_state(dev);
        pci_save_aer_state(dev);
        pci_save_ptm_state(dev);
@@ -1665,7 +1664,6 @@ void pci_restore_state(struct pci_dev *dev)
         * LTR itself (in the PCIe capability).
         */
        pci_restore_ltr_state(dev);
-       pci_restore_aspm_l1ss_state(dev);
 
        pci_restore_pcie_state(dev);
        pci_restore_pasid_state(dev);
@@ -3353,11 +3351,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
        if (error)
                pci_err(dev, "unable to allocate suspend buffer for LTR\n");
 
-       error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
-                                           2 * sizeof(u32));
-       if (error)
-               pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
        pci_allocate_vc_save_buffers(dev);
 }
 
index 5c59365092fa44da2cff9985194642d7ee67cfdf..a7bdf0b1d45db8de894ef0f4a20b9d155c17e122 100644 (file)
@@ -582,15 +582,11 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev);
 void pcie_aspm_exit_link_state(struct pci_dev *pdev);
 void pcie_aspm_pm_state_change(struct pci_dev *pdev);
 void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
 #else
 static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
 static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { }
 static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
 #endif
 
 #ifdef CONFIG_PCIE_ECRC
index a08e7d6dc248807dc5064aa682c62e7e4ab7b13d..ac0557a305aff383f7a0d9e7ea4112904081f351 100644 (file)
@@ -734,50 +734,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
                                PCI_L1SS_CTL1_L1SS_MASK, val);
 }
 
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
-       int aspm_l1ss;
-       struct pci_cap_saved_state *save_state;
-       u32 *cap;
-
-       if (!pci_is_pcie(dev))
-               return;
-
-       aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
-       if (!aspm_l1ss)
-               return;
-
-       save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
-       if (!save_state)
-               return;
-
-       cap = (u32 *)&save_state->cap.data[0];
-       pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, cap++);
-       pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
-       int aspm_l1ss;
-       struct pci_cap_saved_state *save_state;
-       u32 *cap;
-
-       if (!pci_is_pcie(dev))
-               return;
-
-       aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
-       if (!aspm_l1ss)
-               return;
-
-       save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
-       if (!save_state)
-               return;
-
-       cap = (u32 *)&save_state->cap.data[0];
-       pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, *cap++);
-       pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, *cap++);
-}
-
 static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
 {
        pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
index 65d5ea00fc9d4bb8eac784605f3b15527afdfe16..1cb158d7233f483bb26cd5fe10607042e11aec5e 100644 (file)
@@ -1,2 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y          += phy-ingenic-usb.o
+obj-$(CONFIG_PHY_INGENIC_USB)          += phy-ingenic-usb.o
index d38def43b1bf6214303deef36d6066b848005f5d..55f8e6c048ab3ac26f60055d0814cfd9dd8683e7 100644 (file)
@@ -49,7 +49,9 @@ config PHY_MTK_HDMI
 
 config PHY_MTK_MIPI_DSI
        tristate "MediaTek MIPI-DSI Driver"
-       depends on ARCH_MEDIATEK && OF
+       depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on COMMON_CLK
+       depends on OF
        select GENERIC_PHY
        help
          Support MIPI DSI for Mediatek SoCs.
index 442522ba487f07c1aa7d87b179202cd9f7923c7a..4728e2bff6620bea2d6971cdba4445df34b1ff36 100644 (file)
@@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
        generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
        if (IS_ERR(generic_phy)) {
                error = PTR_ERR(generic_phy);
-               return PTR_ERR(generic_phy);
+               goto out_reg_disable;
        }
 
        phy_set_drvdata(generic_phy, ddata);
 
        phy_provider = devm_of_phy_provider_register(ddata->dev,
                                                     of_phy_simple_xlate);
-       if (IS_ERR(phy_provider))
-               return PTR_ERR(phy_provider);
+       if (IS_ERR(phy_provider)) {
+               error = PTR_ERR(phy_provider);
+               goto out_reg_disable;
+       }
 
        error = cpcap_usb_init_optional_pins(ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        cpcap_usb_init_optional_gpios(ddata);
 
        error = cpcap_usb_init_iio(ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        error = cpcap_usb_init_interrupts(pdev, ddata);
        if (error)
-               return error;
+               goto out_reg_disable;
 
        usb_add_phy_dev(&ddata->phy);
        atomic_set(&ddata->active, 1);
        schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));
 
        return 0;
+
+out_reg_disable:
+       regulator_disable(ddata->vusb);
+
+       return error;
 }
 
 static int cpcap_usb_phy_remove(struct platform_device *pdev)
index 34803a6c76643259def0d596cc6f62fb3db6b4a0..5c1a109842a76054e369873b2386875ab567b2a4 100644 (file)
@@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
 
 #define D22 40
 SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
-SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8));
+SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
 PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
 GROUP_DECL(PWM8G0, D22);
 
index 7aeb552d16ce9f1496c88e5c41af82f009937764..72f17f26acd80df9880a4b92e0bf1f2c0b42a436 100644 (file)
@@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw,
                        err = hw->soc->bias_set(hw, desc, pullup);
                        if (err)
                                return err;
+               } else if (hw->soc->bias_set_combo) {
+                       err = hw->soc->bias_set_combo(hw, desc, pullup, arg);
+                       if (err)
+                               return err;
                } else {
                        return -ENOTSUPP;
                }
index d4ea10803fd9077b81bf844713b7291cb1c7ecc6..abfe11c7b49fb039ba050a070d35097705f527f3 100644 (file)
@@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
        } else {
                int irq = chip->to_irq(chip, offset);
                const int pullidx = pull ? 1 : 0;
-               bool wake;
                int val;
                static const char * const pulls[] = {
                        "none        ",
index 53a6a24bd05270291b690de275b072cba5cfb722..3ea163498647f558bd2be134b14b26417f68f3ee 100644 (file)
 #define JZ4740_GPIO_TRIG       0x70
 #define JZ4740_GPIO_FLAG       0x80
 
-#define JZ4760_GPIO_INT                0x10
-#define JZ4760_GPIO_PAT1       0x30
-#define JZ4760_GPIO_PAT0       0x40
-#define JZ4760_GPIO_FLAG       0x50
-#define JZ4760_GPIO_PEN                0x70
+#define JZ4770_GPIO_INT                0x10
+#define JZ4770_GPIO_PAT1       0x30
+#define JZ4770_GPIO_PAT0       0x40
+#define JZ4770_GPIO_FLAG       0x50
+#define JZ4770_GPIO_PEN                0x70
 
 #define X1830_GPIO_PEL                 0x110
 #define X1830_GPIO_PEH                 0x120
@@ -1688,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc,
 static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc,
                                   u8 offset, int value)
 {
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value);
 }
@@ -1718,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc,
                break;
        }
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760) {
-               reg1 = JZ4760_GPIO_PAT1;
-               reg2 = JZ4760_GPIO_PAT0;
+       if (jzgc->jzpc->info->version >= ID_JZ4770) {
+               reg1 = JZ4770_GPIO_PAT1;
+               reg2 = JZ4770_GPIO_PAT0;
        } else {
                reg1 = JZ4740_GPIO_TRIG;
                reg2 = JZ4740_GPIO_DIR;
@@ -1758,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd)
        struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc);
        int irq = irqd->hwirq;
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true);
 
@@ -1774,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd)
 
        ingenic_gpio_irq_mask(irqd);
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false);
 }
@@ -1799,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
                        irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
        }
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false);
        else
                ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true);
 }
@@ -1856,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc)
 
        chained_irq_enter(irq_chip, desc);
 
-       if (jzgc->jzpc->info->version >= ID_JZ4760)
-               flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG);
+       if (jzgc->jzpc->info->version >= ID_JZ4770)
+               flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG);
        else
                flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG);
 
@@ -1938,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
        struct ingenic_pinctrl *jzpc = jzgc->jzpc;
        unsigned int pin = gc->base + offset;
 
-       if (jzpc->info->version >= ID_JZ4760) {
-               if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) ||
-                   ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1))
+       if (jzpc->info->version >= ID_JZ4770) {
+               if (ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) ||
+                   ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1))
                        return GPIO_LINE_DIRECTION_IN;
                return GPIO_LINE_DIRECTION_OUT;
        }
@@ -1991,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc,
                        'A' + offt, idx, func);
 
        if (jzpc->info->version >= ID_X1000) {
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
                ingenic_shadow_config_pin_load(jzpc, pin);
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_config_pin(jzpc, pin, GPIO_MSK, false);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2);
-               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0);
+               ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1);
        }
 
        return 0;
@@ -2057,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                        'A' + offt, idx, input ? "in" : "out");
 
        if (jzpc->info->version >= ID_X1000) {
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true);
-               ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+               ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
                ingenic_shadow_config_pin_load(jzpc, pin);
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false);
                ingenic_config_pin(jzpc, pin, GPIO_MSK, true);
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input);
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
@@ -2091,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev,
        unsigned int offt = pin / PINS_PER_GPIO_CHIP;
        bool pull;
 
-       if (jzpc->info->version >= ID_JZ4760)
-               pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN);
+       if (jzpc->info->version >= ID_JZ4770)
+               pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN);
        else
                pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS);
 
@@ -2141,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
                                        REG_SET(X1830_GPIO_PEH), bias << idxh);
                }
 
-       } else if (jzpc->info->version >= ID_JZ4760) {
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias);
+       } else if (jzpc->info->version >= ID_JZ4770) {
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias);
        } else {
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias);
        }
@@ -2151,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc,
 static void ingenic_set_output_level(struct ingenic_pinctrl *jzpc,
                                     unsigned int pin, bool high)
 {
-       if (jzpc->info->version >= ID_JZ4760)
-               ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high);
+       if (jzpc->info->version >= ID_JZ4770)
+               ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high);
        else
                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high);
 }
index e051aecf95c4ee5f8fc4feec3be30861dd61db0f..d70caecd21d25af56c54cd71be10221a36d98fd7 100644 (file)
@@ -51,6 +51,7 @@
  * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge
  *                  detection.
  * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller
+ * @disabled_for_mux: These IRQs were disabled because we muxed away.
  * @soc:            Reference to soc_data of platform specific data.
  * @regs:           Base addresses for the TLMM tiles.
  * @phys_base:      Physical base address
@@ -72,6 +73,7 @@ struct msm_pinctrl {
        DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
        DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO);
+       DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO);
 
        const struct msm_pinctrl_soc_data *soc;
        void __iomem *regs[MAX_NR_TILES];
@@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg)
 MSM_ACCESSOR(intr_status)
 MSM_ACCESSOR(intr_target)
 
+static void msm_ack_intr_status(struct msm_pinctrl *pctrl,
+                               const struct msm_pingroup *g)
+{
+       u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0;
+
+       msm_writel_intr_status(val, pctrl, g);
+}
+
 static int msm_get_groups_count(struct pinctrl_dev *pctldev)
 {
        struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
@@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
                              unsigned group)
 {
        struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+       struct gpio_chip *gc = &pctrl->chip;
+       unsigned int irq = irq_find_mapping(gc->irq.domain, group);
+       struct irq_data *d = irq_get_irq_data(irq);
+       unsigned int gpio_func = pctrl->soc->gpio_func;
        const struct msm_pingroup *g;
        unsigned long flags;
        u32 val, mask;
@@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
        if (WARN_ON(i == g->nfuncs))
                return -EINVAL;
 
+       /*
+        * If a GPIO interrupt is set up on this pin then we need special
+        * handling.  Specifically, the interrupt detection logic will still
+        * see the pin twiddle even when we're muxed away.
+        *
+        * When we see a pin with an interrupt set up on it, we'll disable
+        * (mask) interrupts on it when we mux away until we mux back.  Note
+        * that disable_irq() refcounts and interrupts are disabled as long as
+        * at least one disable_irq() has been called.
+        */
+       if (d && i != gpio_func &&
+           !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
+               disable_irq(irq);
+
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        val = msm_readl_ctl(pctrl, g);
@@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
+       if (d && i == gpio_func &&
+           test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) {
+               /*
+                * Clear interrupts detected while not GPIO since we only
+                * masked things.
+                */
+               if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
+                       irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
+               else
+                       msm_ack_intr_status(pctrl, g);
+
+               enable_irq(irq);
+       }
+
        return 0;
 }
 
@@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev,
        if (!g->nfuncs)
                return 0;
 
-       /* For now assume function 0 is GPIO because it always is */
-       return msm_pinmux_set_mux(pctldev, g->funcs[0], offset);
+       return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset);
 }
 
 static const struct pinmux_ops msm_pinmux_ops = {
@@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 }
 
-static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
+static void msm_gpio_irq_unmask(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
@@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       if (status_clear) {
-               /*
-                * clear the interrupt status bit before unmask to avoid
-                * any erroneous interrupts that would have got latched
-                * when the interrupt is not in use.
-                */
-               val = msm_readl_intr_status(pctrl, g);
-               val &= ~BIT(g->intr_status_bit);
-               msm_writel_intr_status(val, pctrl, g);
-       }
-
        val = msm_readl_intr_cfg(pctrl, g);
        val |= BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_enable_bit);
@@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d)
                irq_chip_enable_parent(d);
 
        if (!test_bit(d->hwirq, pctrl->skip_wake_irqs))
-               msm_gpio_irq_clear_unmask(d, true);
+               msm_gpio_irq_unmask(d);
 }
 
 static void msm_gpio_irq_disable(struct irq_data *d)
@@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d)
                msm_gpio_irq_mask(d);
 }
 
-static void msm_gpio_irq_unmask(struct irq_data *d)
-{
-       msm_gpio_irq_clear_unmask(d, false);
-}
-
 /**
  * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent.
  * @d: The irq data.
@@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d)
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        const struct msm_pingroup *g;
        unsigned long flags;
-       u32 val;
 
        if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) {
                if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
@@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d)
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
-       val = msm_readl_intr_status(pctrl, g);
-       if (g->intr_ack_high)
-               val |= BIT(g->intr_status_bit);
-       else
-               val &= ~BIT(g->intr_status_bit);
-       msm_writel_intr_status(val, pctrl, g);
+       msm_ack_intr_status(pctrl, g);
 
        if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
                msm_gpio_update_dual_edge_pos(pctrl, g, d);
@@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
        const struct msm_pingroup *g;
        unsigned long flags;
+       bool was_enabled;
        u32 val;
 
        if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) {
@@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
         * could cause the INTR_STATUS to be set for EDGE interrupts.
         */
        val = msm_readl_intr_cfg(pctrl, g);
+       was_enabled = val & BIT(g->intr_raw_status_bit);
        val |= BIT(g->intr_raw_status_bit);
        if (g->intr_detection_width == 2) {
                val &= ~(3 << g->intr_detection_bit);
@@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
        }
        msm_writel_intr_cfg(val, pctrl, g);
 
+       /*
+        * The first time we set RAW_STATUS_EN it could trigger an interrupt.
+        * Clear the interrupt.  This is safe because we have
+        * IRQCHIP_SET_TYPE_MASKED.
+        */
+       if (!was_enabled)
+               msm_ack_intr_status(pctrl, g);
+
        if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
                msm_gpio_update_dual_edge_pos(pctrl, g, d);
 
@@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d)
        }
 
        /*
-        * Clear the interrupt that may be pending before we enable
-        * the line.
-        * This is especially a problem with the GPIOs routed to the
-        * PDC. These GPIOs are direct-connect interrupts to the GIC.
-        * Disabling the interrupt line at the PDC does not prevent
-        * the interrupt from being latched at the GIC. The state at
-        * GIC needs to be cleared before enabling.
+        * The disable / clear-enable workaround we do in msm_pinmux_set_mux()
+        * only works if disable is not lazy since we only clear any bogus
+        * interrupt in hardware. Explicitly mark the interrupt as UNLAZY.
         */
-       if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
-               irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0);
+       irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY);
 
        return 0;
 out:
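
The msm hunks above deal with pins that have a GPIO interrupt configured but are then muxed to another function: the interrupt is masked (disable_irq()) while the pin is away, and when the pin is muxed back to GPIO any status latched in the meantime is cleared before enable_irq(); marking the IRQ IRQ_DISABLE_UNLAZY ensures the disable actually reaches the hardware. Condensed into two helpers (the names msm_mux_away()/msm_mux_back() are illustrative; the calls inside are the ones the hunks use):

	static void msm_mux_away(struct msm_pinctrl *pctrl, struct irq_data *d,
				 unsigned int irq)
	{
		/* leaving the GPIO function: mask the interrupt so the detection
		 * logic cannot fire on non-GPIO activity on the pin */
		if (d && !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux))
			disable_irq(irq);
	}

	static void msm_mux_back(struct msm_pinctrl *pctrl,
				 const struct msm_pingroup *g,
				 struct irq_data *d, unsigned int irq)
	{
		if (!d || !test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux))
			return;

		/* drop anything latched while the pin was muxed away */
		if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs))
			irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false);
		else
			msm_ack_intr_status(pctrl, g);

		enable_irq(irq);
	}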
index 333f99243c43ac24bad1a83ccd1cb9089d2a1870..e31a5167c91ec9710f0d111b9320de909b1c4dc0 100644 (file)
@@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map {
  * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need
  *                            to be aware that their parent can't handle dual
  *                            edge interrupts.
+ * @gpio_func: Which function number is GPIO (usually 0).
  */
 struct msm_pinctrl_soc_data {
        const struct pinctrl_pin_desc *pins;
@@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data {
        const struct msm_gpio_wakeirq_map *wakeirq_map;
        unsigned int nwakeirq_map;
        bool wakeirq_dual_edge_errata;
+       unsigned int gpio_func;
 };
 
 extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
index 33040b0b3b799c2d520f001b85ef9f3af3eab971..2c941cdac9eedc6fb569f0d912a82361a02bb17c 100644 (file)
@@ -5,6 +5,7 @@
 
 menuconfig SURFACE_PLATFORMS
        bool "Microsoft Surface Platform-Specific Device Drivers"
+       depends on ACPI
        default y
        help
          Say Y here to get to see options for platform-specific device drivers
@@ -29,20 +30,19 @@ config SURFACE3_WMI
 
 config SURFACE_3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet"
-       depends on ACPI && KEYBOARD_GPIO && I2C
+       depends on KEYBOARD_GPIO && I2C
        help
          This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet.
 
 config SURFACE_3_POWER_OPREGION
        tristate "Surface 3 battery platform operation region support"
-       depends on ACPI && I2C
+       depends on I2C
        help
          This driver provides support for ACPI operation
          region of the Surface 3 battery platform driver.
 
 config SURFACE_GPE
        tristate "Surface GPE/Lid Support Driver"
-       depends on ACPI
        depends on DMI
        help
          This driver marks the GPEs related to the ACPI lid device found on
@@ -52,7 +52,7 @@ config SURFACE_GPE
 
 config SURFACE_PRO3_BUTTON
        tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet"
-       depends on ACPI && INPUT
+       depends on INPUT
        help
          This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet.
 
index e49e5d6d5d4e134e3800d4d1449caeaa7082608c..86f6991b1215710307b6965e5643043a3da964c7 100644 (file)
@@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable)
        return 0;
 }
 
-static int surface_gpe_suspend(struct device *dev)
+static int __maybe_unused surface_gpe_suspend(struct device *dev)
 {
        return surface_lid_enable_wakeup(dev, true);
 }
 
-static int surface_gpe_resume(struct device *dev)
+static int __maybe_unused surface_gpe_resume(struct device *dev)
 {
        return surface_lid_enable_wakeup(dev, false);
 }
index 0102bf1c7916028b0b785c19e1f42e1fa211872f..ef83425724634a1d41862d6d307170dcc7ce305c 100644 (file)
@@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
        iowrite32(val, dev->regbase + reg_offset);
 }
 
-#if CONFIG_DEBUG_FS
+#ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
 {
        struct amd_pmc_dev *dev = s->private;
index dc6dd531c99642b1544db039336a91669967903d..cb81010ba1a21b108e76fd248ed8bb3ac79c8cb2 100644 (file)
@@ -419,13 +419,17 @@ static int init_bios_attributes(int attr_type, const char *guid)
                return retval;
        /* need to use specific instance_id and guid combination to get right data */
        obj = get_wmiobj_pointer(instance_id, guid);
-       if (!obj)
+       if (!obj || obj->type != ACPI_TYPE_PACKAGE)
                return -ENODEV;
        elements = obj->package.elements;
 
        mutex_lock(&wmi_priv.mutex);
        while (elements) {
                /* sanity checking */
+               if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) {
+                       pr_debug("incorrect element type\n");
+                       goto nextobj;
+               }
                if (strlen(elements[ATTR_NAME].string.pointer) == 0) {
                        pr_debug("empty attribute found\n");
                        goto nextobj;
index ecd477964d117a13235c5bc212217daa803fff7c..e94e59283ecb9f5f601f005f5359d35c60522be1 100644 (file)
@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
 MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
 MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
 
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
 #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
 #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
 
@@ -247,7 +251,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command,
        ret = bios_return->return_code;
 
        if (ret) {
-               if (ret != HPWMI_RET_UNKNOWN_CMDTYPE)
+               if (ret != HPWMI_RET_UNKNOWN_COMMAND &&
+                   ret != HPWMI_RET_UNKNOWN_CMDTYPE)
                        pr_warn("query 0x%x returned error 0x%x\n", query, ret);
                goto out_free;
        }
@@ -653,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
        }
 
        /* Tablet mode */
-       val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
-       if (!(val < 0)) {
-               __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
-               input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+       if (enable_tablet_mode_sw > 0) {
+               val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+               if (val >= 0) {
+                       __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+                       input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+               }
        }
 
        err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
index b457b0babde390a9b3108103f81c9817d64ca834..2cce82579d09164a95876e7924ee19265425fdc5 100644 (file)
@@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[]  = {
        {}
 };
 
-static const struct i2c_inst_data int3515_data[]  = {
-       { "tps6598x", IRQ_RESOURCE_APIC, 0 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 1 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 2 },
-       { "tps6598x", IRQ_RESOURCE_APIC, 3 },
-       {}
-};
+/*
+ * Devices with _HID INT3515 (TI PD controllers) have some unresolved interrupt
+ * issues. The most common problem seen is interrupt flood.
+ *
+ * There are at least two known causes. Firstly, on some boards, the
+ * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
+ * are not one-to-one mapped like in the array below. Secondly, on some boards
+ * the IRQ line from the PD controller is not actually connected at all. But the
+ * interrupt flood is also seen on some boards where those are not a problem, so
+ * there are some other problems as well.
+ *
+ * Because of the issues with the interrupt, the device is disabled for now. If
+ * you wish to debug the issues, uncomment the below, and add an entry for the
+ * INT3515 device to the i2c_multi_instance_ids table.
+ *
+ * static const struct i2c_inst_data int3515_data[]  = {
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ *     { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ *     { }
+ * };
+ */
 
 /*
  * Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[]  = {
 static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
        { "BSG1160", (unsigned long)bsg1160_data },
        { "BSG2150", (unsigned long)bsg2150_data },
-       { "INT3515", (unsigned long)int3515_data },
        { }
 };
 MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
index 7598cd46cf606d441ce3f61d4837a59da5278bae..5b81bafa5c1650fb591889365db072ccd7f4e06e 100644 (file)
@@ -92,6 +92,7 @@ struct ideapad_private {
        struct dentry *debug;
        unsigned long cfg;
        bool has_hw_rfkill_switch;
+       bool has_touchpad_switch;
        const char *fnesc_guid;
 };
 
@@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj,
        } else if (attr == &dev_attr_fn_lock.attr) {
                supported = acpi_has_method(priv->adev->handle, "HALS") &&
                        acpi_has_method(priv->adev->handle, "SALS");
-       } else
+       } else if (attr == &dev_attr_touchpad.attr)
+               supported = priv->has_touchpad_switch;
+       else
                supported = true;
 
        return supported ? attr->mode : 0;
@@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv)
 {
        unsigned long value;
 
+       if (!priv->has_touchpad_switch)
+               return;
+
        /* Without reading from EC touchpad LED doesn't switch state */
        if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) {
                /* Some IdeaPads don't really turn off touchpad - they only
@@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        priv->platform_device = pdev;
        priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list);
 
+       /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */
+       priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1);
+
        ret = ideapad_sysfs_init(priv);
        if (ret)
                return ret;
@@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
        if (!priv->has_hw_rfkill_switch)
                write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1);
 
+       /* The same for Touchpad */
+       if (!priv->has_touchpad_switch)
+               write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1);
+
        for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++)
                if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
                        ideapad_register_rfkill(priv, i);
index 3b49a1f4061bcdaf689cdcda28980d3b924debd8..30a9062d2b4b8858d05846a1ffb9f6475e0288fd 100644 (file)
@@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
        {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
                },
        },
        {
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
                },
        },
        {
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
                },
        },
        {} /* Array terminator */
index e03df2881dc6dfff51ebbd366892fd6f4c1f96e5..f3e8eca8d86d6c5d5c638aea97c6493e0ce81d7b 100644 (file)
@@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
        TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL),  /* P71 */
        TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL),  /* P51 */
        TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL),  /* P52 / P72 */
+       TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL),  /* P53 / P73 */
        TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (1st gen) */
        TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (2nd gen) */
        TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL),  /* P1 / X1 Extreme (3nd gen) */
@@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
        if ((palm_err == -ENODEV) && (lap_err == -ENODEV))
                return 0;
        /* Otherwise, if there was an error return it */
-       if (palm_err && (palm_err != ENODEV))
+       if (palm_err && (palm_err != -ENODEV))
                return palm_err;
-       if (lap_err && (lap_err != ENODEV))
+       if (lap_err && (lap_err != -ENODEV))
                return lap_err;
 
        if (has_palmsensor) {
index 5783139d0a1198abcaccf112a955ee14398ed243..c4de932302d6b5f52479d99b95e9558deddaa0ba 100644 (file)
@@ -263,6 +263,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
        .properties     = digma_citi_e200_props,
 };
 
+static const struct property_entry estar_beauty_hd_props[] = {
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+       .acpi_name      = "GDIX1001:00",
+       .properties     = estar_beauty_hd_props,
+};
+
 static const struct property_entry gp_electronic_t701_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -942,6 +952,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
                },
        },
+       {
+               /* Estar Beauty HD (MID 7316R) */
+               .driver_data = (void *)&estar_beauty_hd_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+               },
+       },
        {
                /* GP-electronic T701 */
                .driver_data = (void *)&gp_electronic_t701_data,
index ca03d8e70bd1359f586e82aaa75878ecf58f75c7..67a768fe5b2a308eee0a88f267a192e99a68dd73 100644 (file)
@@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
 {
        struct regulator_dev *r;
        struct device *dev = rdev->dev.parent;
-       int ret;
+       int ret = 0;
 
        /* No supply to resolve? */
        if (!rdev->supply_name)
                return 0;
 
-       /* Supply already resolved? */
+       /* Supply already resolved? (fast-path without locking contention) */
        if (rdev->supply)
                return 0;
 
@@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
 
                /* Did the lookup explicitly defer for us? */
                if (ret == -EPROBE_DEFER)
-                       return ret;
+                       goto out;
 
                if (have_full_constraints()) {
                        r = dummy_regulator_rdev;
@@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                } else {
                        dev_err(dev, "Failed to resolve %s-supply for %s\n",
                                rdev->supply_name, rdev->desc->name);
-                       return -EPROBE_DEFER;
+                       ret = -EPROBE_DEFER;
+                       goto out;
                }
        }
 
        if (r == rdev) {
                dev_err(dev, "Supply for %s (%s) resolved to itself\n",
                        rdev->desc->name, rdev->supply_name);
-               if (!have_full_constraints())
-                       return -EINVAL;
+               if (!have_full_constraints()) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                r = dummy_regulator_rdev;
                get_device(&r->dev);
        }
@@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
                if (!device_is_bound(r->dev.parent)) {
                        put_device(&r->dev);
-                       return -EPROBE_DEFER;
+                       ret = -EPROBE_DEFER;
+                       goto out;
                }
        }
 
@@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
        ret = regulator_resolve_supply(r);
        if (ret < 0) {
                put_device(&r->dev);
-               return ret;
+               goto out;
+       }
+
+       /*
+        * Recheck rdev->supply with rdev->mutex lock held to avoid a race
+        * between rdev->supply null check and setting rdev->supply in
+        * set_supply() from concurrent tasks.
+        */
+       regulator_lock(rdev);
+
+       /* Supply just resolved by a concurrent task? */
+       if (rdev->supply) {
+               regulator_unlock(rdev);
+               put_device(&r->dev);
+               goto out;
        }
 
        ret = set_supply(rdev, r);
        if (ret < 0) {
+               regulator_unlock(rdev);
                put_device(&r->dev);
-               return ret;
+               goto out;
        }
 
+       regulator_unlock(rdev);
+
        /*
         * In set_machine_constraints() we may have turned this regulator on
         * but we couldn't propagate to the supply if it hadn't been resolved
@@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
                if (ret < 0) {
                        _regulator_put(rdev->supply);
                        rdev->supply = NULL;
-                       return ret;
+                       goto out;
                }
        }
 
-       return 0;
+out:
+       return ret;
 }
 
 /* Internal regulator request function */
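
The rework above is a double-checked locking pattern: the unlocked "Supply already resolved?" test stays as a fast path, the possibly slow lookup runs without the lock, and rdev->supply is re-checked under regulator_lock() right before set_supply() so that a task which loses the race simply drops its reference and still succeeds. A minimal userspace sketch of the same shape (hypothetical names, pthreads standing in for the regulator locks; like the kernel code, the unlocked fast-path read is tolerated here for illustration):

	#include <pthread.h>
	#include <stdlib.h>

	struct resource { int dummy; };

	static struct resource *cached;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static struct resource *resolve(void)
	{
		struct resource *r;

		if (cached)			/* fast path, no lock taken */
			return cached;

		r = malloc(sizeof(*r));		/* the expensive lookup stands in here */
		if (!r)
			return NULL;

		pthread_mutex_lock(&lock);
		if (cached)			/* lost the race: drop our copy */
			free(r);
		else
			cached = r;
		pthread_mutex_unlock(&lock);

		return cached;
	}

	int main(void)
	{
		return resolve() ? 0 : 1;
	}
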
index 51e80bc70d42325d14aea0baa72dff39b1113f48..68a9ac6f2fe1776a1f3d2206f5a2145f815f51b0 100644 (file)
@@ -805,6 +805,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
 
        spin_lock_irq(&rtc_lock);
 
+       /* Ensure that the RTC is accessible. Bit 0-6 must be 0! */
+       if ((CMOS_READ(RTC_VALID) & 0x7f) != 0) {
+               spin_unlock_irq(&rtc_lock);
+               dev_warn(dev, "not accessible\n");
+               retval = -ENXIO;
+               goto cleanup1;
+       }
+
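
A short aside on the 0x7f mask used here (and again in the mc146818 read path further down): on an MC146818-style RTC, register D is assumed to carry only the VRT "valid RAM and time" flag in bit 7, with bits 0-6 reading back as zero on working hardware, so a non-zero value in the low seven bits is taken to mean the register access itself is broken. A tiny self-contained sketch of the test under that assumption:

	/* Assumed MC146818 register D layout: bit 7 = VRT, bits 0-6 read as 0. */
	#define RTC_VRT	0x80

	static int rtc_reg_d_looks_sane(unsigned char reg_d)
	{
		return (reg_d & 0x7f) == 0;	/* ignore VRT, the rest must be 0 */
	}
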
        if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
                /* force periodic irq to CMOS reset default of 1024Hz;
                 *
index 972a5b9a629d3bfd992bac180e623927b4144adb..f83c13818af3b6d207ae73226ef07a2b351118ea 100644 (file)
@@ -21,6 +21,13 @@ unsigned int mc146818_get_time(struct rtc_time *time)
 
 again:
        spin_lock_irqsave(&rtc_lock, flags);
+       /* Ensure that the RTC is accessible. Bit 0-6 must be 0! */
+       if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x7f) != 0)) {
+               spin_unlock_irqrestore(&rtc_lock, flags);
+               memset(time, 0xff, sizeof(*time));
+               return 0;
+       }
+
        /*
         * Check whether there is an update in progress during which the
         * readout is unspecified. The maximum update time is ~2ms. Poll
index 16bb135c20aa525de5581e73537f153ccd1358cb..03d27ee9cac65e62102fbbb9d0637b77164f9d8e 100644 (file)
@@ -1874,18 +1874,26 @@ void dasd_path_create_kobjects(struct dasd_device *device)
 }
 EXPORT_SYMBOL(dasd_path_create_kobjects);
 
-/*
- * As we keep kobjects for the lifetime of a device, this function must not be
- * called anywhere but in the context of offlining a device.
- */
-void dasd_path_remove_kobj(struct dasd_device *device, int chp)
+static void dasd_path_remove_kobj(struct dasd_device *device, int chp)
 {
        if (device->path[chp].in_sysfs) {
                kobject_put(&device->path[chp].kobj);
                device->path[chp].in_sysfs = false;
        }
 }
-EXPORT_SYMBOL(dasd_path_remove_kobj);
+
+/*
+ * As we keep kobjects for the lifetime of a device, this function must not be
+ * called anywhere but in the context of offlining a device.
+ */
+void dasd_path_remove_kobjects(struct dasd_device *device)
+{
+       int i;
+
+       for (i = 0; i < 8; i++)
+               dasd_path_remove_kobj(device, i);
+}
+EXPORT_SYMBOL(dasd_path_remove_kobjects);
 
 int dasd_add_sysfs_files(struct ccw_device *cdev)
 {
index 3caa1ee5f4b0ab03fc35de01f9662c67aedbe3c0..65eb87cbbb9b2d553aa85d0c039f98391623a584 100644 (file)
@@ -1036,7 +1036,6 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
                device->path[i].ssid = 0;
                device->path[i].chpid = 0;
                dasd_path_notoper(device, i);
-               dasd_path_remove_kobj(device, i);
        }
 }
 
@@ -2173,6 +2172,7 @@ out_err2:
        device->block = NULL;
 out_err1:
        dasd_eckd_clear_conf_data(device);
+       dasd_path_remove_kobjects(device);
        kfree(device->private);
        device->private = NULL;
        return rc;
@@ -2191,6 +2191,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
        private->vdsneq = NULL;
        private->gneq = NULL;
        dasd_eckd_clear_conf_data(device);
+       dasd_path_remove_kobjects(device);
 }
 
 static struct dasd_ccw_req *
index 3bc008f9136cc7d8c739514e96b6c259a0887c4d..b8a04c42d1d2e4926c4e166f909ad14527050e22 100644 (file)
@@ -858,7 +858,7 @@ int dasd_add_sysfs_files(struct ccw_device *);
 void dasd_remove_sysfs_files(struct ccw_device *);
 void dasd_path_create_kobj(struct dasd_device *, int);
 void dasd_path_create_kobjects(struct dasd_device *);
-void dasd_path_remove_kobj(struct dasd_device *, int);
+void dasd_path_remove_kobjects(struct dasd_device *);
 
 struct dasd_device *dasd_device_from_cdev(struct ccw_device *);
 struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
index be2520cc010bef249509eca4505e114026507331..7dc72cb718b0e2866226eefcde6a4832b99cbeb3 100644 (file)
@@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
 static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
 {
        struct vfio_ap_queue *q;
-       int apid, apqi;
 
        mutex_lock(&matrix_dev->lock);
        q = dev_get_drvdata(&apdev->device);
+       vfio_ap_mdev_reset_queue(q, 1);
        dev_set_drvdata(&apdev->device, NULL);
-       apid = AP_QID_CARD(q->apqn);
-       apqi = AP_QID_QUEUE(q->apqn);
-       vfio_ap_mdev_reset_queue(apid, apqi, 1);
-       vfio_ap_irq_disable(q);
        kfree(q);
        mutex_unlock(&matrix_dev->lock);
 }
index e0bde85187451bb4c66c61049a8541937fe83b36..41fc2e4135fe18714aba0a9e39748fe826886067 100644 (file)
@@ -25,6 +25,7 @@
 #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
 
 static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
 
 static int match_apqn(struct device *dev, const void *data)
 {
@@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue(
                                        int apqn)
 {
        struct vfio_ap_queue *q;
-       struct device *dev;
 
        if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm))
                return NULL;
        if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm))
                return NULL;
 
-       dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
-                                &apqn, match_apqn);
-       if (!dev)
-               return NULL;
-       q = dev_get_drvdata(dev);
-       q->matrix_mdev = matrix_mdev;
-       put_device(dev);
+       q = vfio_ap_find_queue(apqn);
+       if (q)
+               q->matrix_mdev = matrix_mdev;
 
        return q;
 }
@@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn)
  */
 static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
 {
-       if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev)
+       if (!q)
+               return;
+       if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+           !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
                kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
-       if (q->saved_pfn && q->matrix_mdev)
+               q->saved_isc = VFIO_AP_ISC_INVALID;
+       }
+       if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) {
                vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev),
                                 &q->saved_pfn, 1);
-       q->saved_pfn = 0;
-       q->saved_isc = VFIO_AP_ISC_INVALID;
+               q->saved_pfn = 0;
+       }
 }
 
 /**
@@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
  * Returns if ap_aqic function failed with invalid, deconfigured or
  * checkstopped AP.
  */
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
 {
        struct ap_qirq_ctrl aqic_gisa = {};
        struct ap_queue_status status;
@@ -1037,19 +1038,14 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
 {
        struct ap_matrix_mdev *m;
 
-       mutex_lock(&matrix_dev->lock);
-
        list_for_each_entry(m, &matrix_dev->mdev_list, node) {
-               if ((m != matrix_mdev) && (m->kvm == kvm)) {
-                       mutex_unlock(&matrix_dev->lock);
+               if ((m != matrix_mdev) && (m->kvm == kvm))
                        return -EPERM;
-               }
        }
 
        matrix_mdev->kvm = kvm;
        kvm_get_kvm(kvm);
        kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
-       mutex_unlock(&matrix_dev->lock);
 
        return 0;
 }
@@ -1083,79 +1079,118 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+{
+       kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+       matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
+       vfio_ap_mdev_reset_queues(matrix_mdev->mdev);
+       kvm_put_kvm(matrix_mdev->kvm);
+       matrix_mdev->kvm = NULL;
+}
+
 static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
                                       unsigned long action, void *data)
 {
-       int ret;
+       int ret, notify_rc = NOTIFY_OK;
        struct ap_matrix_mdev *matrix_mdev;
 
        if (action != VFIO_GROUP_NOTIFY_SET_KVM)
                return NOTIFY_OK;
 
        matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+       mutex_lock(&matrix_dev->lock);
 
        if (!data) {
-               matrix_mdev->kvm = NULL;
-               return NOTIFY_OK;
+               if (matrix_mdev->kvm)
+                       vfio_ap_mdev_unset_kvm(matrix_mdev);
+               goto notify_done;
        }
 
        ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
-       if (ret)
-               return NOTIFY_DONE;
+       if (ret) {
+               notify_rc = NOTIFY_DONE;
+               goto notify_done;
+       }
 
        /* If there is no CRYCB pointer, then we can't copy the masks */
-       if (!matrix_mdev->kvm->arch.crypto.crycbd)
-               return NOTIFY_DONE;
+       if (!matrix_mdev->kvm->arch.crypto.crycbd) {
+               notify_rc = NOTIFY_DONE;
+               goto notify_done;
+       }
 
        kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
                                  matrix_mdev->matrix.aqm,
                                  matrix_mdev->matrix.adm);
 
-       return NOTIFY_OK;
+notify_done:
+       mutex_unlock(&matrix_dev->lock);
+       return notify_rc;
 }
 
-static void vfio_ap_irq_disable_apqn(int apqn)
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
 {
        struct device *dev;
-       struct vfio_ap_queue *q;
+       struct vfio_ap_queue *q = NULL;
 
        dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL,
                                 &apqn, match_apqn);
        if (dev) {
                q = dev_get_drvdata(dev);
-               vfio_ap_irq_disable(q);
                put_device(dev);
        }
+
+       return q;
 }
 
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
                             unsigned int retry)
 {
        struct ap_queue_status status;
+       int ret;
        int retry2 = 2;
-       int apqn = AP_MKQID(apid, apqi);
 
-       do {
-               status = ap_zapq(apqn);
-               switch (status.response_code) {
-               case AP_RESPONSE_NORMAL:
-                       while (!status.queue_empty && retry2--) {
-                               msleep(20);
-                               status = ap_tapq(apqn, NULL);
-                       }
-                       WARN_ON_ONCE(retry2 <= 0);
-                       return 0;
-               case AP_RESPONSE_RESET_IN_PROGRESS:
-               case AP_RESPONSE_BUSY:
+       if (!q)
+               return 0;
+
+retry_zapq:
+       status = ap_zapq(q->apqn);
+       switch (status.response_code) {
+       case AP_RESPONSE_NORMAL:
+               ret = 0;
+               break;
+       case AP_RESPONSE_RESET_IN_PROGRESS:
+               if (retry--) {
                        msleep(20);
-                       break;
-               default:
-                       /* things are really broken, give up */
-                       return -EIO;
+                       goto retry_zapq;
                }
-       } while (retry--);
+               ret = -EBUSY;
+               break;
+       case AP_RESPONSE_Q_NOT_AVAIL:
+       case AP_RESPONSE_DECONFIGURED:
+       case AP_RESPONSE_CHECKSTOPPED:
+               WARN_ON_ONCE(status.irq_enabled);
+               ret = -EBUSY;
+               goto free_resources;
+       default:
+               /* things are really broken, give up */
+               WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n",
+                    status.response_code);
+               return -EIO;
+       }
+
+       /* wait for the reset to take effect */
+       while (retry2--) {
+               if (status.queue_empty && !status.irq_enabled)
+                       break;
+               msleep(20);
+               status = ap_tapq(q->apqn, NULL);
+       }
+       WARN_ON_ONCE(retry2 <= 0);
 
-       return -EBUSY;
+free_resources:
+       vfio_ap_free_aqic_resources(q);
+
+       return ret;
 }
 
 static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
@@ -1163,13 +1198,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
        int ret;
        int rc = 0;
        unsigned long apid, apqi;
+       struct vfio_ap_queue *q;
        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
 
        for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
                             matrix_mdev->matrix.apm_max + 1) {
                for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
                                     matrix_mdev->matrix.aqm_max + 1) {
-                       ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+                       q = vfio_ap_find_queue(AP_MKQID(apid, apqi));
+                       ret = vfio_ap_mdev_reset_queue(q, 1);
                        /*
                         * Regardless whether a queue turns out to be busy, or
                         * is not operational, we need to continue resetting
@@ -1177,7 +1214,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
                         */
                        if (ret)
                                rc = ret;
-                       vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi));
                }
        }
 
@@ -1222,13 +1258,8 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev)
        struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
 
        mutex_lock(&matrix_dev->lock);
-       if (matrix_mdev->kvm) {
-               kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
-               matrix_mdev->kvm->arch.crypto.pqap_hook = NULL;
-               vfio_ap_mdev_reset_queues(mdev);
-               kvm_put_kvm(matrix_mdev->kvm);
-               matrix_mdev->kvm = NULL;
-       }
+       if (matrix_mdev->kvm)
+               vfio_ap_mdev_unset_kvm(matrix_mdev);
        mutex_unlock(&matrix_dev->lock);
 
        vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
index f46dde56b4644fc9965406a067775806d722a220..28e9d998976820deac127d45ed48c53a1c3d998f 100644 (file)
@@ -88,11 +88,6 @@ struct ap_matrix_mdev {
        struct mdev_device *mdev;
 };
 
-extern int vfio_ap_mdev_register(void);
-extern void vfio_ap_mdev_unregister(void);
-int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
-                            unsigned int retry);
-
 struct vfio_ap_queue {
        struct ap_matrix_mdev *matrix_mdev;
        unsigned long saved_pfn;
@@ -100,5 +95,10 @@ struct vfio_ap_queue {
 #define VFIO_AP_ISC_INVALID 0xff
        unsigned char saved_isc;
 };
-struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q);
+
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
+                            unsigned int retry);
+
 #endif /* _VFIO_AP_PRIVATE_H_ */
index a2beee6e09f06b2cdd644e1e819e69e1cdc427f2..5988c300cc82edf1d6fe1f1f4c8b909653ffee91 100644 (file)
@@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
        fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
        if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone  */
                pr_err("error in devcmd2 init");
-               return -ENODEV;
+               err = -ENODEV;
+               goto err_free_wq;
        }
 
        /*
@@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
        err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
                        DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
        if (err)
-               goto err_free_wq;
+               goto err_disable_wq;
 
        vdev->devcmd2->result =
                (struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
 
 err_free_desc_ring:
        vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
        vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
        vnic_wq_free(&vdev->devcmd2->wq);
 err_free_devcmd2:
        kfree(vdev->devcmd2);
index 42e4d35e0d3556985e1685c642c9cfcb04f84930..65f168c41d233211ea6a34f294ce11ca2bf14af5 100644 (file)
@@ -1744,7 +1744,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
                iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
        }
 
-       vfc_cmd->correlation = cpu_to_be64(evt);
+       vfc_cmd->correlation = cpu_to_be64((u64)evt);
 
        if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
                return ibmvfc_send_event(evt, vhost, 0);
@@ -2418,7 +2418,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
                evt->sync_iu = &rsp_iu;
 
-               tmf->correlation = cpu_to_be64(evt);
+               tmf->correlation = cpu_to_be64((u64)evt);
 
                init_completion(&evt->comp);
                rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
@@ -3007,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
        unsigned long flags = 0;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       if (sdev->type == TYPE_DISK)
+       if (sdev->type == TYPE_DISK) {
                sdev->allow_restart = 1;
+               blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+       }
        spin_unlock_irqrestore(shost->host_lock, flags);
        return 0;
 }
index d71afae6191cbe7be99f006e563dda0ec89b8643..841000445b9a16f31a28b94f4aa7bd2d4ad4bd7d 100644 (file)
@@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
                rc = fc_exch_done_locked(ep);
                WARN_ON(fc_seq_exch(sp) != ep);
                spin_unlock_bh(&ep->ex_lock);
-               if (!rc)
+               if (!rc) {
                        fc_exch_delete(ep);
+               } else {
+                       FC_EXCH_DBG(ep, "ep is completed already, "
+                                       "hence skip calling the resp\n");
+                       goto skip_resp;
+               }
        }
 
        /*
@@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
        if (!fc_invoke_resp(ep, sp, fp))
                fc_frame_free(fp);
 
+skip_resp:
        fc_exch_release(ep);
        return;
 rel:
@@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep)
 
        fc_exch_hold(ep);
 
-       if (!rc)
+       if (!rc) {
                fc_exch_delete(ep);
+       } else {
+               FC_EXCH_DBG(ep, "ep is completed already, "
+                               "hence skip calling the resp\n");
+               goto skip_resp;
+       }
 
        fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
        fc_seq_set_resp(sp, NULL, ep->arg);
        fc_exch_release(ep);
 }
index 1cb82fa6a60e4c40d6e1c9d35287c274dc286fd0..39d147e251bf4f2e103a9b003b2fcf4a025b880a 100644 (file)
@@ -559,6 +559,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                return -ENODEV;
        }
 
+       if (!vport->phba->sli4_hba.nvmels_wq)
+               return -ENOMEM;
+
        /*
         * there are two dma buf in the request, actually there is one and
         * the second one is just the start address + cmd size.
index af192096a82b172ec188d83e2619f8da4216f21d..63a4f48bdc755a76c421a42976b2ba28b3f68bbe 100644 (file)
@@ -8244,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
                        goto out;
                }
 
+               /* always store 64 bits regardless of addressing */
                sense_ptr = (void *)cmd->frame + ioc->sense_off;
-               if (instance->consistent_mask_64bit)
-                       put_unaligned_le64(sense_handle, sense_ptr);
-               else
-                       put_unaligned_le32(sense_handle, sense_ptr);
+               put_unaligned_le64(sense_handle, sense_ptr);
        }
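
The switch to an unconditional put_unaligned_le64() above relies on a little-endian property: for a value that fits in 32 bits, the first four bytes of the 64-bit encoding are exactly the 32-bit encoding, and the remaining four are zero (assumed to be ignored or harmless when the firmware runs in 32-bit addressing mode). A host-independent sketch of that property:

	#include <assert.h>
	#include <stdint.h>

	static void put_le(unsigned char *p, uint64_t v, int bytes)
	{
		for (int i = 0; i < bytes; i++)
			p[i] = v >> (8 * i);	/* least significant byte first */
	}

	int main(void)
	{
		unsigned char b64[8], b32[4];
		uint64_t handle = 0x12345678;	/* a handle that fits in 32 bits */

		put_le(b64, handle, 8);
		put_le(b32, handle, 4);
		assert(b64[0] == b32[0] && b64[1] == b32[1] &&
		       b64[2] == b32[2] && b64[3] == b32[3]);
		assert(b64[4] == 0 && b64[5] == 0 && b64[6] == 0 && b64[7] == 0);
		return 0;
	}
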
 
        /*
index f80abe28f35a4da7630a5fbc572e03c7b9a733c1..0e0fe5b094966bb6cc5b5411d3ba2064faabe1a6 100644 (file)
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
 int ql2xenforce_iocb_limit = 1;
 module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql2xenforce_iocb_limit,
-                "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+                "Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
 
 /*
  * CT6 CTX allocation cache
index cba1cf6a1c12dc720277b20e6ecafbc30a3e5677..1e939a2a387f3f60d26cc35577aff20d22522534 100644 (file)
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
        res = mutex_lock_interruptible(&rport->mutex);
        if (res)
                goto out;
-       scsi_target_block(&shost->shost_gendev);
+       if (rport->state != SRP_RPORT_FAIL_FAST)
+               /*
+                * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+                * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+                * later is ok though, scsi_internal_device_unblock_nowait()
+                * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+                */
+               scsi_target_block(&shost->shost_gendev);
        res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
        pr_debug("%s (state %d): transport.reconnect() returned %d\n",
                 dev_name(&shost->shost_gendev), rport->state, res);
index 3f6dfed4fe84b2178695c851f2892457e82bcb68..b915b38c2b277c5f132a90cdd31d3e9d931fd5ea 100644 (file)
@@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI
 config SCSI_UFSHCD_PLATFORM
        tristate "Platform bus based UFS Controller support"
        depends on SCSI_UFSHCD
+       depends on HAS_IOMEM
        help
        This selects the UFS host controller support. Select this if
        you have an UFS controller on Platform bus.
index e31d2c5c7b23b7d152322723fdc449eb5eb5400d..fb32d122f2e3866f6ce3053f4dfc4c07732142b1 100644 (file)
@@ -3996,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
        if (ret)
                dev_err(hba->dev, "%s: link recovery failed, err %d",
                        __func__, ret);
+       else
+               ufshcd_clear_ua_wluns(hba);
 
        return ret;
 }
@@ -4992,7 +4994,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                break;
        } /* end of switch */
 
-       if ((host_byte(result) != DID_OK) && !hba->silence_err_logs)
+       if ((host_byte(result) != DID_OK) &&
+           (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
                ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
        return result;
 }
@@ -6001,6 +6004,9 @@ skip_err_handling:
        ufshcd_scsi_unblock_requests(hba);
        ufshcd_err_handling_unprepare(hba);
        up(&hba->eh_sem);
+
+       if (!err && needs_reset)
+               ufshcd_clear_ua_wluns(hba);
 }
 
 /**
@@ -6295,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
                intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
        }
 
-       if (enabled_intr_status && retval == IRQ_NONE) {
-               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
-                                       __func__, intr_status);
+       if (enabled_intr_status && retval == IRQ_NONE &&
+                               !ufshcd_eh_in_progress(hba)) {
+               dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
+                                       __func__,
+                                       intr_status,
+                                       hba->ufs_stats.last_intr_status,
+                                       enabled_intr_status);
                ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
        }
 
@@ -6341,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
         * Even though we use wait_event() which sleeps indefinitely,
         * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
         */
-       req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
+       req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
+
        req->end_io_data = &wait;
        free_slot = req->tag;
        WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs);
@@ -6938,14 +6951,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
        ufshcd_set_clk_freq(hba, true);
 
        err = ufshcd_hba_enable(hba);
-       if (err)
-               goto out;
 
        /* Establish the link again and restore the device */
-       err = ufshcd_probe_hba(hba, false);
        if (!err)
-               ufshcd_clear_ua_wluns(hba);
-out:
+               err = ufshcd_probe_hba(hba, false);
+
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
        ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
@@ -7716,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
        if (ret)
                goto out;
 
+       ufshcd_clear_ua_wluns(hba);
+
        /* Initialize devfreq after UFS device is detected */
        if (ufshcd_is_clkscaling_supported(hba)) {
                memcpy(&hba->clk_scaling.saved_pwr_info.info,
@@ -7917,8 +7929,6 @@ out:
                pm_runtime_put_sync(hba->dev);
                ufshcd_exit_clk_scaling(hba);
                ufshcd_hba_exit(hba);
-       } else {
-               ufshcd_clear_ua_wluns(hba);
        }
 }
 
@@ -8775,6 +8785,7 @@ enable_gating:
                ufshcd_resume_clkscaling(hba);
        hba->clk_gating.is_suspended = false;
        hba->dev_info.b_rpm_dev_flush_capable = false;
+       ufshcd_clear_ua_wluns(hba);
        ufshcd_release(hba);
 out:
        if (hba->dev_info.b_rpm_dev_flush_capable) {
@@ -8885,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
                cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
        }
 
+       ufshcd_clear_ua_wluns(hba);
+
        /* Schedule clock gating in case of no access to UFS device yet */
        ufshcd_release(hba);
 
index f8e070d67fa3266d8c921c58dfb92c74373a3f26..a14684ffe4c1a8ef459a1ce9c5e6691b9e7e6601 100644 (file)
@@ -214,7 +214,7 @@ int __init register_intc_controller(struct intc_desc *desc)
                        d->window[k].phys = res->start;
                        d->window[k].size = resource_size(res);
                        d->window[k].virt = ioremap(res->start,
-                                                        resource_size(res));
+                                                   resource_size(res));
                        if (!d->window[k].virt)
                                goto err2;
                }
index 9e62ba9311f07744e78063560046d34918fd4874..939915a07d9997fcc7d6efbb66c0c992d2bf96d1 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/debugfs.h>
 #include "internals.h"
 
-static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
+static int intc_irq_xlate_show(struct seq_file *m, void *priv)
 {
        int i;
 
@@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
        return 0;
 }
 
-static int intc_irq_xlate_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, intc_irq_xlate_debug, inode->i_private);
-}
-
-static const struct file_operations intc_irq_xlate_fops = {
-       .open = intc_irq_xlate_open,
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate);
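
The rename from intc_irq_xlate_debug to intc_irq_xlate_show is what makes DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate) work: the macro expects a <name>_show() function and generates roughly the open handler and file_operations that were hand-written before. Approximately (a simplified rendering of the macro in include/linux/seq_file.h):

	static int intc_irq_xlate_open(struct inode *inode, struct file *file)
	{
		return single_open(file, intc_irq_xlate_show, inode->i_private);
	}

	static const struct file_operations intc_irq_xlate_fops = {
		.owner		= THIS_MODULE,
		.open		= intc_irq_xlate_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
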
 
 static int __init intc_irq_xlate_init(void)
 {
index c4472b68b7c2a2397d4bd037ff8b85697a845900..698d21f505165b2b0ef11838e40b553b9f5ec2b0 100644 (file)
@@ -271,8 +271,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs)
        return soc_dev;
 }
 
+static const struct of_device_id at91_soc_allowed_list[] __initconst = {
+       { .compatible = "atmel,at91rm9200", },
+       { .compatible = "atmel,at91sam9", },
+       { .compatible = "atmel,sama5", },
+       { .compatible = "atmel,samv7", },
+       { }
+};
+
 static int __init atmel_soc_device_init(void)
 {
+       struct device_node *np = of_find_node_by_path("/");
+
+       if (!of_match_node(at91_soc_allowed_list, np))
+               return 0;
+
        at91_soc_init(socs);
 
        return 0;
index a9370f4aacca97b1e00c5bb56e7d84fda8d932b5..05812f8ae73406f3f706efc953a5878af82659b3 100644 (file)
@@ -13,7 +13,7 @@ config SOC_IMX8M
        depends on ARCH_MXC || COMPILE_TEST
        default ARCH_MXC && ARM64
        select SOC_BUS
-       select ARM_GIC_V3 if ARCH_MXC
+       select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
        help
          If you say yes here you get support for the NXP i.MX8M family
          support, it will provide the SoC info like SoC family,
index 7c6b009b6f6cfdb2b00e9a0dfd1f3669ce361b41..7a7c38282e114cdc85fb9ee109aade42baf15f42 100644 (file)
@@ -8,6 +8,7 @@ config LITEX
 config LITEX_SOC_CONTROLLER
        tristate "Enable LiteX SoC Controller driver"
        depends on OF || COMPILE_TEST
+       depends on HAS_IOMEM
        select LITEX
        help
          This option enables the SoC Controller Driver which verifies
index 1217cafdfd4d1d2b691bc6d487dcce5d63755d3e..9b076638457087a01ea168dd67eb8f3981d58e9b 100644 (file)
@@ -140,12 +140,13 @@ struct litex_soc_ctrl_device {
        void __iomem *base;
 };
 
+#ifdef CONFIG_OF
 static const struct of_device_id litex_soc_ctrl_of_match[] = {
        {.compatible = "litex,soc-controller"},
        {},
 };
-
 MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+#endif /* CONFIG_OF */
 
 static int litex_soc_ctrl_probe(struct platform_device *pdev)
 {
index e9925c8487d741f29c6f25cd9ee004619a335859..d90e4a264b6f6bfa4ada206c7a7f14bde14856a3 100644 (file)
@@ -23,12 +23,7 @@ static const char * const sunxi_mbus_devices[] = {
        "allwinner,sun7i-a20-display-engine",
        "allwinner,sun8i-a23-display-engine",
        "allwinner,sun8i-a33-display-engine",
-       "allwinner,sun8i-a83t-display-engine",
-       "allwinner,sun8i-h3-display-engine",
-       "allwinner,sun8i-r40-display-engine",
-       "allwinner,sun8i-v3s-display-engine",
        "allwinner,sun9i-a80-display-engine",
-       "allwinner,sun50i-a64-display-engine",
 
        /*
         * And now we have the regular devices connected to the MBUS
index 77f0051358f1c19c68407cb7f0913c720cf3ea53..bf1468e5bccbaa50c44d3ce81b59e37ab7b057e2 100644 (file)
@@ -860,6 +860,7 @@ static int omap_prm_reset_init(struct platform_device *pdev,
        const struct omap_rst_map *map;
        struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
        char buf[32];
+       u32 v;
 
        /*
         * Check if we have controllable resets. If either rstctrl is non-zero
@@ -907,6 +908,16 @@ static int omap_prm_reset_init(struct platform_device *pdev,
                map++;
        }
 
+       /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+       if (prm->data->rstmap == rst_map_012) {
+               v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+               if ((v & reset->mask) != reset->mask) {
+                       dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+                       writel_relaxed(reset->mask, reset->prm->base +
+                                      reset->prm->data->rstctrl);
+               }
+       }
+
        return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
 }
 
index cbc4c28c1541cba3370661b2e7015a15c04b19be..62ea0c9e321b4c1e7e095ed6f70091ae775c5e5b 100644 (file)
@@ -254,7 +254,8 @@ static int altera_spi_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev,
                                "Invalid number of chipselect: %hu\n",
                                pdata->num_chipselect);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto exit;
                }
 
                master->num_chipselect = pdata->num_chipselect;
index 70467b9d61baa3d13e3851e3abd25a36b2f5919e..a3afd1b9ac567bcc48c809edef5d0509ae5dd0aa 100644 (file)
@@ -115,6 +115,7 @@ struct cdns_spi {
        void __iomem *regs;
        struct clk *ref_clk;
        struct clk *pclk;
+       unsigned int clk_rate;
        u32 speed_hz;
        const u8 *txbuf;
        u8 *rxbuf;
@@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi,
        u32 ctrl_reg, baud_rate_val;
        unsigned long frequency;
 
-       frequency = clk_get_rate(xspi->ref_clk);
+       frequency = xspi->clk_rate;
 
        ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
 
@@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
        master->auto_runtime_pm = true;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
 
+       xspi->clk_rate = clk_get_rate(xspi->ref_clk);
        /* Set to default valid value */
-       master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+       master->max_speed_hz = xspi->clk_rate / 4;
        xspi->speed_hz = master->max_speed_hz;
 
        master->bits_per_word_mask = SPI_BPW_MASK(8);
index 9494257e1c33f56173dfefb7ef7c2b49100adc01..6d8e0a05a53554b4393752a6a6cad93b6276d548 100644 (file)
@@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
 {
        struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
        struct fsl_spi_platform_data *pdata;
-       bool pol = spi->mode & SPI_CS_HIGH;
        struct spi_mpc8xxx_cs   *cs = spi->controller_state;
 
        pdata = spi->dev.parent->parent->platform_data;
 
        if (value == BITBANG_CS_INACTIVE) {
                if (pdata->cs_control)
-                       pdata->cs_control(spi, !pol);
+                       pdata->cs_control(spi, false);
        }
 
        if (value == BITBANG_CS_ACTIVE) {
@@ -134,7 +133,7 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value)
                fsl_spi_change_mode(spi);
 
                if (pdata->cs_control)
-                       pdata->cs_control(spi, pol);
+                       pdata->cs_control(spi, true);
        }
 }
 
index 859910ec8d9f6b649a9de491d830f6b718d82f1e..8cb4d923aeaab3b9c67332f86b7fa72dc924b484 100644 (file)
@@ -682,6 +682,7 @@ static const struct of_device_id spidev_dt_ids[] = {
        { .compatible = "lwn,bk4" },
        { .compatible = "dh,dhcom-board" },
        { .compatible = "menlo,m53cpld" },
+       { .compatible = "cisco,spi-petra" },
        {},
 };
 MODULE_DEVICE_TABLE(of, spidev_dt_ids);
index b668a82d40ad4606b21832aa1f86dcfd225892e3..f5fbdbc4ffdb183b3bf00a27e7c6b95a25668953 100644 (file)
@@ -367,7 +367,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx)
 
        hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
        raw_fmt->width = encoded_fmt->width;
-       raw_fmt->width = encoded_fmt->width;
+       raw_fmt->height = encoded_fmt->height;
        if (ctx->is_encoder)
                hantro_set_fmt_out(ctx, raw_fmt);
        else
index 781c84a9b1b79e39d10f4f47bab7e5e099e23851..de7442d4834dcab1237dc7f9558021c6871c1551 100644 (file)
@@ -203,7 +203,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx,
                position = cedrus_buf->codec.h264.position;
 
                sram_array[i] |= position << 1;
-               if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+               if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF)
                        sram_array[i] |= BIT(0);
        }
 
index ab5a8627d3712e07ae3163ecbaf7d14544b35467..f798b0c744a4fc028cb6833b4795ca49b6673cc2 100644 (file)
@@ -20,9 +20,9 @@ enum country_code_type_t {
        COUNTRY_CODE_MAX
 };
 
-int rtw_regd_init(struct adapter *padapter,
-       void (*reg_notifier)(struct wiphy *wiphy,
-               struct regulatory_request *request));
+void rtw_regd_init(struct wiphy *wiphy,
+                  void (*reg_notifier)(struct wiphy *wiphy,
+                                       struct regulatory_request *request));
 void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
 
 
index bf1417236161859e1f0029862ab8f77c2efc0563..11032316c53dcc3bcf086161e48468ae7a0bf8b4 100644 (file)
@@ -3211,9 +3211,6 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter)
                        rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type);
        }
 
-       /* init regulary domain */
-       rtw_regd_init(padapter, rtw_reg_notifier);
-
        /* copy mac_addr to wiphy */
        memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
 
@@ -3328,6 +3325,9 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev)
        *((struct adapter **)wiphy_priv(wiphy)) = padapter;
        rtw_cfg80211_preinit_wiphy(padapter, wiphy);
 
+       /* init regulary domain */
+       rtw_regd_init(wiphy, rtw_reg_notifier);
+
        ret = wiphy_register(wiphy);
        if (ret < 0) {
                DBG_8192C("Couldn't register wiphy device\n");
index b2208e5f190ad3f92d929956e65aa0456420ed0d..301ffff12e82ba590e74e971d1a8796fb0239df7 100644 (file)
@@ -339,8 +339,6 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
 
        padapter = rtw_netdev_priv(pnetdev);
 
-       rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
-
        /* 3 3. init driver special setting, interface, OS and hardware relative */
 
        /* 4 3.1 set hardware operation functions */
@@ -378,6 +376,8 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct
                goto free_hal_data;
        }
 
+       rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj));
+
        /* 3 8. get WLan MAC address */
        /*  set mac addr */
        rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr);
index 578b9f734231e0ce0a81d8c1d3045eb728ce3773..2833fc6901e6e4ad46f6ab0c96b4bb988d0d58f2 100644 (file)
@@ -139,15 +139,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg,
        _rtw_reg_apply_flags(wiphy);
 }
 
-int rtw_regd_init(struct adapter *padapter,
-                 void (*reg_notifier)(struct wiphy *wiphy,
-                                      struct regulatory_request *request))
+void rtw_regd_init(struct wiphy *wiphy,
+                  void (*reg_notifier)(struct wiphy *wiphy,
+                                       struct regulatory_request *request))
 {
-       struct wiphy *wiphy = padapter->rtw_wdev->wiphy;
-
        _rtw_regd_init_wiphy(NULL, wiphy, reg_notifier);
-
-       return 0;
 }
 
 void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
index 893d1b406c29254ca0bb33fa19b4bcadd0253192..1a9c50401bdb55b1c1a63552b81187bf517da616 100644 (file)
@@ -896,7 +896,7 @@ int iscsit_setup_np(
        else
                len = sizeof(struct sockaddr_in);
        /*
-        * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
+        * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
         */
        if (np->np_network_transport == ISCSI_TCP)
                tcp_sock_set_nodelay(sock->sk);
index 6b171fff007b63dec6d1cb2d0a4e1cd96bc76b63..a5991df2358113cfc7639a23182cc7d5c814392a 100644 (file)
@@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
 
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
 {
-       if (tcmu_cmd->se_cmd)
-               tcmu_cmd->se_cmd->priv = NULL;
        kfree(tcmu_cmd->dbi);
        kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
 }
@@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd)
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        mutex_lock(&udev->cmdr_lock);
-       se_cmd->priv = tcmu_cmd;
        if (!(se_cmd->transport_state & CMD_T_ABORTED))
                ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
        if (ret < 0)
                tcmu_free_cmd(tcmu_cmd);
+       else
+               se_cmd->priv = tcmu_cmd;
        mutex_unlock(&udev->cmdr_lock);
        return scsi_ret;
 }
@@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
 
                list_del_init(&cmd->queue_entry);
                tcmu_free_cmd(cmd);
+               se_cmd->priv = NULL;
                target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
                unqueued = true;
        }
@@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
        }
 
 done:
+       se_cmd->priv = NULL;
        if (read_len_valid) {
                pr_debug("read_len = %d\n", read_len);
                target_complete_cmd_with_length(cmd->se_cmd,
@@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
        se_cmd = cmd->se_cmd;
        tcmu_free_cmd(cmd);
 
+       se_cmd->priv = NULL;
        target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
 }
 
@@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
                         * removed then LIO core will do the right thing and
                         * fail the retry.
                         */
+                       tcmu_cmd->se_cmd->priv = NULL;
                        target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
                        tcmu_free_cmd(tcmu_cmd);
                        continue;
@@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
                         * Ignore scsi_ret for now. target_complete_cmd
                         * drops it.
                         */
+                       tcmu_cmd->se_cmd->priv = NULL;
                        target_complete_cmd(tcmu_cmd->se_cmd,
                                            SAM_STAT_CHECK_CONDITION);
                        tcmu_free_cmd(tcmu_cmd);
@@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
                if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
                        WARN_ON(!cmd->se_cmd);
                        list_del_init(&cmd->queue_entry);
+                       cmd->se_cmd->priv = NULL;
                        if (err_level == 1) {
                                /*
                                 * Userspace was not able to start the
index c981757ba0d4052733484c89a566be15255c08fc..780d7c4fd75653db9a7d81713eebeb32c94ccd0b 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/tee_drv.h>
 #include <linux/types.h>
@@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
                         */
                        optee_cq_wait_for_completion(&optee->call_queue, &w);
                } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
-                       might_sleep();
+                       if (need_resched())
+                               cond_resched();
                        param.a0 = res.a0;
                        param.a1 = res.a1;
                        param.a2 = res.a2;
index a5f988a9f9482bd54eef3f96cce4928c42ee02a5..b5442f979b4d0170950d9ded3d3dbe9e3066d6da 100644 (file)
@@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
         * managed with the xHCI and the SuperSpeed hub so we create the
         * link from xHCI instead.
         */
-       while (!dev_is_pci(dev))
+       while (dev && !dev_is_pci(dev))
                dev = dev->parent;
 
        if (!dev)
index 8b7f941a9bb7f104f79f205cd236053e34cdc293..b8c4159bc32d0163c0454fc01dd761f860d0ed24 100644 (file)
@@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
 
        if (auth && auth->reply.route_hi == sw->config.route_hi &&
            auth->reply.route_lo == sw->config.route_lo) {
-               tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n",
+               tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
                       tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
                if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
                        ret = -EIO;
index 319d68c8a5df32b62eb7fec1e7f518910f971ea0..219e85756171be8ecd75132001271252c2d28ac9 100644 (file)
@@ -2081,9 +2081,6 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
        return 0;
 }
 
-extern ssize_t redirected_tty_write(struct file *, const char __user *,
-                                                       size_t, loff_t *);
-
 /**
  *     job_control             -       check job control
  *     @tty: tty
@@ -2105,7 +2102,7 @@ static int job_control(struct tty_struct *tty, struct file *file)
        /* NOTE: not yet done after every sleep pending a thorough
           check of the logic of this change. -- jlc */
        /* don't stop on /dev/console */
-       if (file->f_op->write == redirected_tty_write)
+       if (file->f_op->write_iter == redirected_tty_write)
                return 0;
 
        return __tty_check_change(tty, SIGTTIN);
@@ -2309,7 +2306,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
        ssize_t retval = 0;
 
        /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */
-       if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) {
+       if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) {
                retval = tty_check_change(tty);
                if (retval)
                        return retval;
index 118b29912289845328e91cba182ecc56ad586473..e0c00a1b07639b5a12a583059af4150c3f8d44aa 100644 (file)
@@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port)
                                  (val & STAT_TX_RDY(port)), 1, 10000);
 }
 
+static void wait_for_xmite(struct uart_port *port)
+{
+       u32 val;
+
+       readl_poll_timeout_atomic(port->membase + UART_STAT, val,
+                                 (val & STAT_TX_EMP), 1, 10000);
+}
+
 static void mvebu_uart_console_putchar(struct uart_port *port, int ch)
 {
        wait_for_xmitr(port);
@@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s,
 
        uart_console_write(port, s, count, mvebu_uart_console_putchar);
 
-       wait_for_xmitr(port);
+       wait_for_xmite(port);
 
        if (ier)
                writel(ier, port->membase + UART_CTRL(port));
index 8034489337d75241b1a42d781839679446e83d82..816e709afa5612941fa1241cf21c768710f51282 100644 (file)
@@ -143,12 +143,9 @@ LIST_HEAD(tty_drivers);                    /* linked list of tty drivers */
 DEFINE_MUTEX(tty_mutex);
 
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
-ssize_t redirected_tty_write(struct file *, const char __user *,
-                                                       size_t, loff_t *);
+static ssize_t tty_write(struct kiocb *, struct iov_iter *);
 static __poll_t tty_poll(struct file *, poll_table *);
 static int tty_open(struct inode *, struct file *);
-long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long tty_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg);
@@ -438,8 +435,7 @@ static ssize_t hung_up_tty_read(struct file *file, char __user *buf,
        return 0;
 }
 
-static ssize_t hung_up_tty_write(struct file *file, const char __user *buf,
-                                size_t count, loff_t *ppos)
+static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from)
 {
        return -EIO;
 }
@@ -478,7 +474,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file)
 static const struct file_operations tty_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = tty_write,
+       .write_iter     = tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -491,7 +488,8 @@ static const struct file_operations tty_fops = {
 static const struct file_operations console_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
-       .write          = redirected_tty_write,
+       .write_iter     = redirected_tty_write,
+       .splice_write   = iter_file_splice_write,
        .poll           = tty_poll,
        .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
@@ -503,7 +501,7 @@ static const struct file_operations console_fops = {
 static const struct file_operations hung_up_tty_fops = {
        .llseek         = no_llseek,
        .read           = hung_up_tty_read,
-       .write          = hung_up_tty_write,
+       .write_iter     = hung_up_tty_write,
        .poll           = hung_up_tty_poll,
        .unlocked_ioctl = hung_up_tty_ioctl,
        .compat_ioctl   = hung_up_tty_compat_ioctl,
@@ -606,9 +604,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
        list_for_each_entry(priv, &tty->tty_files, list) {
                filp = priv->file;
-               if (filp->f_op->write == redirected_tty_write)
+               if (filp->f_op->write_iter == redirected_tty_write)
                        cons_filp = filp;
-               if (filp->f_op->write != tty_write)
+               if (filp->f_op->write_iter != tty_write)
                        continue;
                closecount++;
                __tty_fasync(-1, filp, 0);      /* can't block */
@@ -901,9 +899,9 @@ static inline ssize_t do_tty_write(
        ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t),
        struct tty_struct *tty,
        struct file *file,
-       const char __user *buf,
-       size_t count)
+       struct iov_iter *from)
 {
+       size_t count = iov_iter_count(from);
        ssize_t ret, written = 0;
        unsigned int chunk;
 
@@ -955,14 +953,20 @@ static inline ssize_t do_tty_write(
                size_t size = count;
                if (size > chunk)
                        size = chunk;
+
                ret = -EFAULT;
-               if (copy_from_user(tty->write_buf, buf, size))
+               if (copy_from_iter(tty->write_buf, size, from) != size)
                        break;
+
                ret = write(tty, file, tty->write_buf, size);
                if (ret <= 0)
                        break;
+
+               /* FIXME! Have Al check this! */
+               if (ret != size)
+                       iov_iter_revert(from, size-ret);
+
                written += ret;
-               buf += ret;
                count -= ret;
                if (!count)
                        break;
@@ -1022,8 +1026,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
  *     write method will not be invoked in parallel for each device.
  */
 
-static ssize_t tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from)
 {
        struct tty_struct *tty = file_tty(file);
        struct tty_ldisc *ld;
@@ -1038,17 +1041,21 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
                tty_err(tty, "missing write_room method\n");
        ld = tty_ldisc_ref_wait(tty);
        if (!ld)
-               return hung_up_tty_write(file, buf, count, ppos);
+               return hung_up_tty_write(iocb, from);
        if (!ld->ops->write)
                ret = -EIO;
        else
-               ret = do_tty_write(ld->ops->write, tty, file, buf, count);
+               ret = do_tty_write(ld->ops->write, tty, file, from);
        tty_ldisc_deref(ld);
        return ret;
 }
 
-ssize_t redirected_tty_write(struct file *file, const char __user *buf,
-                                               size_t count, loff_t *ppos)
+static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
+{
+       return file_tty_write(iocb->ki_filp, iocb, from);
+}
+
+ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *p = NULL;
 
@@ -1057,13 +1064,17 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf,
                p = get_file(redirect);
        spin_unlock(&redirect_lock);
 
+       /*
+        * We know the redirected tty is just another tty, so we can
+        * call file_tty_write() directly with that file pointer.
+        */
        if (p) {
                ssize_t res;
-               res = vfs_write(p, buf, count, &p->f_pos);
+               res = file_tty_write(p, iocb, iter);
                fput(p);
                return res;
        }
-       return tty_write(file, buf, count, ppos);
+       return tty_write(iocb, iter);
 }
 
 /*
@@ -2295,7 +2306,7 @@ static int tioccons(struct file *file)
 {
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (file->f_op->write == redirected_tty_write) {
+       if (file->f_op->write_iter == redirected_tty_write) {
                struct file *f;
                spin_lock(&redirect_lock);
                f = redirect;
@@ -2305,6 +2316,12 @@ static int tioccons(struct file *file)
                        fput(f);
                return 0;
        }
+       if (file->f_op->write_iter != tty_write)
+               return -ENOTTY;
+       if (!(file->f_mode & FMODE_WRITE))
+               return -EBADF;
+       if (!(file->f_mode & FMODE_CAN_WRITE))
+               return -EINVAL;
        spin_lock(&redirect_lock);
        if (redirect) {
                spin_unlock(&redirect_lock);
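
The tty_io.c hunks above move the tty write path from ->write to ->write_iter: do_tty_write() now pulls user data with copy_from_iter() and, when the line discipline accepts fewer bytes than were copied, rewinds the iterator with iov_iter_revert(). The fragment below is only a sketch of that consume-then-revert pattern, not code from the patch; the bounce buffer, chunk size and consume() callback are invented placeholders.

#include <linux/uio.h>
#include <linux/minmax.h>
#include <linux/errno.h>

/* Illustrative only: copy from an iov_iter in chunks and give back
 * whatever the consumer did not accept. */
static ssize_t sketch_write_iter(struct iov_iter *from, char *bounce, size_t chunk,
                                 ssize_t (*consume)(const char *buf, size_t len))
{
        ssize_t written = 0;

        while (iov_iter_count(from)) {
                size_t size = min(iov_iter_count(from), chunk);
                ssize_t ret;

                if (copy_from_iter(bounce, size, from) != size)
                        return written ? written : -EFAULT;

                ret = consume(bounce, size);    /* may accept fewer bytes than offered */
                if (ret <= 0)
                        return written ? written : ret;
                if (ret != size)
                        iov_iter_revert(from, size - ret);      /* rewind the unconsumed tail */
                written += ret;
        }
        return written;
}
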
index 22a56c4dce678d507a80a27378412b1f91a30a8d..7990fee03fe4bb03f102a0cdb550771131e75be4 100644 (file)
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
        }
 
        data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
-       data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+       data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+                               sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+       if (!data->clks)
+               return -ENOMEM;
+
        ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
        if (ret)
                return ret;
@@ -214,20 +218,16 @@ err:
        return ret;
 }
 
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       platform_device_unregister(pdev);
-
-       return 0;
-}
-
 static int cdns_imx_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       struct cdns_imx *data = dev_get_drvdata(dev);
 
-       device_for_each_child(dev, NULL, cdns_imx_remove_core);
+       pm_runtime_get_sync(dev);
+       of_platform_depopulate(dev);
+       clk_bulk_disable_unprepare(data->num_clks, data->clks);
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
        platform_set_drvdata(pdev, NULL);
 
        return 0;
index 134dc2005ce97de81ee1787510d4772395da8ef4..c9f6e975828852f09206ca11ed84c0fb6582e21c 100644 (file)
@@ -1329,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
        if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
                return -EINVAL;
 
-       alts = usblp->protocol[protocol].alt_setting;
-       if (alts < 0)
-               return -EINVAL;
-       r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
-       if (r < 0) {
-               printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
-                       alts, usblp->ifnum);
-               return r;
+       /* Don't unnecessarily set the interface if there's a single alt. */
+       if (usblp->intf->num_altsetting > 1) {
+               alts = usblp->protocol[protocol].alt_setting;
+               if (alts < 0)
+                       return -EINVAL;
+               r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+               if (r < 0) {
+                       printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+                               alts, usblp->ifnum);
+                       return r;
+               }
        }
 
        usblp->bidir = (usblp->protocol[protocol].epread != NULL);
index 0a0d11151cfb88cb6ecbd7e9264f26e75de148d6..ad4c94366dadfd4051c8459f4db0bd566fd18b04 100644 (file)
@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
 static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
                                            u32 windex)
 {
-       struct dwc2_hsotg_ep *ep;
        int dir = (windex & USB_DIR_IN) ? 1 : 0;
        int idx = windex & 0x7F;
 
@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
        if (idx > hsotg->num_of_eps)
                return NULL;
 
-       ep = index_to_ep(hsotg, idx, dir);
-
-       if (idx && ep->dir_in != dir)
-               return NULL;
-
-       return ep;
+       return index_to_ep(hsotg, idx, dir);
 }
 
 /**
index 841daec70b6efd808b1407da9a4ba561eb98c660..3101f0dcf6ae853260c2f3abc6c78a61b1b1d89f 100644 (file)
@@ -1758,7 +1758,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
                if (PMSG_IS_AUTO(msg))
                        break;
 
-               ret = dwc3_core_init(dwc);
+               ret = dwc3_core_init_for_resume(dwc);
                if (ret)
                        return ret;
 
index 30313b233680d6a875928846d8fa8bd5eb7b1c25..99c7fc0d1d597369b819d98c238d63b1009f2f6c 100644 (file)
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
                struct usb_descriptor_header *usb_desc;
 
                usb_desc = usb_otg_descriptor_alloc(gadget);
-               if (!usb_desc)
+               if (!usb_desc) {
+                       status = -ENOMEM;
                        goto fail1;
+               }
                usb_otg_descriptor_init(gadget, usb_desc);
                otg_desc[0] = usb_desc;
                otg_desc[1] = NULL;
index 0bd6b20435b8a547a1b4dd09cff6be3dbc985f3a..02d8bfae58fb141dfe4aac8720df258fdb0ab44b 100644 (file)
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
        u32 state, reg, loops;
 
        /* Stop DMA activity */
-       writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       if (ep->epn.desc_mode)
+               writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+       else
+               writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
 
        /* Wait for it to complete */
        for (loops = 0; loops < 1000; loops++) {
index 6497185ec4e7ab822cec2651385f64b1981fcdcb..bfd8e77788e29fe1c051b4f3f8ef14c772cb5dae 100644 (file)
@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
                str_array[offset].s = NULL;
 
                ret = ast_vhub_str_alloc_add(vhub, &lang_str);
-               if (ret)
+               if (ret) {
+                       of_node_put(child);
                        break;
+               }
        }
 
        return ret;
index 3e88c7670b2ed40b019a2d3410236f07f2447079..fb01ff47b64cf63dafda5ab40692d9cf5d18ffc1 100644 (file)
@@ -17,7 +17,7 @@ if USB_BDC_UDC
 comment "Platform Support"
 config USB_BDC_PCI
        tristate "BDC support for PCIe based platforms"
-       depends on USB_PCI
+       depends on USB_PCI && BROKEN
        default USB_BDC_UDC
        help
                Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
index 6a62bbd01324f5bb99998a489188a5949a72dd3e..ea114f922ccf6c0336e25d3a7e76c8c2cd0b19ab 100644 (file)
@@ -1529,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t n)
 {
        struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+       ssize_t                 ret;
 
+       mutex_lock(&udc_lock);
        if (!udc->driver) {
                dev_err(dev, "soft-connect without a gadget driver\n");
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               goto out;
        }
 
        if (sysfs_streq(buf, "connect")) {
@@ -1543,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev,
                usb_gadget_udc_stop(udc);
        } else {
                dev_err(dev, "unsupported command '%s'\n", buf);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
-       return n;
+       ret = n;
+out:
+       mutex_unlock(&udc_lock);
+       return ret;
 }
 static DEVICE_ATTR_WO(soft_connect);
 
index 1a953f44183aaf0cb99a89c8d18784eefead39e2..57067763b1005f7fcd021b5012cb7d1b11b97639 100644 (file)
@@ -2270,17 +2270,20 @@ static int dummy_hub_control(
                        }
                        fallthrough;
                case USB_PORT_FEAT_RESET:
+                       if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+                               break;
                        /* if it's already enabled, disable */
                        if (hcd->speed == HCD_USB3) {
-                               dum_hcd->port_status = 0;
                                dum_hcd->port_status =
                                        (USB_SS_PORT_STAT_POWER |
                                         USB_PORT_STAT_CONNECTION |
                                         USB_PORT_STAT_RESET);
-                       } else
+                       } else {
                                dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
                                        | USB_PORT_STAT_LOW_SPEED
                                        | USB_PORT_STAT_HIGH_SPEED);
+                               dum_hcd->port_status |= USB_PORT_STAT_RESET;
+                       }
                        /*
                         * We want to reset device status. All but the
                         * Self powered feature
@@ -2292,7 +2295,8 @@ static int dummy_hub_control(
                         * interval? Is it still 50msec as for HS?
                         */
                        dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
-                       fallthrough;
+                       set_link_state(dum_hcd);
+                       break;
                case USB_PORT_FEAT_C_CONNECTION:
                case USB_PORT_FEAT_C_RESET:
                case USB_PORT_FEAT_C_ENABLE:
index e358ae17d51e7c1ae0e13ca80c8ba0e1f317593e..1926b328b6aa7a2e5a7726561cd44ae111f40d76 100644 (file)
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
        struct ehci_hcd         *ehci = hcd_to_ehci (hcd);
        u32                     temp;
        u32                     hcc_params;
+       int                     rc;
 
        hcd->uses_new_polling = 1;
 
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
        down_write(&ehci_cf_port_reset_rwsem);
        ehci->rh_state = EHCI_RH_RUNNING;
        ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+       /* Wait until the HC becomes operational */
        ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
        msleep(5);
+       rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
        up_write(&ehci_cf_port_reset_rwsem);
+
+       if (rc) {
+               ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+                        ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+               return rc;
+       }
+
        ehci->last_periodic_enable = ktime_get_real();
 
        temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
index 087402aec5cbeb43ab851364d6a99fff96a2b53a..9f9ab5ccea889df6e8aebf630d941a1c82c392c8 100644 (file)
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 
        unlink_empty_async_suspended(ehci);
 
+       /* Some Synopsys controllers mistakenly leave IAA turned on */
+       ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
        /* Any IAA cycle that started before the suspend is now invalid */
        end_iaa_cycle(ehci);
        ehci_handle_start_intr_unlinks(ehci);
index 45c54d56ecbd55fd8241983c2c3ce42d76ffb809..b45e5bf0899798efbda164feab120da4c424fd30 100644 (file)
@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
 
        sch_ep->sch_tt = tt;
        sch_ep->ep = ep;
+       INIT_LIST_HEAD(&sch_ep->endpoint);
+       INIT_LIST_HEAD(&sch_ep->tt_endpoint);
 
        return sch_ep;
 }
@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
                                        sch_ep->bw_budget_table[j];
                }
        }
+       sch_ep->allocated = used;
 }
 
 static int check_sch_tt(struct usb_device *udev,
@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
        return 0;
 }
 
+static void destroy_sch_ep(struct usb_device *udev,
+       struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+       /* only release ep bw check passed by check_sch_bw() */
+       if (sch_ep->allocated)
+               update_bus_bw(sch_bw, sch_ep, 0);
+
+       list_del(&sch_ep->endpoint);
+
+       if (sch_ep->sch_tt) {
+               list_del(&sch_ep->tt_endpoint);
+               drop_tt(udev);
+       }
+       kfree(sch_ep);
+}
+
 static bool need_bw_sch(struct usb_host_endpoint *ep,
        enum usb_device_speed speed, int has_tt)
 {
@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
 
        mtk->sch_array = sch_array;
 
+       INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_ep_ctx *ep_ctx;
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_virt_device *virt_dev;
-       struct mu3h_sch_bw_info *sch_bw;
        struct mu3h_sch_ep_info *sch_ep;
-       struct mu3h_sch_bw_info *sch_array;
        unsigned int ep_index;
-       int bw_index;
-       int ret = 0;
 
        xhci = hcd_to_xhci(hcd);
        virt_dev = xhci->devs[udev->slot_id];
        ep_index = xhci_get_endpoint_index(&ep->desc);
        slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
        ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
-       sch_array = mtk->sch_array;
 
        xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
                __func__, usb_endpoint_type(&ep->desc), udev->speed,
@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
                return 0;
        }
 
-       bw_index = get_bw_index(xhci, udev, ep);
-       sch_bw = &sch_array[bw_index];
-
        sch_ep = create_sch_ep(udev, ep, ep_ctx);
        if (IS_ERR_OR_NULL(sch_ep))
                return -ENOMEM;
 
        setup_sch_info(udev, ep_ctx, sch_ep);
 
-       ret = check_sch_bw(udev, sch_bw, sch_ep);
-       if (ret) {
-               xhci_err(xhci, "Not enough bandwidth!\n");
-               if (is_fs_or_ls(udev->speed))
-                       drop_tt(udev);
-
-               kfree(sch_ep);
-               return -ENOSPC;
-       }
-
-       list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
-       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
-               | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
-       ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
-               | EP_BREPEAT(sch_ep->repeat));
-
-       xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
-                       sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
-                       sch_ep->offset, sch_ep->repeat);
+       list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
 
        return 0;
 }
@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_virt_device *virt_dev;
        struct mu3h_sch_bw_info *sch_array;
        struct mu3h_sch_bw_info *sch_bw;
-       struct mu3h_sch_ep_info *sch_ep;
+       struct mu3h_sch_ep_info *sch_ep, *tmp;
        int bw_index;
 
        xhci = hcd_to_xhci(hcd);
@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
        bw_index = get_bw_index(xhci, udev, ep);
        sch_bw = &sch_array[bw_index];
 
-       list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+       list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
                if (sch_ep->ep == ep) {
-                       update_bus_bw(sch_bw, sch_ep, 0);
-                       list_del(&sch_ep->endpoint);
-                       if (is_fs_or_ls(udev->speed)) {
-                               list_del(&sch_ep->tt_endpoint);
-                               drop_tt(udev);
-                       }
-                       kfree(sch_ep);
+                       destroy_sch_ep(udev, sch_bw, sch_ep);
                        break;
                }
        }
 }
 EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+       struct mu3h_sch_bw_info *sch_bw;
+       struct mu3h_sch_ep_info *sch_ep, *tmp;
+       int bw_index, ret;
+
+       xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+       list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
+               bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+               sch_bw = &mtk->sch_array[bw_index];
+
+               ret = check_sch_bw(udev, sch_bw, sch_ep);
+               if (ret) {
+                       xhci_err(xhci, "Not enough bandwidth!\n");
+                       return -ENOSPC;
+               }
+       }
+
+       list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+               struct xhci_ep_ctx *ep_ctx;
+               struct usb_host_endpoint *ep = sch_ep->ep;
+               unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+               bw_index = get_bw_index(xhci, udev, ep);
+               sch_bw = &mtk->sch_array[bw_index];
+
+               list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+               ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+               ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+                       | EP_BCSCOUNT(sch_ep->cs_count)
+                       | EP_BBM(sch_ep->burst_mode));
+               ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+                       | EP_BREPEAT(sch_ep->repeat));
+
+               xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+                       sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+                       sch_ep->offset, sch_ep->repeat);
+       }
+
+       return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+       struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct mu3h_sch_bw_info *sch_bw;
+       struct mu3h_sch_ep_info *sch_ep, *tmp;
+       int bw_index;
+
+       xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+       list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+               bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+               sch_bw = &mtk->sch_array[bw_index];
+               destroy_sch_ep(udev, sch_bw, sch_ep);
+       }
+
+       xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
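
The xhci-mtk scheduler rework above splits endpoint admission into two phases: xhci_mtk_add_ep_quirk() now only parks the endpoint on mtk->bw_ep_chk_list, xhci_mtk_check_bandwidth() validates the whole pending set and moves it onto the per-bus lists, and xhci_mtk_reset_bandwidth() destroys whatever is still pending. The userspace sketch below shows the same stage-then-validate-then-commit shape in a loose way; struct ep_req, fits() and the flat array stand in for the driver's lists and are invented for the illustration.

#include <stdbool.h>
#include <stddef.h>

struct ep_req { unsigned int bw; };     /* one staged endpoint request */

static bool fits(unsigned int used, unsigned int extra, unsigned int budget)
{
        return used <= budget && extra <= budget - used;
}

/* Phase 1: validate the complete pending set; phase 2: charge it as a whole.
 * If validation fails, nothing has been charged and the caller can simply
 * drop the pending set, much like the reset path above. */
static bool commit_pending(const struct ep_req *pending, size_t n,
                           unsigned int *used, unsigned int budget)
{
        unsigned int extra = 0;

        for (size_t i = 0; i < n; i++)
                extra += pending[i].bw;
        if (!fits(*used, extra, budget))
                return false;

        *used += extra;
        return true;
}
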
index 8f321f39ab960f169f4fbca420eb3381e3ae206e..fe010cc61f19bbc3180c6ff29876a09dca1a92a0 100644 (file)
@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
 static int xhci_mtk_setup(struct usb_hcd *hcd);
 static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
        .reset = xhci_mtk_setup,
+       .check_bandwidth = xhci_mtk_check_bandwidth,
+       .reset_bandwidth = xhci_mtk_reset_bandwidth,
 };
 
 static struct hc_driver __read_mostly xhci_mtk_hc_driver;
index a93cfe8179049d5d6518d27c4d504aaf13c1b918..cbb09dfea62e0f8be63a5ac4eac18c972c85ed7a 100644 (file)
@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
  * @ep_type: endpoint type
  * @maxpkt: max packet size of endpoint
  * @ep: address of usb_host_endpoint struct
+ * @allocated: the bandwidth is already allocated from bus_bw
  * @offset: which uframe of the interval that transfer should be
  *             scheduled first time within the interval
  * @repeat: the time gap between two uframes that transfers are
@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
        u32 ep_type;
        u32 maxpkt;
        void *ep;
+       bool allocated;
        /*
         * mtk xHCI scheduling information put into reserved DWs
         * in ep context
@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
        struct device *dev;
        struct usb_hcd *hcd;
        struct mu3h_sch_bw_info *sch_array;
+       struct list_head bw_ep_chk_list;
        struct mu3c_ippc_regs __iomem *ippc_regs;
        bool has_ippc;
        int num_u2_ports;
@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep);
 void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
                struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 
 #else
 static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
 {
 }
 
+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+               struct usb_device *udev)
+{
+       return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+               struct usb_device *udev)
+{
+}
 #endif
 
 #endif         /* _XHCI_MTK_H_ */
index 60651a50770f93aa9e652e4ddbd127169cceba7a..8ca1a235d1645144824e1cdd129926f699f4a298 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/mbus.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/phy/phy.h>
 
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
        return 0;
 }
 
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct device *dev = hcd->self.controller;
+       struct phy *phy;
+       int ret;
+
+       /* Old bindings miss the PHY handle */
+       phy = of_phy_get(dev->of_node, "usb3-phy");
+       if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       else if (IS_ERR(phy))
+               goto phy_out;
+
+       ret = phy_init(phy);
+       if (ret)
+               goto phy_put;
+
+       ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
+       if (ret)
+               goto phy_exit;
+
+       ret = phy_power_on(phy);
+       if (ret == -EOPNOTSUPP) {
+               /* Skip initialization of XHCI PHY when it is unsupported by firmware */
+               dev_warn(dev, "PHY unsupported by firmware\n");
+               xhci->quirks |= XHCI_SKIP_PHY_INIT;
+       }
+       if (ret)
+               goto phy_exit;
+
+       phy_power_off(phy);
+phy_exit:
+       phy_exit(phy);
+phy_put:
+       of_phy_put(phy);
+phy_out:
+
+       return 0;
+}
+
 int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
index 3be021793cc8b07fcab2529214e57599c6f20d2d..01bf3fcb3eca5da04cbb7017e11cc176e2bf6eb3 100644 (file)
@@ -12,6 +12,7 @@ struct usb_hcd;
 
 #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
 int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
 int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
 #else
 static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
        return 0;
 }
 
+static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+       return 0;
+}
+
 static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
 {
        return 0;
index 4d34f6005381e6eb1b9dab31dd2e5ec9180ab5e4..c1edcc9b13cece07c9dc0e00c27a7f0848f4875c 100644 (file)
@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd)
                priv->plat_start(hcd);
 }
 
+static int xhci_priv_plat_setup(struct usb_hcd *hcd)
+{
+       struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+       if (!priv->plat_setup)
+               return 0;
+
+       return priv->plat_setup(hcd);
+}
+
 static int xhci_priv_init_quirk(struct usb_hcd *hcd)
 {
        struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
 };
 
 static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+       .plat_setup = xhci_mvebu_a3700_plat_setup,
        .init_quirk = xhci_mvebu_a3700_init_quirk,
 };
 
@@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev)
 
        hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
        xhci->shared_hcd->tpl_support = hcd->tpl_support;
-       if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+
+       if (priv) {
+               ret = xhci_priv_plat_setup(hcd);
+               if (ret)
+                       goto disable_usb_phy;
+       }
+
+       if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
                hcd->skip_phy_initialization = 1;
 
        if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
index 1fb149d1fbcea319d712ccc9af6857e54c9cc6f8..561d0b7bce0980c10d821c96c338a2c1f909d26a 100644 (file)
@@ -13,6 +13,7 @@
 struct xhci_plat_priv {
        const char *firmware_name;
        unsigned long long quirks;
+       int (*plat_setup)(struct usb_hcd *);
        void (*plat_start)(struct usb_hcd *);
        int (*init_quirk)(struct usb_hcd *);
        int (*suspend_quirk)(struct usb_hcd *);
index 5677b81c0915551581efd60caa50b2ef13007dfc..89c3be9917f6610374b3647e81429b2e04e32753 100644 (file)
@@ -699,11 +699,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
        dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
                         DMA_FROM_DEVICE);
        /* for IN transfers we need to copy the data from bounce to sg */
-       len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
-                            seg->bounce_len, seg->bounce_offs);
-       if (len != seg->bounce_len)
-               xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
-                               len, seg->bounce_len);
+       if (urb->num_sgs) {
+               len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+                                          seg->bounce_len, seg->bounce_offs);
+               if (len != seg->bounce_len)
+                       xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+                                 len, seg->bounce_len);
+       } else {
+               memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+                      seg->bounce_len);
+       }
        seg->bounce_len = 0;
        seg->bounce_offs = 0;
 }
@@ -2931,6 +2936,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trb->field[0] = cpu_to_le32(field1);
        trb->field[1] = cpu_to_le32(field2);
        trb->field[2] = cpu_to_le32(field3);
+       /* make sure TRB is fully written before giving it to the controller */
+       wmb();
        trb->field[3] = cpu_to_le32(field4);
 
        trace_xhci_queue_trb(ring, trb);
@@ -3275,12 +3282,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
 
        /* create a max max_pkt sized bounce buffer pointed to by last trb */
        if (usb_urb_dir_out(urb)) {
-               len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
-                                  seg->bounce_buf, new_buff_len, enqd_len);
-               if (len != new_buff_len)
-                       xhci_warn(xhci,
-                               "WARN Wrong bounce buffer write length: %zu != %d\n",
-                               len, new_buff_len);
+               if (urb->num_sgs) {
+                       len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+                                                seg->bounce_buf, new_buff_len, enqd_len);
+                       if (len != new_buff_len)
+                               xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+                                         len, new_buff_len);
+               } else {
+                       memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+               }
+
                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
                                                 max_pkt, DMA_TO_DEVICE);
        } else {
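
The queue_trb() hunk above adds a wmb() so that the first three TRB words are fully written before field[3], which carries the cycle (ownership) bit, is stored; the controller only consumes a TRB once that last word flips. The snippet below shows the same publish-after-barrier idea with C11 release semantics. It is a CPU-to-CPU analogy only, since the kernel's wmb() also orders the writes as seen by the device, and struct slot and publish() are invented names.

#include <stdatomic.h>
#include <stdint.h>

struct slot {
        uint32_t payload[3];
        _Atomic uint32_t owner;         /* the consumer polls this word */
};

/* Fill the payload first, then hand the slot over: the release store keeps
 * the payload writes ordered before the ownership update. */
static void publish(struct slot *s, const uint32_t p[3], uint32_t owner_word)
{
        s->payload[0] = p[0];
        s->payload[1] = p[1];
        s->payload[2] = p[2];
        atomic_store_explicit(&s->owner, owner_word, memory_order_release);
}
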
index 934be168635230b3dbc557559c8f78cfceab1f16..50bb91b6a4b8d8e4a39ba7e58d010470d5423776 100644 (file)
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
                                                                     enable);
                        if (err < 0)
                                break;
+
+                       /*
+                        * wait 500us for LFPS detector to be disabled before
+                        * sending ACK
+                        */
+                       if (!enable)
+                               usleep_range(500, 1000);
                }
 
                if (err < 0) {
index e86940571b4cf1ec7153ad15a0f22a2a2510544e..345a221028c6f254d8095867a8c85fb1f62c9e0d 100644 (file)
@@ -2985,7 +2985,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
  * else should be touching the xhci->devs[slot_id] structure, so we
  * don't need to take the xhci->lock for manipulating that.
  */
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
        int i;
        int ret = 0;
@@ -3083,7 +3083,7 @@ command_cleanup:
        return ret;
 }
 
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 {
        struct xhci_hcd *xhci;
        struct xhci_virt_device *virt_dev;
@@ -5510,6 +5510,10 @@ void xhci_init_driver(struct hc_driver *drv,
                        drv->reset = over->reset;
                if (over->start)
                        drv->start = over->start;
+               if (over->check_bandwidth)
+                       drv->check_bandwidth = over->check_bandwidth;
+               if (over->reset_bandwidth)
+                       drv->reset_bandwidth = over->reset_bandwidth;
        }
 }
 EXPORT_SYMBOL_GPL(xhci_init_driver);
index 25e57bc9c3cc6b655b7ec1d4c1dfa54a80eaec85..07ff95016f119abff7c268095d6f4eee897e1e2c 100644 (file)
@@ -1920,6 +1920,8 @@ struct xhci_driver_overrides {
        size_t extra_priv_size;
        int (*reset)(struct usb_hcd *hcd);
        int (*start)(struct usb_hcd *hcd);
+       int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+       void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
 };
 
 #define        XHCI_CFC_DELAY          10
@@ -2074,6 +2076,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
 void xhci_shutdown(struct usb_hcd *hcd);
 void xhci_init_driver(struct hc_driver *drv,
                      const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
 int xhci_ext_cap_init(struct xhci_hcd *xhci);
 
index ac9a81ae821642e01d96b2151ca13384c7111cf3..e6fa13701808247542ea6905a0659b194f466fd8 100644 (file)
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
                }
 
                usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+               usbhs_pipe_running(pipe, 0);
 
                __usbhsf_pkt_del(pkt);
        }
index fbb10dfc56e3184a44277a16a5dc7c1d074a2127..7bec1e730b2093147feda765c189f5cfc349a5b1 100644 (file)
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
        { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
        { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+       { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
        { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
        { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
        { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
        { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
        { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+       { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
        { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
        { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
index 3fe959104311b4019497b288194647afe1aba653..2049e66f34a3fd1c73c832ffd1866b0864869544 100644 (file)
@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_AHXX_2RMNET          0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO           0x0085
 #define CINTERION_PRODUCT_CLS8                 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM            0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET           0x00b7
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+         .driver_info = RSVD(3)},
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+         .driver_info = RSVD(0)},
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
          .driver_info = RSVD(4) },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
index 5c92a576edae8ec1cd74a9f427b79f9b0908994f..08f742fd24099e4f5e616c4818336dfe9fd26ded 100644 (file)
@@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr {
        struct sg_table sg_head;
        int log_size;
        int nsg;
+       int nent;
        struct list_head list;
        u64 offset;
 };
index 4b6195666c589512329f4a5886cf2ba9c7486bf2..d300f799efcd1f982a41a550c61efc964cfb04e6 100644 (file)
@@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift)
        return (npages + 1) / 2;
 }
 
-static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in)
-{
-       struct scatterlist *sg;
-       __be64 *pas;
-       int i;
-
-       pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
-       for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
-               (*pas) = cpu_to_be64(sg_dma_address(sg));
-}
-
 static void mlx5_set_access_mode(void *mkc, int mode)
 {
        MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
@@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode)
 static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
 {
        struct scatterlist *sg;
+       int nsg = mr->nsg;
+       u64 dma_addr;
+       u64 dma_len;
+       int j = 0;
        int i;
 
-       for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i)
-               mtt[i] = cpu_to_be64(sg_dma_address(sg));
+       for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
+               for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
+                    nsg && dma_len;
+                    nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
+                       mtt[j++] = cpu_to_be64(dma_addr);
+       }
 }
 
 static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
@@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
                return -ENOMEM;
 
        MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
-       fill_sg(mr, in);
        mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
        MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
        MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
@@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr
 done:
        mr->log_size = log_entity_size;
        mr->nsg = nsg;
-       err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
-       if (!err)
+       mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
+       if (!mr->nent)
                goto err_map;
 
        err = create_direct_mr(mvdev, mr);
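
The mr.c change above stops assuming one translation entry per scatterlist element: dma_map_sg_attrs() may return fewer mapped entries than were passed in, so the mapped count is kept in mr->nent and populate_mtts() walks each mapped segment and emits one MTT entry per 2^log_size chunk. A standalone sketch of that slicing follows; struct seg, emit_pages() and the output array are invented for the example and are not part of the driver.

#include <stddef.h>
#include <stdint.h>

struct seg { uint64_t addr; uint64_t len; };    /* one DMA-mapped segment */

/* Walk nent segments and emit one address per (1 << log_size) chunk,
 * stopping after max_pages entries; returns how many were written. */
static size_t emit_pages(const struct seg *segs, size_t nent, unsigned int log_size,
                         uint64_t *out, size_t max_pages)
{
        const uint64_t page = UINT64_C(1) << log_size;
        size_t n = 0;

        for (size_t i = 0; i < nent && n < max_pages; i++) {
                uint64_t addr = segs[i].addr;
                uint64_t len = segs[i].len;

                while (len && n < max_pages) {
                        out[n++] = addr;
                        addr += page;
                        len = len > page ? len - page : 0;
                }
        }
        return n;
}
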
index 88dde3455bfd99ddc585eeba97cdf434a4dd43a6..b5fe6d2ad22f5133b6d27142f5440785a2852c36 100644 (file)
@@ -87,6 +87,7 @@ struct mlx5_vq_restore_info {
        u64 device_addr;
        u64 driver_addr;
        u16 avail_index;
+       u16 used_index;
        bool ready;
        struct vdpa_callback cb;
        bool restore;
@@ -121,6 +122,7 @@ struct mlx5_vdpa_virtqueue {
        u32 virtq_id;
        struct mlx5_vdpa_net *ndev;
        u16 avail_idx;
+       u16 used_idx;
        int fw_state;
 
        /* keep last in the struct */
@@ -804,6 +806,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 
        obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context);
        MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx);
+       MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx);
        MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3,
                 get_features_12_3(ndev->mvdev.actual_features));
        vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context);
@@ -1022,6 +1025,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m
 struct mlx5_virtq_attr {
        u8 state;
        u16 available_index;
+       u16 used_index;
 };
 
 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
@@ -1052,6 +1056,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu
        memset(attr, 0, sizeof(*attr));
        attr->state = MLX5_GET(virtio_net_q_object, obj_context, state);
        attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index);
+       attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index);
        kfree(out);
        return 0;
 
@@ -1535,6 +1540,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev)
        }
 }
 
+static void clear_virtqueues(struct mlx5_vdpa_net *ndev)
+{
+       int i;
+
+       for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) {
+               ndev->vqs[i].avail_idx = 0;
+               ndev->vqs[i].used_idx = 0;
+       }
+}
+
 /* TODO: cross-endian support */
 static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev)
 {
@@ -1610,6 +1625,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu
                return err;
 
        ri->avail_index = attr.available_index;
+       ri->used_index = attr.used_index;
        ri->ready = mvq->ready;
        ri->num_ent = mvq->num_ent;
        ri->desc_addr = mvq->desc_addr;
@@ -1654,6 +1670,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
                        continue;
 
                mvq->avail_idx = ri->avail_index;
+               mvq->used_idx = ri->used_index;
                mvq->ready = ri->ready;
                mvq->num_ent = ri->num_ent;
                mvq->desc_addr = ri->desc_addr;
@@ -1768,6 +1785,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
        if (!status) {
                mlx5_vdpa_info(mvdev, "performing device reset\n");
                teardown_driver(ndev);
+               clear_virtqueues(ndev);
                mlx5_vdpa_destroy_mr(&ndev->mvdev);
                ndev->mvdev.status = 0;
                ndev->mvdev.mlx_features = 0;
index c8f0282bb64975b34f44459c0565156293b0ea64..18ffd0551b5429cbe3fc7ca4f431f45d0bad478e 100644 (file)
@@ -714,6 +714,23 @@ static bool xs_hvm_defer_init_for_callback(void)
 #endif
 }
 
+static int xenbus_probe_thread(void *unused)
+{
+       DEFINE_WAIT(w);
+
+       /*
+        * We actually just want to wait for *any* trigger of xb_waitq,
+        * and run xenbus_probe() the moment it occurs.
+        */
+       prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE);
+       schedule();
+       finish_wait(&xb_waitq, &w);
+
+       DPRINTK("probing");
+       xenbus_probe();
+       return 0;
+}
+
 static int __init xenbus_probe_initcall(void)
 {
        /*
@@ -725,6 +742,20 @@ static int __init xenbus_probe_initcall(void)
             !xs_hvm_defer_init_for_callback()))
                xenbus_probe();
 
+       /*
+        * For XS_LOCAL, spawn a thread which will wait for xenstored
+        * or a xenstore-stubdom to be started, then probe. It will be
+        * triggered when communication starts happening, by waiting
+        * on xb_waitq.
+        */
+       if (xen_store_domain_type == XS_LOCAL) {
+               struct task_struct *probe_task;
+
+               probe_task = kthread_run(xenbus_probe_thread, NULL,
+                                        "xenbus_probe");
+               if (IS_ERR(probe_task))
+                       return PTR_ERR(probe_task);
+       }
        return 0;
 }
 device_initcall(xenbus_probe_initcall);
index accdd8970e7c0cbaa67bf8eba097302c9b6274cd..b2975256dadbdf3abe4a1f1c784f83ec6d7ba8c2 100644 (file)
@@ -193,7 +193,7 @@ static int __init afs_init(void)
                goto error_cache;
 #endif
 
-       ret = register_pernet_subsys(&afs_net_ops);
+       ret = register_pernet_device(&afs_net_ops);
        if (ret < 0)
                goto error_net;
 
@@ -213,7 +213,7 @@ static int __init afs_init(void)
 error_proc:
        afs_fs_exit();
 error_fs:
-       unregister_pernet_subsys(&afs_net_ops);
+       unregister_pernet_device(&afs_net_ops);
 error_net:
 #ifdef CONFIG_AFS_FSCACHE
        fscache_unregister_netfs(&afs_cache_netfs);
@@ -244,7 +244,7 @@ static void __exit afs_exit(void)
 
        proc_remove(afs_proc_symlink);
        afs_fs_exit();
-       unregister_pernet_subsys(&afs_net_ops);
+       unregister_pernet_device(&afs_net_ops);
 #ifdef CONFIG_AFS_FSCACHE
        fscache_unregister_netfs(&afs_cache_netfs);
 #endif
index 3b8963e228a1ba82c0ac117d3a00892fc6b83abf..235b5042672e9f15cd137c22965eaa3aae9f3f61 100644 (file)
@@ -130,7 +130,15 @@ EXPORT_SYMBOL(truncate_bdev_range);
 
 static void set_init_blocksize(struct block_device *bdev)
 {
-       bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev));
+       unsigned int bsize = bdev_logical_block_size(bdev);
+       loff_t size = i_size_read(bdev->bd_inode);
+
+       while (bsize < PAGE_SIZE) {
+               if (size & bsize)
+                       break;
+               bsize <<= 1;
+       }
+       bdev->bd_inode->i_blkbits = blksize_bits(bsize);
 }
 
 int set_blocksize(struct block_device *bdev, int size)
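
The set_init_blocksize() hunk above grows the initial block size from the logical block size for as long as the device capacity stays a multiple of the next power of two, capping at PAGE_SIZE, so a 4 KiB-aligned device gets 4 KiB i_blkbits even when its logical block size is 512. Below is a standalone version of just that selection loop, with the page size passed in as a parameter for illustration; pick_blocksize() is an invented name.

#include <stdint.h>

/* Largest power-of-two block size, up to page_size, that still divides
 * the device size evenly; same loop as the hunk above, outside the kernel. */
static unsigned int pick_blocksize(uint64_t dev_size, unsigned int logical,
                                   unsigned int page_size)
{
        unsigned int bsize = logical;

        while (bsize < page_size) {
                if (dev_size & bsize)   /* dev_size is not a multiple of 2 * bsize */
                        break;
                bsize <<= 1;
        }
        return bsize;
}

For example, pick_blocksize(7 * 512, 512, 4096) stays at 512, while pick_blocksize(1 << 20, 512, 4096) returns 4096.
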
index 02d7d7b2563b5cb9f0fdc7e8fc9e910731016fe3..9cadacf3ec2754def54db3505c4a5cc565329be2 100644 (file)
@@ -3117,7 +3117,7 @@ void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
                list_del_init(&lower->list);
                if (lower == node)
                        node = NULL;
-               btrfs_backref_free_node(cache, lower);
+               btrfs_backref_drop_node(cache, lower);
        }
 
        btrfs_backref_cleanup_node(cache, node);
index 52f2198d44c95c513d942af7a50f6807820cd931..48ebc106a606c73582ef009ca6c9bcb19ef5a50b 100644 (file)
@@ -673,7 +673,15 @@ static noinline void caching_thread(struct btrfs_work *work)
                wake_up(&caching_ctl->wait);
        }
 
-       if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+       /*
+        * If we are in the transaction that populated the free space tree we
+        * can't actually cache from the free space tree as our commit root and
+        * real root are the same, so we could change the contents of the blocks
+        * while caching.  Instead do the slow caching in this case, and after
+        * the transaction has committed we will be safe.
+        */
+       if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
+           !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
                ret = load_free_space_tree(caching_ctl);
        else
                ret = load_extent_tree_free(caching_ctl);
@@ -2669,7 +2677,8 @@ again:
         * Go through delayed refs for all the stuff we've just kicked off
         * and then loop back (just once)
         */
-       ret = btrfs_run_delayed_refs(trans, 0);
+       if (!ret)
+               ret = btrfs_run_delayed_refs(trans, 0);
        if (!ret && loops == 0) {
                loops++;
                spin_lock(&cur_trans->dirty_bgs_lock);
index e6e37591f1ded91a97c6046b1b021ecdfcab1e7c..4debdbdde2abbc264bf62c1a65fe31e22c9f3008 100644 (file)
@@ -563,6 +563,9 @@ enum {
 
        /* Indicate that we need to cleanup space cache v1 */
        BTRFS_FS_CLEANUP_SPACE_CACHE_V1,
+
+       /* Indicate that we can't trust the free space tree for caching yet */
+       BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED,
 };
 
 /*
index d79b8369e6aafedfae71c0a7dcb1299c6c655aee..0c335dae5af7adca7987ec8c7fcb5343c9a6f6f9 100644 (file)
@@ -2602,8 +2602,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
        struct btrfs_block_group *cache;
        int ret;
 
-       btrfs_add_excluded_extent(trans->fs_info, bytenr, num_bytes);
-
        cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
        if (!cache)
                return -EINVAL;
@@ -2615,11 +2613,19 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
         * the pinned extents.
         */
        btrfs_cache_block_group(cache, 1);
+       /*
+        * Make sure we wait until the cache is completely built in case it is
+        * missing or is invalid and therefore needs to be rebuilt.
+        */
+       ret = btrfs_wait_block_group_cache_done(cache);
+       if (ret)
+               goto out;
 
        pin_down_extent(trans, cache, bytenr, num_bytes, 0);
 
        /* remove us from the free space cache (if we're there at all) */
        ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
+out:
        btrfs_put_block_group(cache);
        return ret;
 }
@@ -2629,50 +2635,22 @@ static int __exclude_logged_extent(struct btrfs_fs_info *fs_info,
 {
        int ret;
        struct btrfs_block_group *block_group;
-       struct btrfs_caching_control *caching_ctl;
 
        block_group = btrfs_lookup_block_group(fs_info, start);
        if (!block_group)
                return -EINVAL;
 
-       btrfs_cache_block_group(block_group, 0);
-       caching_ctl = btrfs_get_caching_control(block_group);
-
-       if (!caching_ctl) {
-               /* Logic error */
-               BUG_ON(!btrfs_block_group_done(block_group));
-               ret = btrfs_remove_free_space(block_group, start, num_bytes);
-       } else {
-               /*
-                * We must wait for v1 caching to finish, otherwise we may not
-                * remove our space.
-                */
-               btrfs_wait_space_cache_v1_finished(block_group, caching_ctl);
-               mutex_lock(&caching_ctl->mutex);
-
-               if (start >= caching_ctl->progress) {
-                       ret = btrfs_add_excluded_extent(fs_info, start,
-                                                       num_bytes);
-               } else if (start + num_bytes <= caching_ctl->progress) {
-                       ret = btrfs_remove_free_space(block_group,
-                                                     start, num_bytes);
-               } else {
-                       num_bytes = caching_ctl->progress - start;
-                       ret = btrfs_remove_free_space(block_group,
-                                                     start, num_bytes);
-                       if (ret)
-                               goto out_lock;
+       btrfs_cache_block_group(block_group, 1);
+       /*
+        * Make sure we wait until the cache is completely built in case it is
+        * missing or is invalid and therefore needs to be rebuilt.
+        */
+       ret = btrfs_wait_block_group_cache_done(block_group);
+       if (ret)
+               goto out;
 
-                       num_bytes = (start + num_bytes) -
-                               caching_ctl->progress;
-                       start = caching_ctl->progress;
-                       ret = btrfs_add_excluded_extent(fs_info, start,
-                                                       num_bytes);
-               }
-out_lock:
-               mutex_unlock(&caching_ctl->mutex);
-               btrfs_put_caching_control(caching_ctl);
-       }
+       ret = btrfs_remove_free_space(block_group, start, num_bytes);
+out:
        btrfs_put_block_group(block_group);
        return ret;
 }
@@ -2863,9 +2841,6 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
                        mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        break;
                }
-               if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
-                       clear_extent_bits(&fs_info->excluded_extents, start,
-                                         end, EXTENT_UPTODATE);
 
                if (btrfs_test_opt(fs_info, DISCARD_SYNC))
                        ret = btrfs_discard_extent(fs_info, start,
@@ -5549,7 +5524,15 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
                                goto out_free;
                        }
 
-                       trans = btrfs_start_transaction(tree_root, 0);
+                       /*
+                        * Use join to avoid potential EINTR from transaction
+                        * start. See wait_reserve_ticket and the whole
+                        * reservation callchain.
+                        */
+                       if (for_reloc)
+                               trans = btrfs_join_transaction(tree_root);
+                       else
+                               trans = btrfs_start_transaction(tree_root, 0);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                goto out_free;
index e33a65bd9a0c281a83bc824dcd5160209583c3f2..a33bca94d133ecb6022df27da5e2655f58c93fc3 100644 (file)
@@ -1150,6 +1150,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
                return PTR_ERR(trans);
 
        set_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+       set_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
        free_space_root = btrfs_create_tree(trans,
                                            BTRFS_FREE_SPACE_TREE_OBJECTID);
        if (IS_ERR(free_space_root)) {
@@ -1171,11 +1172,18 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
        btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);
        btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
        clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+       ret = btrfs_commit_transaction(trans);
 
-       return btrfs_commit_transaction(trans);
+       /*
+        * Now that we've committed the transaction any reading of our commit
+        * root will be safe, so we can cache from the free space tree now.
+        */
+       clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
+       return ret;
 
 abort:
        clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
+       clear_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags);
        btrfs_abort_transaction(trans, ret);
        btrfs_end_transaction(trans);
        return ret;
index ae97f4dbaff30f9004f915e91d2672f694db74f5..78a35374d492914d94004d65268bda8d4da54a01 100644 (file)
@@ -5512,6 +5512,21 @@ static int clone_range(struct send_ctx *sctx,
                        break;
                offset += clone_len;
                clone_root->offset += clone_len;
+
+               /*
+                * If we are cloning from the file we are currently processing,
+                * and using the send root as the clone root, we must stop once
+                * the current clone offset reaches the current eof of the file
+                * at the receiver, otherwise we would issue an invalid clone
+                * operation (source range going beyond eof) and cause the
+                * receiver to fail. So if we reach the current eof, bail out
+                * and fall back to a regular write.
+                */
+               if (clone_root->root == sctx->send_root &&
+                   clone_root->ino == sctx->cur_ino &&
+                   clone_root->offset >= sctx->cur_inode_next_write_offset)
+                       break;
+
                data_offset += clone_len;
 next:
                path->slots[0]++;
index 8e0f7a1029c6c8002b08a781efd17be012330a1c..6af7f2bf92de7d18c834147b6fbc09eb0dc6f062 100644 (file)
@@ -2264,14 +2264,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
         */
        btrfs_free_log_root_tree(trans, fs_info);
 
-       /*
-        * commit_fs_roots() can call btrfs_save_ino_cache(), which generates
-        * new delayed refs. Must handle them or qgroup can be wrong.
-        */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               goto unlock_tree_log;
-
        /*
         * Since fs roots are all committed, we can get a quite accurate
         * new_roots. So let's do quota accounting.
index b62be84833e9a84654b581bdc287e4ffe6d952f8..d6c24c8ad7499b6e16a07d2d7332feee9baf573a 100644 (file)
@@ -433,7 +433,7 @@ static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
 
        atomic_set(&dev->reada_in_flight, 0);
        atomic_set(&dev->dev_stats_ccnt, 0);
-       btrfs_device_data_ordered_init(dev, fs_info);
+       btrfs_device_data_ordered_init(dev);
        INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        extent_io_tree_init(fs_info, &dev->alloc_state,
@@ -4317,6 +4317,8 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
                btrfs_warn(fs_info,
        "balance: cannot set exclusive op status, resume manually");
 
+       btrfs_release_path(path);
+
        mutex_lock(&fs_info->balance_mutex);
        BUG_ON(fs_info->balance_ctl);
        spin_lock(&fs_info->balance_lock);
index 1997a4649a66cfc62c364af9bc64d76d17508845..c43663d9c22e0d7fad622434a0ccdc70bdf57ce5 100644 (file)
@@ -39,10 +39,10 @@ struct btrfs_io_geometry {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 #include <linux/seqlock.h>
 #define __BTRFS_NEED_DEVICE_DATA_ORDERED
-#define btrfs_device_data_ordered_init(device, info)                           \
-       seqcount_mutex_init(&device->data_seqcount, &info->chunk_mutex)
+#define btrfs_device_data_ordered_init(device) \
+       seqcount_init(&device->data_seqcount)
 #else
-#define btrfs_device_data_ordered_init(device, info) do { } while (0)
+#define btrfs_device_data_ordered_init(device) do { } while (0)
 #endif
 
 #define BTRFS_DEV_STATE_WRITEABLE      (0)
@@ -76,8 +76,7 @@ struct btrfs_device {
        blk_status_t last_flush_error;
 
 #ifdef __BTRFS_NEED_DEVICE_DATA_ORDERED
-       /* A seqcount_t with associated chunk_mutex (for lockdep) */
-       seqcount_mutex_t data_seqcount;
+       seqcount_t data_seqcount;
 #endif
 
        /* the internal btrfs device id */
@@ -168,9 +167,11 @@ btrfs_device_get_##name(const struct btrfs_device *dev)                    \
 static inline void                                                     \
 btrfs_device_set_##name(struct btrfs_device *dev, u64 size)            \
 {                                                                      \
+       preempt_disable();                                              \
        write_seqcount_begin(&dev->data_seqcount);                      \
        dev->name = size;                                               \
        write_seqcount_end(&dev->data_seqcount);                        \
+       preempt_enable();                                               \
 }
 #elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
 #define BTRFS_DEVICE_GETSET_FUNCS(name)                                        \
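On 32-bit SMP kernels the 64-bit device size fields cannot be read atomically, so they are protected by a sequence counter: the setter above bumps the count around the store and readers retry until they observe a stable count. With a plain seqcount_t (rather than the removed seqcount_mutex_t) the writer must not be preempted inside the write section, which is what the added preempt_disable()/preempt_enable() pair guarantees. The following is only an illustrative sketch of the matching lockless read side, roughly what the BTRFS_DEVICE_GETSET_FUNCS() macro expands to; the struct and function names are made up for the example.

    #include <linux/seqlock.h>
    #include <linux/types.h>

    /* Example only: a device-like structure with a seqcount-protected field. */
    struct example_dev {
            seqcount_t data_seqcount;
            u64 total_bytes;
    };

    static u64 example_dev_get_total_bytes(const struct example_dev *dev)
    {
            unsigned int seq;
            u64 size;

            do {
                    /* Snapshot the sequence and read the value... */
                    seq = read_seqcount_begin(&dev->data_seqcount);
                    size = dev->total_bytes;
                    /* ...and retry if a writer raced with the read. */
            } while (read_seqcount_retry(&dev->data_seqcount, seq));

            return size;
    }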
index 8bda092e60c5a00118ec8d5159f8a30bdcd38a04..e027c718ca01adea87dbd12bd5fa7b2945fb9c0c 100644 (file)
@@ -413,7 +413,6 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
        inode = d_backing_inode(object->backer);
        ASSERT(S_ISREG(inode->i_mode));
-       ASSERT(inode->i_mapping->a_ops->readpages);
 
        /* calculate the shift required to use bmap */
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
@@ -713,7 +712,6 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
        inode = d_backing_inode(object->backer);
        ASSERT(S_ISREG(inode->i_mode));
-       ASSERT(inode->i_mapping->a_ops->readpages);
 
        /* calculate the shift required to use bmap */
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
index 840587037b59bcceefd8a715482c05249c3d3e88..d87bd852ed961d3e99d676479bea899aacf65ac2 100644 (file)
@@ -5038,7 +5038,7 @@ bad:
        return;
 }
 
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mds_get_con(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
@@ -5047,7 +5047,7 @@ static struct ceph_connection *con_get(struct ceph_connection *con)
        return NULL;
 }
 
-static void con_put(struct ceph_connection *con)
+static void mds_put_con(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
@@ -5058,7 +5058,7 @@ static void con_put(struct ceph_connection *con)
  * if the client is unresponsive for long enough, the mds will kill
  * the session entirely.
  */
-static void peer_reset(struct ceph_connection *con)
+static void mds_peer_reset(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5067,7 +5067,7 @@ static void peer_reset(struct ceph_connection *con)
        send_mds_reconnect(mdsc, s);
 }
 
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5125,8 +5125,8 @@ out:
  * Note: returned pointer is the address of a structure that's
  * managed separately.  Caller must *not* attempt to free it.
  */
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
-                                       int *proto, int force_new)
+static struct ceph_auth_handshake *
+mds_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5142,7 +5142,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        return auth;
 }
 
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int mds_add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
 {
        struct ceph_mds_session *s = con->private;
@@ -5153,7 +5153,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
                                            challenge_buf, challenge_buf_len);
 }
 
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int mds_verify_authorizer_reply(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5165,7 +5165,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
                NULL, NULL, NULL, NULL);
 }
 
-static int invalidate_authorizer(struct ceph_connection *con)
+static int mds_invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
@@ -5288,15 +5288,15 @@ static int mds_check_message_signature(struct ceph_msg *msg)
 }
 
 static const struct ceph_connection_operations mds_con_ops = {
-       .get = con_get,
-       .put = con_put,
-       .dispatch = dispatch,
-       .get_authorizer = get_authorizer,
-       .add_authorizer_challenge = add_authorizer_challenge,
-       .verify_authorizer_reply = verify_authorizer_reply,
-       .invalidate_authorizer = invalidate_authorizer,
-       .peer_reset = peer_reset,
+       .get = mds_get_con,
+       .put = mds_put_con,
        .alloc_msg = mds_alloc_msg,
+       .dispatch = mds_dispatch,
+       .peer_reset = mds_peer_reset,
+       .get_authorizer = mds_get_authorizer,
+       .add_authorizer_challenge = mds_add_authorizer_challenge,
+       .verify_authorizer_reply = mds_verify_authorizer_reply,
+       .invalidate_authorizer = mds_invalidate_authorizer,
        .sign_message = mds_sign_message,
        .check_message_signature = mds_check_message_signature,
        .get_auth_request = mds_get_auth_request,
index e4c6ae47a79617851ded589594d93dfc7a55975a..6b1ce4efb591c04e932611574eabfd15bd0de3c1 100644 (file)
@@ -133,8 +133,9 @@ cifs_build_devname(char *nodename, const char *prepath)
  * Caller is responsible for freeing returned value if it is not error.
  */
 char *cifs_compose_mount_options(const char *sb_mountdata,
-                                  const char *fullpath,
-                                  const struct dfs_info3_param *ref)
+                                const char *fullpath,
+                                const struct dfs_info3_param *ref,
+                                char **devname)
 {
        int rc;
        char *name;
@@ -231,7 +232,10 @@ char *cifs_compose_mount_options(const char *sb_mountdata,
        strcat(mountdata, "ip=");
        strcat(mountdata, srvIP);
 
-       kfree(name);
+       if (devname)
+               *devname = name;
+       else
+               kfree(name);
 
        /*cifs_dbg(FYI, "%s: parent mountdata: %s\n", __func__, sb_mountdata);*/
        /*cifs_dbg(FYI, "%s: submount mountdata: %s\n", __func__, mountdata );*/
@@ -278,7 +282,7 @@ static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
 
        /* strip first '\' from fullpath */
        mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
-                                              fullpath + 1, NULL);
+                                              fullpath + 1, NULL, NULL);
        if (IS_ERR(mountdata)) {
                kfree(devname);
                return (struct vfsmount *)mountdata;
index ce0d0037fd0afdce968408243ea7bcb4fbebc446..e46da536ed339eee61e963d4693992ae19158845 100644 (file)
@@ -822,7 +822,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
                goto out;
        }
 
-       rc = cifs_setup_volume_info(cifs_sb->ctx);
+       rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, old_ctx->UNC);
        if (rc) {
                root = ERR_PTR(rc);
                goto out;
index 340ff81ee87bfdc6e3c29eb5f0198eaede82caf7..32f7a013402ee8be098e3fe1a600a4709821ae29 100644 (file)
@@ -78,7 +78,8 @@ extern char *cifs_build_path_to_root(struct smb3_fs_context *ctx,
                                     int add_treename);
 extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
 extern char *cifs_compose_mount_options(const char *sb_mountdata,
-               const char *fullpath, const struct dfs_info3_param *ref);
+               const char *fullpath, const struct dfs_info3_param *ref,
+               char **devname);
 /* extern void renew_parental_timestamps(struct dentry *direntry);*/
 extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                        struct TCP_Server_Info *server);
@@ -89,6 +90,7 @@ extern void cifs_wake_up_task(struct mid_q_entry *mid);
 extern int cifs_handle_standard(struct TCP_Server_Info *server,
                                struct mid_q_entry *mid);
 extern int smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx);
+extern int smb3_parse_opt(const char *options, const char *key, char **val);
 extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
 extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
 extern int cifs_call_async(struct TCP_Server_Info *server,
@@ -549,7 +551,7 @@ extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                        unsigned char *p24);
 
 extern int
-cifs_setup_volume_info(struct smb3_fs_context *ctx);
+cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname);
 
 extern struct TCP_Server_Info *
 cifs_find_tcp_session(struct smb3_fs_context *ctx);
index 5d39129406ea66668a62a4d9d990bb89f8ad3d49..10fe6d6d2dee49b068c84ee977092451974e399f 100644 (file)
@@ -2195,7 +2195,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
                tcon->nohandlecache = ctx->nohandlecache;
        else
-               tcon->nohandlecache = 1;
+               tcon->nohandlecache = true;
        tcon->nodelete = ctx->nodelete;
        tcon->local_lease = ctx->local_lease;
        INIT_LIST_HEAD(&tcon->pending_opens);
@@ -2628,7 +2628,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon,
        } else if (ctx)
                tcon->unix_ext = 1; /* Unix Extensions supported */
 
-       if (tcon->unix_ext == 0) {
+       if (!tcon->unix_ext) {
                cifs_dbg(FYI, "Unix extensions disabled so not set on reconnect\n");
                return;
        }
@@ -2972,17 +2972,20 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
        rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
                            ref_path, &referral, NULL);
        if (!rc) {
+               char *fake_devname = NULL;
+
                mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
-                                                  full_path + 1, &referral);
+                                                  full_path + 1, &referral,
+                                                  &fake_devname);
                free_dfs_info_param(&referral);
 
                if (IS_ERR(mdata)) {
                        rc = PTR_ERR(mdata);
                        mdata = NULL;
                } else {
-                       smb3_cleanup_fs_context_contents(ctx);
-                       rc = cifs_setup_volume_info(ctx);
+                       rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
                }
+               kfree(fake_devname);
                kfree(cifs_sb->ctx->mount_options);
                cifs_sb->ctx->mount_options = mdata;
        }
@@ -3036,6 +3039,7 @@ static int setup_dfs_tgt_conn(const char *path, const char *full_path,
        struct dfs_info3_param ref = {0};
        char *mdata = NULL;
        struct smb3_fs_context fake_ctx = {NULL};
+       char *fake_devname = NULL;
 
        cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
 
@@ -3044,16 +3048,18 @@ static int setup_dfs_tgt_conn(const char *path, const char *full_path,
                return rc;
 
        mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
-                                          full_path + 1, &ref);
+                                          full_path + 1, &ref,
+                                          &fake_devname);
        free_dfs_info_param(&ref);
 
        if (IS_ERR(mdata)) {
                rc = PTR_ERR(mdata);
                mdata = NULL;
        } else
-               rc = cifs_setup_volume_info(&fake_ctx);
+               rc = cifs_setup_volume_info(&fake_ctx, mdata, fake_devname);
 
        kfree(mdata);
+       kfree(fake_devname);
 
        if (!rc) {
                /*
@@ -3122,10 +3128,24 @@ static int do_dfs_failover(const char *path, const char *full_path, struct cifs_
  * we should pass a clone of the original context?
  */
 int
-cifs_setup_volume_info(struct smb3_fs_context *ctx)
+cifs_setup_volume_info(struct smb3_fs_context *ctx, const char *mntopts, const char *devname)
 {
        int rc = 0;
 
+       smb3_parse_devname(devname, ctx);
+
+       if (mntopts) {
+               char *ip;
+
+               cifs_dbg(FYI, "%s: mntopts=%s\n", __func__, mntopts);
+               rc = smb3_parse_opt(mntopts, "ip", &ip);
+               if (!rc && !cifs_convert_address((struct sockaddr *)&ctx->dstaddr, ip,
+                                                strlen(ip))) {
+                       cifs_dbg(VFS, "%s: failed to convert ip address\n", __func__);
+                       return -EINVAL;
+               }
+       }
+
        if (ctx->nullauth) {
                cifs_dbg(FYI, "Anonymous login\n");
                kfree(ctx->username);
index 0fdb0de7ff861b89e90ebc43b40921f79411eef2..4950ab0486aeeac4b85b2029d4e1440297b31529 100644 (file)
@@ -1417,7 +1417,7 @@ static struct cifs_ses *find_root_ses(struct vol_info *vi,
        int rc;
        struct cache_entry *ce;
        struct dfs_info3_param ref = {0};
-       char *mdata = NULL;
+       char *mdata = NULL, *devname = NULL;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses;
        struct smb3_fs_context ctx = {NULL};
@@ -1444,7 +1444,8 @@ static struct cifs_ses *find_root_ses(struct vol_info *vi,
 
        up_read(&htable_rw_lock);
 
-       mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref);
+       mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
+                                          &devname);
        free_dfs_info_param(&ref);
 
        if (IS_ERR(mdata)) {
@@ -1453,7 +1454,7 @@ static struct cifs_ses *find_root_ses(struct vol_info *vi,
                goto out;
        }
 
-       rc = cifs_setup_volume_info(&ctx);
+       rc = cifs_setup_volume_info(&ctx, NULL, devname);
 
        if (rc) {
                ses = ERR_PTR(rc);
@@ -1472,6 +1473,7 @@ out:
        smb3_cleanup_fs_context_contents(&ctx);
        kfree(mdata);
        kfree(rpath);
+       kfree(devname);
 
        return ses;
 }
index 68900f1629bffa97d243d645c73777157a59d414..97ac363b5df169db6c8f594b6bc3080c7473280f 100644 (file)
@@ -737,6 +737,7 @@ static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
        struct inode *inode;
+       int rc;
 
        if (flags & LOOKUP_RCU)
                return -ECHILD;
@@ -746,8 +747,25 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
                if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
                        CIFS_I(inode)->time = 0; /* force reval */
 
-               if (cifs_revalidate_dentry(direntry))
-                       return 0;
+               rc = cifs_revalidate_dentry(direntry);
+               if (rc) {
+                       cifs_dbg(FYI, "cifs_revalidate_dentry failed with rc=%d\n", rc);
+                       switch (rc) {
+                       case -ENOENT:
+                       case -ESTALE:
+                               /*
+                                * Those errors mean the dentry is invalid
+                                * (file was deleted or recreated)
+                                */
+                               return 0;
+                       default:
+                               /*
+                                * Otherwise some unexpected error happened;
+                                * report it as-is to the VFS layer
+                                */
+                               return rc;
+                       }
+               }
                else {
                        /*
                         * If the inode wasn't known to be a dfs entry when
index 076bcadc756a751ffaa43b55a107404b33b203fa..5111aadfdb6b6e14e86cebba2c59dd5af5de1cb7 100644 (file)
@@ -175,8 +175,10 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
        fsparam_flag_no("exec", Opt_ignore),
        fsparam_flag_no("dev", Opt_ignore),
        fsparam_flag_no("mand", Opt_ignore),
+       fsparam_flag_no("auto", Opt_ignore),
        fsparam_string("cred", Opt_ignore),
        fsparam_string("credentials", Opt_ignore),
+       fsparam_string("prefixpath", Opt_ignore),
        {}
 };
 
@@ -399,6 +401,37 @@ cifs_parse_smb_version(char *value, struct smb3_fs_context *ctx, bool is_smb3)
        return 0;
 }
 
+int smb3_parse_opt(const char *options, const char *key, char **val)
+{
+       int rc = -ENOENT;
+       char *opts, *orig, *p;
+
+       orig = opts = kstrdup(options, GFP_KERNEL);
+       if (!opts)
+               return -ENOMEM;
+
+       while ((p = strsep(&opts, ","))) {
+               char *nval;
+
+               if (!*p)
+                       continue;
+               if (strncasecmp(p, key, strlen(key)))
+                       continue;
+               nval = strchr(p, '=');
+               if (nval) {
+                       if (nval == p)
+                               continue;
+                       *nval++ = 0;
+                       *val = kstrndup(nval, strlen(nval), GFP_KERNEL);
+                       rc = !*val ? -ENOMEM : 0;
+                       goto out;
+               }
+       }
+out:
+       kfree(orig);
+       return rc;
+}
+
 /*
  * Parse a devname into substrings and populate the ctx->UNC and ctx->prepath
  * fields with the result. Returns 0 on success and an error otherwise
@@ -531,7 +564,7 @@ static int smb3_fs_context_validate(struct fs_context *fc)
 
        if (ctx->rdma && ctx->vals->protocol_id < SMB30_PROT_ID) {
                cifs_dbg(VFS, "SMB Direct requires Version >=3.0\n");
-               return -1;
+               return -EOPNOTSUPP;
        }
 
 #ifndef CONFIG_KEYS
@@ -554,7 +587,7 @@ static int smb3_fs_context_validate(struct fs_context *fc)
        /* make sure UNC has a share name */
        if (strlen(ctx->UNC) < 3 || !strchr(ctx->UNC + 3, '\\')) {
                cifs_dbg(VFS, "Malformed UNC. Unable to find share name.\n");
-               return -1;
+               return -ENOENT;
        }
 
        if (!ctx->got_ip) {
@@ -568,7 +601,7 @@ static int smb3_fs_context_validate(struct fs_context *fc)
                if (!cifs_convert_address((struct sockaddr *)&ctx->dstaddr,
                                          &ctx->UNC[2], len)) {
                        pr_err("Unable to determine destination address\n");
-                       return -1;
+                       return -EHOSTUNREACH;
                }
        }
 
@@ -1263,7 +1296,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
        return 0;
 
  cifs_parse_mount_err:
-       return 1;
+       return -EINVAL;
 }
 
 int smb3_init_fs_context(struct fs_context *fc)
index d85edf5d1429418728d9fa44fac2a2d9a6df7684..a5a9e33c0d739dbe1c80c97c12e9e4a4b4c1d390 100644 (file)
@@ -286,7 +286,7 @@ struct smb2_negotiate_req {
        __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
        __le16 NegotiateContextCount;  /* SMB3.1.1 only. MBZ earlier */
        __le16 Reserved2;
-       __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
+       __le16 Dialects[4]; /* BB expand this if autonegotiate > 4 dialects */
 } __packed;
 
 /* Dialects */
index e9abb41aa89bcc12002c61d777d4c4f728e9ffb8..4a2b836eb0177024627b252b11d81e802bd0851f 100644 (file)
@@ -338,7 +338,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
        if (ssocket == NULL)
                return -EAGAIN;
 
-       if (signal_pending(current)) {
+       if (fatal_signal_pending(current)) {
                cifs_dbg(FYI, "signal pending before send request\n");
                return -ERESTARTSYS;
        }
@@ -429,7 +429,7 @@ unmask:
 
        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
-               rc = -EINTR;
+               rc = -ERESTARTSYS;
        }
 
        /* uncork it */
@@ -666,10 +666,22 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
 
        if (*credits < num) {
                /*
-                * Return immediately if not too many requests in flight since
-                * we will likely be stuck on waiting for credits.
+                * If the server is tight on resources or just gives us fewer
+                * credits for other reasons (e.g. requests are coming out of
+                * order and the server delays granting more credits until it
+                * processes a missing mid) and we exhausted most available
+                * credits there may be situations when we try to send
+                * a compound request but we don't have enough credits. At this
+                * point the client needs to decide if it should wait for
+                * additional credits or fail the request. If at least one
+                * request is in flight there is a high probability that the
+                * server will return enough credits to satisfy this compound
+                * request.
+                *
+                * Return immediately if no requests in flight since we will be
+                * stuck on waiting for credits.
                 */
-               if (server->in_flight < num - *credits) {
+               if (server->in_flight == 0) {
                        spin_unlock(&server->req_lock);
                        trace_smb3_insufficient_credits(server->CurrentMid,
                                        server->hostname, scredits, sin_flight);
index e23752d9a79f3d255345177db1b66950ae8e6607..58d0f7187997950d55a32bdc9911e147adf1f7a1 100644 (file)
@@ -1016,15 +1016,19 @@ ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
 {
        int rc;
        struct dentry *lower_dentry;
+       struct inode *lower_inode;
 
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
-       if (!(d_inode(lower_dentry)->i_opflags & IOP_XATTR)) {
+       lower_inode = d_inode(lower_dentry);
+       if (!(lower_inode->i_opflags & IOP_XATTR)) {
                rc = -EOPNOTSUPP;
                goto out;
        }
-       rc = vfs_setxattr(lower_dentry, name, value, size, flags);
+       inode_lock(lower_inode);
+       rc = __vfs_setxattr_locked(lower_dentry, name, value, size, flags, NULL);
+       inode_unlock(lower_inode);
        if (!rc && inode)
-               fsstack_copy_attr_all(inode, d_inode(lower_dentry));
+               fsstack_copy_attr_all(inode, lower_inode);
 out:
        return rc;
 }
index acfb55834af23c437d4e166b50d15486c200a709..c41cb887eb7d3cbe3114147cea8fd0321767b831 100644 (file)
@@ -1474,21 +1474,25 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
        }
 
        /*
-        * Some filesystems may redirty the inode during the writeback
-        * due to delalloc, clear dirty metadata flags right before
-        * write_inode()
+        * If the inode has dirty timestamps and we need to write them, call
+        * mark_inode_dirty_sync() to notify the filesystem about it and to
+        * change I_DIRTY_TIME into I_DIRTY_SYNC.
         */
-       spin_lock(&inode->i_lock);
-
-       dirty = inode->i_state & I_DIRTY;
        if ((inode->i_state & I_DIRTY_TIME) &&
-           ((dirty & I_DIRTY_INODE) ||
-            wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
+           (wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync ||
             time_after(jiffies, inode->dirtied_time_when +
                        dirtytime_expire_interval * HZ))) {
-               dirty |= I_DIRTY_TIME;
                trace_writeback_lazytime(inode);
+               mark_inode_dirty_sync(inode);
        }
+
+       /*
+        * Some filesystems may redirty the inode during the writeback
+        * due to delalloc, clear dirty metadata flags right before
+        * write_inode()
+        */
+       spin_lock(&inode->i_lock);
+       dirty = inode->i_state & I_DIRTY;
        inode->i_state &= ~dirty;
 
        /*
@@ -1509,8 +1513,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
        spin_unlock(&inode->i_lock);
 
-       if (dirty & I_DIRTY_TIME)
-               mark_inode_dirty_sync(inode);
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & ~I_DIRTY_PAGES) {
                int err = write_inode(inode, wbc);
index b5c109703daaf9ff2f4384c1f53e15ee1c5f3e55..21c20fd5f9ee75a096241df596ec9ed45292eaa5 100644 (file)
@@ -735,9 +735,10 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
+               set_page_huge_active(page);
                /*
                 * unlock_page because locked by add_to_page_cache()
-                * page_put due to reference from alloc_huge_page()
+                * put_page() due to reference from alloc_huge_page()
                 */
                unlock_page(page);
                put_page(page);
index 985a9e3f976d3bf1e8b6da3aebbb6495fe875dc9..1f68105a41ed996622a4c35dbcb6e97e55e00922 100644 (file)
@@ -1025,6 +1025,8 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
                             const struct iovec *fast_iov,
                             struct iov_iter *iter, bool force);
+static void io_req_drop_files(struct io_kiocb *req);
+static void io_req_task_queue(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
 
@@ -1048,8 +1050,7 @@ EXPORT_SYMBOL(io_uring_get_socket);
 
 static inline void io_clean_op(struct io_kiocb *req)
 {
-       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED |
-                         REQ_F_INFLIGHT))
+       if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
                __io_clean_op(req);
 }
 
@@ -1069,14 +1070,21 @@ static bool io_match_task(struct io_kiocb *head,
 {
        struct io_kiocb *req;
 
-       if (task && head->task != task)
+       if (task && head->task != task) {
+               /* in terms of cancelation, always match if req task is dead */
+               if (head->task->flags & PF_EXITING)
+                       return true;
                return false;
+       }
        if (!files)
                return true;
 
        io_for_each_link(req, head) {
-               if ((req->flags & REQ_F_WORK_INITIALIZED) &&
-                   (req->work.flags & IO_WQ_WORK_FILES) &&
+               if (!(req->flags & REQ_F_WORK_INITIALIZED))
+                       continue;
+               if (req->file && req->file->f_op == &io_uring_fops)
+                       return true;
+               if ((req->work.flags & IO_WQ_WORK_FILES) &&
                    req->work.identity->files == files)
                        return true;
        }
@@ -1394,6 +1402,8 @@ static void io_req_clean_work(struct io_kiocb *req)
                        free_fs_struct(fs);
                req->work.flags &= ~IO_WQ_WORK_FS;
        }
+       if (req->flags & REQ_F_INFLIGHT)
+               io_req_drop_files(req);
 
        io_put_identity(req->task->io_uring, req);
 }
@@ -1503,11 +1513,14 @@ static bool io_grab_identity(struct io_kiocb *req)
                        return false;
                atomic_inc(&id->files->count);
                get_nsproxy(id->nsproxy);
-               req->flags |= REQ_F_INFLIGHT;
 
-               spin_lock_irq(&ctx->inflight_lock);
-               list_add(&req->inflight_entry, &ctx->inflight_list);
-               spin_unlock_irq(&ctx->inflight_lock);
+               if (!(req->flags & REQ_F_INFLIGHT)) {
+                       req->flags |= REQ_F_INFLIGHT;
+
+                       spin_lock_irq(&ctx->inflight_lock);
+                       list_add(&req->inflight_entry, &ctx->inflight_list);
+                       spin_unlock_irq(&ctx->inflight_lock);
+               }
                req->work.flags |= IO_WQ_WORK_FILES;
        }
        if (!(req->work.flags & IO_WQ_WORK_MM) &&
@@ -1622,18 +1635,11 @@ static void __io_queue_deferred(struct io_ring_ctx *ctx)
        do {
                struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
                                                struct io_defer_entry, list);
-               struct io_kiocb *link;
 
                if (req_need_defer(de->req, de->seq))
                        break;
                list_del_init(&de->list);
-               /* punt-init is done before queueing for defer */
-               link = __io_queue_async_work(de->req);
-               if (link) {
-                       __io_queue_linked_timeout(link);
-                       /* drop submission reference */
-                       io_put_req_deferred(link, 1);
-               }
+               io_req_task_queue(de->req);
                kfree(de);
        } while (!list_empty(&ctx->defer_list));
 }
@@ -1767,12 +1773,13 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
        struct io_kiocb *req, *tmp;
        struct io_uring_cqe *cqe;
        unsigned long flags;
-       bool all_flushed;
+       bool all_flushed, posted;
        LIST_HEAD(list);
 
        if (!force && __io_cqring_events(ctx) == rings->cq_ring_entries)
                return false;
 
+       posted = false;
        spin_lock_irqsave(&ctx->completion_lock, flags);
        list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
                if (!io_match_task(req, tsk, files))
@@ -1792,6 +1799,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
                        WRITE_ONCE(ctx->rings->cq_overflow,
                                   ctx->cached_cq_overflow);
                }
+               posted = true;
        }
 
        all_flushed = list_empty(&ctx->cq_overflow_list);
@@ -1801,9 +1809,11 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
                ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
        }
 
-       io_commit_cqring(ctx);
+       if (posted)
+               io_commit_cqring(ctx);
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       io_cqring_ev_posted(ctx);
+       if (posted)
+               io_cqring_ev_posted(ctx);
 
        while (!list_empty(&list)) {
                req = list_first_entry(&list, struct io_kiocb, compl.list);
@@ -2195,6 +2205,9 @@ static void __io_req_task_submit(struct io_kiocb *req)
        else
                __io_req_task_cancel(req, -EFAULT);
        mutex_unlock(&ctx->uring_lock);
+
+       if (ctx->flags & IORING_SETUP_SQPOLL)
+               io_sq_thread_drop_mm_files();
 }
 
 static void io_req_task_submit(struct callback_head *cb)
@@ -2270,6 +2283,8 @@ static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
                struct io_uring_task *tctx = rb->task->io_uring;
 
                percpu_counter_sub(&tctx->inflight, rb->task_refs);
+               if (atomic_read(&tctx->in_idle))
+                       wake_up(&tctx->wait);
                put_task_struct_many(rb->task, rb->task_refs);
                rb->task = NULL;
        }
@@ -2288,6 +2303,8 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
                        struct io_uring_task *tctx = rb->task->io_uring;
 
                        percpu_counter_sub(&tctx->inflight, rb->task_refs);
+                       if (atomic_read(&tctx->in_idle))
+                               wake_up(&tctx->wait);
                        put_task_struct_many(rb->task, rb->task_refs);
                }
                rb->task = req->task;
@@ -3548,7 +3565,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 
        /* read it all, or we did blocking attempt. no retry. */
        if (!iov_iter_count(iter) || !force_nonblock ||
-           (req->file->f_flags & O_NONBLOCK))
+           (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
                goto done;
 
        io_size -= ret;
@@ -4468,7 +4485,6 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
         * io_wq_work.flags, so initialize io_wq_work firstly.
         */
        io_req_init_async(req);
-       req->work.flags |= IO_WQ_WORK_NO_CANCEL;
 
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
@@ -4501,6 +4517,8 @@ static int io_close(struct io_kiocb *req, bool force_nonblock,
 
        /* if the file has a flush method, be safe and punt to async */
        if (close->put_file->f_op->flush && force_nonblock) {
+               /* not safe to cancel at this point */
+               req->work.flags |= IO_WQ_WORK_NO_CANCEL;
                /* was never set, but play safe */
                req->flags &= ~REQ_F_NOWAIT;
                /* avoid grabbing files - we don't need the files */
@@ -6157,8 +6175,10 @@ static void io_req_drop_files(struct io_kiocb *req)
        struct io_uring_task *tctx = req->task->io_uring;
        unsigned long flags;
 
-       put_files_struct(req->work.identity->files);
-       put_nsproxy(req->work.identity->nsproxy);
+       if (req->work.flags & IO_WQ_WORK_FILES) {
+               put_files_struct(req->work.identity->files);
+               put_nsproxy(req->work.identity->nsproxy);
+       }
        spin_lock_irqsave(&ctx->inflight_lock, flags);
        list_del(&req->inflight_entry);
        spin_unlock_irqrestore(&ctx->inflight_lock, flags);
@@ -6225,9 +6245,6 @@ static void __io_clean_op(struct io_kiocb *req)
                }
                req->flags &= ~REQ_F_NEED_CLEANUP;
        }
-
-       if (req->flags & REQ_F_INFLIGHT)
-               io_req_drop_files(req);
 }
 
 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
@@ -6446,6 +6463,16 @@ static struct file *io_file_get(struct io_submit_state *state,
                file = __io_file_get(state, fd);
        }
 
+       if (file && file->f_op == &io_uring_fops &&
+           !(req->flags & REQ_F_INFLIGHT)) {
+               io_req_init_async(req);
+               req->flags |= REQ_F_INFLIGHT;
+
+               spin_lock_irq(&ctx->inflight_lock);
+               list_add(&req->inflight_entry, &ctx->inflight_list);
+               spin_unlock_irq(&ctx->inflight_lock);
+       }
+
        return file;
 }
 
@@ -7245,14 +7272,18 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                                                TASK_INTERRUPTIBLE);
                /* make sure we run task_work before checking for signals */
                ret = io_run_task_work_sig();
-               if (ret > 0)
+               if (ret > 0) {
+                       finish_wait(&ctx->wait, &iowq.wq);
                        continue;
+               }
                else if (ret < 0)
                        break;
                if (io_should_wake(&iowq))
                        break;
-               if (test_bit(0, &ctx->cq_check_overflow))
+               if (test_bit(0, &ctx->cq_check_overflow)) {
+                       finish_wait(&ctx->wait, &iowq.wq);
                        continue;
+               }
                if (uts) {
                        timeout = schedule_timeout(timeout);
                        if (timeout == 0) {
@@ -8844,39 +8875,44 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx,
        }
 }
 
+static int io_uring_count_inflight(struct io_ring_ctx *ctx,
+                                  struct task_struct *task,
+                                  struct files_struct *files)
+{
+       struct io_kiocb *req;
+       int cnt = 0;
+
+       spin_lock_irq(&ctx->inflight_lock);
+       list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
+               cnt += io_match_task(req, task, files);
+       spin_unlock_irq(&ctx->inflight_lock);
+       return cnt;
+}
+
 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                                  struct task_struct *task,
                                  struct files_struct *files)
 {
        while (!list_empty_careful(&ctx->inflight_list)) {
                struct io_task_cancel cancel = { .task = task, .files = files };
-               struct io_kiocb *req;
                DEFINE_WAIT(wait);
-               bool found = false;
-
-               spin_lock_irq(&ctx->inflight_lock);
-               list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
-                       if (req->task != task ||
-                           req->work.identity->files != files)
-                               continue;
-                       found = true;
-                       break;
-               }
-               if (found)
-                       prepare_to_wait(&task->io_uring->wait, &wait,
-                                       TASK_UNINTERRUPTIBLE);
-               spin_unlock_irq(&ctx->inflight_lock);
+               int inflight;
 
-               /* We need to keep going until we don't find a matching req */
-               if (!found)
+               inflight = io_uring_count_inflight(ctx, task, files);
+               if (!inflight)
                        break;
 
                io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
                io_poll_remove_all(ctx, task, files);
                io_kill_timeouts(ctx, task, files);
+               io_cqring_overflow_flush(ctx, true, task, files);
                /* cancellations _may_ trigger task work */
                io_run_task_work();
-               schedule();
+
+               prepare_to_wait(&task->io_uring->wait, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               if (inflight == io_uring_count_inflight(ctx, task, files))
+                       schedule();
                finish_wait(&task->io_uring->wait, &wait);
        }
 }
@@ -8914,8 +8950,6 @@ static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
 
 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
 {
-       WARN_ON_ONCE(ctx->sqo_task != current);
-
        mutex_lock(&ctx->uring_lock);
        ctx->sqo_dead = 1;
        mutex_unlock(&ctx->uring_lock);
@@ -8936,7 +8970,6 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
        struct task_struct *task = current;
 
        if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
-               /* for SQPOLL only sqo_task has task notes */
                io_disable_sqo_submit(ctx);
                task = ctx->sq_data->thread;
                atomic_inc(&task->io_uring->in_idle);
@@ -8946,19 +8979,12 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
        io_cancel_defer_files(ctx, task, files);
        io_cqring_overflow_flush(ctx, true, task, files);
 
+       io_uring_cancel_files(ctx, task, files);
        if (!files)
                __io_uring_cancel_task_requests(ctx, task);
-       else
-               io_uring_cancel_files(ctx, task, files);
 
        if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
                atomic_dec(&task->io_uring->in_idle);
-               /*
-                * If the files that are going away are the ones in the thread
-                * identity, clear them out.
-                */
-               if (task->io_uring->identity->files == files)
-                       task->io_uring->identity->files = NULL;
                io_sq_thread_unpark(ctx->sq_data);
        }
 }
@@ -9082,6 +9108,10 @@ void __io_uring_task_cancel(void)
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
 
+       /* trigger io_disable_sqo_submit() */
+       if (tctx->sqpoll)
+               __io_uring_files_cancel(NULL);
+
        do {
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx);
@@ -9092,16 +9122,15 @@ void __io_uring_task_cancel(void)
                prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
                /*
-                * If we've seen completions, retry. This avoids a race where
-                * a completion comes in before we did prepare_to_wait().
+                * If we've seen completions, retry without waiting. This
+                * avoids a race where a completion comes in before we did
+                * prepare_to_wait().
                 */
-               if (inflight != tctx_inflight(tctx))
-                       continue;
-               schedule();
+               if (inflight == tctx_inflight(tctx))
+                       schedule();
                finish_wait(&tctx->wait, &wait);
        } while (1);
 
-       finish_wait(&tctx->wait, &wait);
        atomic_dec(&tctx->in_idle);
 
        io_uring_remove_task_files(tctx);
@@ -9112,6 +9141,9 @@ static int io_uring_flush(struct file *file, void *data)
        struct io_uring_task *tctx = current->io_uring;
        struct io_ring_ctx *ctx = file->private_data;
 
+       if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+               io_uring_cancel_task_requests(ctx, NULL);
+
        if (!tctx)
                return 0;
 
@@ -9128,7 +9160,10 @@ static int io_uring_flush(struct file *file, void *data)
 
        if (ctx->flags & IORING_SETUP_SQPOLL) {
                /* there is only one file note, which is owned by sqo_task */
-               WARN_ON_ONCE((ctx->sqo_task == current) ==
+               WARN_ON_ONCE(ctx->sqo_task != current &&
+                            xa_load(&tctx->xa, (unsigned long)file));
+               /* sqo_dead check is for when this happens after cancellation */
+               WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
                             !xa_load(&tctx->xa, (unsigned long)file));
 
                io_disable_sqo_submit(ctx);
index f277d023ebcd14b44454906cc5b776600151013b..c7571931214751b67d3ee3a8c9932d364831ac80 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pagemap.h>
 #include <linux/sched/mm.h>
 #include <linux/fsnotify.h>
+#include <linux/uio.h>
 
 #include "kernfs-internal.h"
 
@@ -180,11 +181,10 @@ static const struct seq_operations kernfs_seq_ops = {
  * it difficult to use seq_file.  Implement simplistic custom buffering for
  * bin files.
  */
-static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
-                                      char __user *user_buf, size_t count,
-                                      loff_t *ppos)
+static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       ssize_t len = min_t(size_t, count, PAGE_SIZE);
+       struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+       ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;
 
@@ -210,7 +210,7 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        of->event = atomic_read(&of->kn->attr.open->event);
        ops = kernfs_ops(of->kn);
        if (ops->read)
-               len = ops->read(of, buf, len, *ppos);
+               len = ops->read(of, buf, len, iocb->ki_pos);
        else
                len = -EINVAL;
 
@@ -220,12 +220,12 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        if (len < 0)
                goto out_free;
 
-       if (copy_to_user(user_buf, buf, len)) {
+       if (copy_to_iter(buf, len, iter) != len) {
                len = -EFAULT;
                goto out_free;
        }
 
-       *ppos += len;
+       iocb->ki_pos += len;
 
  out_free:
        if (buf == of->prealloc_buf)
@@ -235,31 +235,14 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
        return len;
 }
 
-/**
- * kernfs_fop_read - kernfs vfs read callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- */
-static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
-                              size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct kernfs_open_file *of = kernfs_of(file);
-
-       if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
-               return seq_read(file, user_buf, count, ppos);
-       else
-               return kernfs_file_direct_read(of, user_buf, count, ppos);
+       if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW)
+               return seq_read_iter(iocb, iter);
+       return kernfs_file_read_iter(iocb, iter);
 }
 
-/**
- * kernfs_fop_write - kernfs vfs write callback
- * @file: file pointer
- * @user_buf: data to write
- * @count: number of bytes
- * @ppos: starting offset
- *
+/*
  * Copy data in from userland and pass it to the matching kernfs write
  * operation.
  *
@@ -269,20 +252,18 @@ static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
  * modify only the value you're changing, then write the entire buffer
  * back.
  */
-static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
-                               size_t count, loff_t *ppos)
+static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-       struct kernfs_open_file *of = kernfs_of(file);
+       struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
+       ssize_t len = iov_iter_count(iter);
        const struct kernfs_ops *ops;
-       ssize_t len;
        char *buf;
 
        if (of->atomic_write_len) {
-               len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
-               len = min_t(size_t, count, PAGE_SIZE);
+               len = min_t(size_t, len, PAGE_SIZE);
        }
 
        buf = of->prealloc_buf;
@@ -293,7 +274,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
        if (!buf)
                return -ENOMEM;
 
-       if (copy_from_user(buf, user_buf, len)) {
+       if (copy_from_iter(buf, len, iter) != len) {
                len = -EFAULT;
                goto out_free;
        }
@@ -312,7 +293,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
 
        ops = kernfs_ops(of->kn);
        if (ops->write)
-               len = ops->write(of, buf, len, *ppos);
+               len = ops->write(of, buf, len, iocb->ki_pos);
        else
                len = -EINVAL;
 
@@ -320,7 +301,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
        mutex_unlock(&of->mutex);
 
        if (len > 0)
-               *ppos += len;
+               iocb->ki_pos += len;
 
 out_free:
        if (buf == of->prealloc_buf)
@@ -673,7 +654,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 
        /*
         * Write path needs atomic_write_len outside active reference.
-        * Cache it in open_file.  See kernfs_fop_write() for details.
+        * Cache it in open_file.  See kernfs_fop_write_iter() for details.
         */
        of->atomic_write_len = ops->atomic_write_len;
 
@@ -960,14 +941,16 @@ void kernfs_notify(struct kernfs_node *kn)
 EXPORT_SYMBOL_GPL(kernfs_notify);
 
 const struct file_operations kernfs_file_fops = {
-       .read           = kernfs_fop_read,
-       .write          = kernfs_fop_write,
+       .read_iter      = kernfs_fop_read_iter,
+       .write_iter     = kernfs_fop_write_iter,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
        .fsync          = noop_fsync,
+       .splice_read    = generic_file_splice_read,
+       .splice_write   = iter_file_splice_write,
 };
 
 /**
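Moving kernfs from ->read/->write to ->read_iter/->write_iter lets the generic splice helpers wired up above (generic_file_splice_read and iter_file_splice_write) operate on kernfs files, so splice() and sendfile() on sysfs attributes work without a set_fs()-based fallback. Below is a small user-space sketch of what this enables; the sysfs path is only an example and assumes the attribute exists on the running kernel.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/sendfile.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
            /* sendfile() pulls from the source via splice_read, which kernfs
             * now provides on top of read_iter. */
            int in = open("/sys/kernel/profiling", O_RDONLY);
            int out = open("/tmp/profiling.copy", O_WRONLY | O_CREAT | O_TRUNC, 0644);
            off_t off = 0;
            ssize_t n;

            if (in < 0 || out < 0) {
                    perror("open");
                    return 1;
            }
            n = sendfile(out, in, &off, 4096);
            if (n < 0)
                    perror("sendfile");
            else
                    printf("copied %zd bytes\n", n);
            close(in);
            close(out);
            return 0;
    }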
index 4f274f21c4abcb9c2d96bc4088c3f1f4003258f0..af64b4e6fd1ff2162ee837b4f64baf0f3621fb19 100644 (file)
@@ -324,6 +324,21 @@ pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
        return NULL;
 }
 
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+       return (s32)(s1 - s2) > 0;
+}
+
+static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
+{
+       if (pnfs_seqid_is_newer(newseq, lo->plh_barrier))
+               lo->plh_barrier = newseq;
+}
+
 static void
 pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
                         u32 seq)
@@ -335,6 +350,7 @@ pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
        if (seq != 0) {
                WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
                lo->plh_return_seq = seq;
+               pnfs_barrier_update(lo, seq);
        }
 }
 
@@ -639,15 +655,6 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
        return rv;
 }
 
-/*
- * Compare 2 layout stateid sequence ids, to see which is newer,
- * taking into account wraparound issues.
- */
-static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
-{
-       return (s32)(s1 - s2) > 0;
-}
-
 static bool
 pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
                 const struct pnfs_layout_range *recall_range)
@@ -984,8 +991,7 @@ pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                new_barrier = be32_to_cpu(new->seqid);
        else if (new_barrier == 0)
                return;
-       if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
-               lo->plh_barrier = new_barrier;
+       pnfs_barrier_update(lo, new_barrier);
 }
 
 static bool
@@ -994,7 +1000,7 @@ pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
 {
        u32 seqid = be32_to_cpu(stateid->seqid);
 
-       return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
+       return !pnfs_seqid_is_newer(seqid, lo->plh_barrier) && lo->plh_barrier;
 }
 
 /* lget is set to 1 if called from inside send_layoutget call chain */
@@ -1183,20 +1189,17 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
                return false;
        set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
        pnfs_get_layout_hdr(lo);
+       nfs4_stateid_copy(stateid, &lo->plh_stateid);
+       *cred = get_cred(lo->plh_lc_cred);
        if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
-               nfs4_stateid_copy(stateid, &lo->plh_stateid);
-               *cred = get_cred(lo->plh_lc_cred);
                if (lo->plh_return_seq != 0)
                        stateid->seqid = cpu_to_be32(lo->plh_return_seq);
                if (iomode != NULL)
                        *iomode = lo->plh_return_iomode;
                pnfs_clear_layoutreturn_info(lo);
-               return true;
-       }
-       nfs4_stateid_copy(stateid, &lo->plh_stateid);
-       *cred = get_cred(lo->plh_lc_cred);
-       if (iomode != NULL)
+       } else if (iomode != NULL)
                *iomode = IOMODE_ANY;
+       pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
        return true;
 }
 
@@ -1909,6 +1912,11 @@ static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
                wake_up_var(&lo->plh_outstanding);
 }
 
+static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
+{
+       return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
+}
+
 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
 {
        unsigned long *bitlock = &lo->plh_flags;
@@ -2383,23 +2391,34 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                goto out_forget;
        }
 
-       if (!pnfs_layout_is_valid(lo)) {
-               /* We have a completely new layout */
-               pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
-       } else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+       if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
                /* existing state ID, make sure the sequence number matches. */
                if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+                       if (!pnfs_layout_is_valid(lo) &&
+                           pnfs_is_first_layoutget(lo))
+                               lo->plh_barrier = 0;
                        dprintk("%s forget reply due to sequence\n", __func__);
                        goto out_forget;
                }
                pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
-       } else {
+       } else if (pnfs_layout_is_valid(lo)) {
                /*
                 * We got an entirely new state ID.  Mark all segments for the
                 * inode invalid, and retry the layoutget
                 */
-               pnfs_mark_layout_stateid_invalid(lo, &free_me);
+               struct pnfs_layout_range range = {
+                       .iomode = IOMODE_ANY,
+                       .length = NFS4_MAX_UINT64,
+               };
+               pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
+               pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+                                               &range, 0);
                goto out_forget;
+       } else {
+               /* We have a completely new layout */
+               if (!pnfs_is_first_layoutget(lo))
+                       goto out_forget;
+               pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
        }
 
        pnfs_get_lseg(lseg);
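
The pnfs_seqid_is_newer()/pnfs_barrier_update() helpers moved up and reused above rely on serial-number arithmetic: the unsigned difference is reinterpreted as signed, so a stateid sequence number that has wrapped past zero still compares as newer, as long as the two values are less than 2^31 apart. A standalone illustration with made-up seqid values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same comparison as pnfs_seqid_is_newer(), in plain C99 types. */
    static int seqid_is_newer(uint32_t s1, uint32_t s2)
    {
            return (int32_t)(s1 - s2) > 0;
    }

    int main(void)
    {
            assert(seqid_is_newer(2, 1));                /* ordinary ordering      */
            assert(seqid_is_newer(1, 0xfffffffeu));      /* newer despite wrapping */
            assert(!seqid_is_newer(7, 7));               /* equal is not newer     */
            printf("wraparound-safe comparison holds\n");
            return 0;
    }
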
index 821db21ba072cd7dbf7a70c69bd8f0ee378758f6..34b880211e5eab8e63fd3d034a9a3b6aa8b4fa40 100644 (file)
@@ -865,9 +865,14 @@ compose_entry_fh(struct nfsd3_readdirres *cd, struct svc_fh *fhp,
        if (isdotent(name, namlen)) {
                if (namlen == 2) {
                        dchild = dget_parent(dparent);
-                       /* filesystem root - cannot return filehandle for ".." */
+                       /*
+                        * Don't return filehandle for ".." if we're at
+                        * the filesystem or export root:
+                        */
                        if (dchild == dparent)
                                goto out;
+                       if (dparent == exp->ex_path.dentry)
+                               goto out;
                } else
                        dchild = dget(dparent);
        } else
index e5b616c93e11b673d870a880d5d6233f20da536a..0fed532efa68d127adc8cbcc5755056495c45caf 100644 (file)
@@ -84,6 +84,14 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
 
                if (ovl_is_private_xattr(sb, name))
                        continue;
+
+               error = security_inode_copy_up_xattr(name);
+               if (error < 0 && error != -EOPNOTSUPP)
+                       break;
+               if (error == 1) {
+                       error = 0;
+                       continue; /* Discard */
+               }
 retry:
                size = vfs_getxattr(old, name, value, value_size);
                if (size == -ERANGE)
@@ -107,13 +115,6 @@ retry:
                        goto retry;
                }
 
-               error = security_inode_copy_up_xattr(name);
-               if (error < 0 && error != -EOPNOTSUPP)
-                       break;
-               if (error == 1) {
-                       error = 0;
-                       continue; /* Discard */
-               }
                error = vfs_setxattr(new, name, value, size, 0);
                if (error) {
                        if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
index 28a075b5f5b2eb2e2391d8b341d3844723ed48b9..d1efa3a5a5032e6ce216342170e952c6222ec04f 100644 (file)
@@ -992,8 +992,8 @@ static char *ovl_get_redirect(struct dentry *dentry, bool abs_redirect)
 
                buflen -= thislen;
                memcpy(&buf[buflen], name, thislen);
-               tmp = dget_dlock(d->d_parent);
                spin_unlock(&d->d_lock);
+               tmp = dget_parent(d);
 
                dput(d);
                d = tmp;
index bd9dd38347aed87ed263d5df56ffcd3e658ea562..077d3ad343f689f4f517b804e2bfc848082a4d2b 100644 (file)
@@ -398,8 +398,9 @@ static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        const struct cred *old_cred;
        int ret;
 
-       if (!ovl_should_sync(OVL_FS(file_inode(file)->i_sb)))
-               return 0;
+       ret = ovl_sync_status(OVL_FS(file_inode(file)->i_sb));
+       if (ret <= 0)
+               return ret;
 
        ret = ovl_real_fdget_meta(file, &real, !datasync);
        if (ret)
index d739e14c6814df6a38c004e96124a749b8948fe1..cf41bcb664bc74505b7b313c59ccfc21a492835f 100644 (file)
@@ -352,7 +352,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
                goto out;
 
        if (!value && !upperdentry) {
+               old_cred = ovl_override_creds(dentry->d_sb);
                err = vfs_getxattr(realdentry, name, NULL, 0);
+               revert_creds(old_cred);
                if (err < 0)
                        goto out_drop_write;
        }
index b487e48c7fd4276d22caf17a56e75f5f298a8cdf..cb4e2d60ecf9ceb2a4aeceecca1748d3002008eb 100644 (file)
@@ -324,6 +324,7 @@ int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry);
 bool ovl_is_metacopy_dentry(struct dentry *dentry);
 char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
                             int padding);
+int ovl_sync_status(struct ovl_fs *ofs);
 
 static inline bool ovl_is_impuredir(struct super_block *sb,
                                    struct dentry *dentry)
index fbd5e27ce66bd3aad0c44e406ed943c016c1ea32..63efee554f69a7c1e862650d8f7b388576fda2cd 100644 (file)
@@ -81,6 +81,8 @@ struct ovl_fs {
        atomic_long_t last_ino;
        /* Whiteout dentry cache */
        struct dentry *whiteout;
+       /* r/o snapshot of upperdir sb's only taken on volatile mounts */
+       errseq_t errseq;
 };
 
 static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
index 01620ebae1bd4e0f1a03a9bd05e8f28c9c5afb80..f404a78e6b607bf1d2b854506ebb3f72cd8af8d2 100644 (file)
@@ -865,7 +865,7 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
 
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
-       struct file *realfile = od->realfile;
+       struct file *old, *realfile = od->realfile;
 
        if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
                return want_upper ? NULL : realfile;
@@ -874,29 +874,20 @@ struct file *ovl_dir_real_file(const struct file *file, bool want_upper)
         * Need to check if we started out being a lower dir, but got copied up
         */
        if (!od->is_upper) {
-               struct inode *inode = file_inode(file);
-
                realfile = READ_ONCE(od->upperfile);
                if (!realfile) {
                        struct path upperpath;
 
                        ovl_path_upper(dentry, &upperpath);
                        realfile = ovl_dir_open_realfile(file, &upperpath);
+                       if (IS_ERR(realfile))
+                               return realfile;
 
-                       inode_lock(inode);
-                       if (!od->upperfile) {
-                               if (IS_ERR(realfile)) {
-                                       inode_unlock(inode);
-                                       return realfile;
-                               }
-                               smp_store_release(&od->upperfile, realfile);
-                       } else {
-                               /* somebody has beaten us to it */
-                               if (!IS_ERR(realfile))
-                                       fput(realfile);
-                               realfile = od->upperfile;
+                       old = cmpxchg_release(&od->upperfile, NULL, realfile);
+                       if (old) {
+                               fput(realfile);
+                               realfile = old;
                        }
-                       inode_unlock(inode);
                }
        }
 
@@ -909,8 +900,9 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        struct file *realfile;
        int err;
 
-       if (!ovl_should_sync(OVL_FS(file->f_path.dentry->d_sb)))
-               return 0;
+       err = ovl_sync_status(OVL_FS(file->f_path.dentry->d_sb));
+       if (err <= 0)
+               return err;
 
        realfile = ovl_dir_real_file(file, true);
        err = PTR_ERR_OR_ZERO(realfile);
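
The ovl_dir_real_file() hunk above replaces the inode-lock dance with a single lock-free publish of the upper file: the first opener to install its pointer wins, and a racing opener drops its own copy and uses the published one. A compilable userspace model of that pattern with C11 atomics; a plain (seq_cst) compare-exchange stands in for cmpxchg_release(), and heap strings stand in for struct file:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static _Atomic(char *) upperfile;       /* models od->upperfile */

    static char *open_upper(void)           /* models ovl_dir_open_realfile() */
    {
            char *f = malloc(16);

            if (f)
                    strcpy(f, "realfile");
            return f;
    }

    static char *get_upperfile(void)
    {
            char *cur = atomic_load(&upperfile);
            char *new;

            if (cur)
                    return cur;

            new = open_upper();
            if (!new)
                    return NULL;

            cur = NULL;
            if (!atomic_compare_exchange_strong(&upperfile, &cur, new)) {
                    free(new);      /* somebody has beaten us to it: drop ours */
                    return cur;     /* ...and use the published pointer        */
            }
            return new;
    }

    int main(void)
    {
            char *a = get_upperfile();
            char *b = get_upperfile();

            printf("%s, stable pointer: %s\n", a, a == b ? "yes" : "no");
            free(a);
            return 0;
    }
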
index 2bd570cbe8a4591f58610f9ede56e713b3eb2aa8..d58b8f2bf9d0a45b4745e6feb776f7651c6362be 100644 (file)
@@ -264,11 +264,20 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
        struct super_block *upper_sb;
        int ret;
 
-       if (!ovl_upper_mnt(ofs))
-               return 0;
+       ret = ovl_sync_status(ofs);
+       /*
+        * We have to always set the err, because the return value isn't
+        * checked in syncfs, and instead indirectly return an error via
+        * the sb's writeback errseq, which VFS inspects after this call.
+        */
+       if (ret < 0) {
+               errseq_set(&sb->s_wb_err, -EIO);
+               return -EIO;
+       }
+
+       if (!ret)
+               return ret;
 
-       if (!ovl_should_sync(ofs))
-               return 0;
        /*
         * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
         * All the super blocks will be iterated, including upper_sb.
@@ -1923,6 +1932,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        unsigned int numlower;
        int err;
 
+       err = -EIO;
+       if (WARN_ON(sb->s_user_ns != current_user_ns()))
+               goto out;
+
        sb->s_d_op = &ovl_dentry_operations;
 
        err = -ENOMEM;
@@ -1989,6 +2002,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_op = &ovl_super_operations;
 
        if (ofs->config.upperdir) {
+               struct super_block *upper_sb;
+
                if (!ofs->config.workdir) {
                        pr_err("missing 'workdir'\n");
                        goto out_err;
@@ -1998,6 +2013,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                if (err)
                        goto out_err;
 
+               upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+               if (!ovl_should_sync(ofs)) {
+                       ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
+                       if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
+                               err = -EIO;
+                               pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
+                               goto out_err;
+                       }
+               }
+
                err = ovl_get_workdir(sb, ofs, &upperpath);
                if (err)
                        goto out_err;
@@ -2005,9 +2030,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                if (!ofs->workdir)
                        sb->s_flags |= SB_RDONLY;
 
-               sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
-               sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
-
+               sb->s_stack_depth = upper_sb->s_stack_depth;
+               sb->s_time_gran = upper_sb->s_time_gran;
        }
        oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
        err = PTR_ERR(oe);
index 6569031af3cdd4ca0fed5cf035161a7f1a33248c..9826b003f1d2702132c4cd12920230723b02416d 100644 (file)
@@ -962,3 +962,30 @@ err_free:
        kfree(buf);
        return ERR_PTR(res);
 }
+
+/*
+ * ovl_sync_status() - Check fs sync status for volatile mounts
+ *
+ * Returns 1 if this is not a volatile mount and a real sync is required.
+ *
+ * Returns 0 if syncing can be skipped because mount is volatile, and no errors
+ * have occurred on the upperdir since the mount.
+ *
+ * Returns -errno if it is a volatile mount, and the error that occurred since
+ * the last mount. If the error code changes, it'll return the latest error
+ * code.
+ */
+
+int ovl_sync_status(struct ovl_fs *ofs)
+{
+       struct vfsmount *mnt;
+
+       if (ovl_should_sync(ofs))
+               return 1;
+
+       mnt = ovl_upper_mnt(ofs);
+       if (!mnt)
+               return 0;
+
+       return errseq_check(&mnt->mnt_sb->s_wb_err, ofs->errseq);
+}
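
Putting the overlayfs pieces together: at mount time a volatile overlay samples the upper filesystem's writeback error cursor (ofs->errseq), and ovl_sync_status() later reports 1 (sync for real), 0 (volatile, nothing new) or a negative error (volatile, and the upper fs hit an error that was never surfaced). A toy userspace model of that decision, with a plain counter standing in for errseq_t; illustrative only, not the kernel errseq API:

    #include <errno.h>
    #include <stdio.h>

    struct toy_ovl {
            int volatile_mount;             /* "volatile" mount option      */
            unsigned int sampled_err;       /* cursor sampled at mount time */
    };

    /* Mirrors the ovl_sync_status() contract described in the kerneldoc above. */
    static int toy_sync_status(const struct toy_ovl *ofs, unsigned int upper_wb_err)
    {
            if (!ofs->volatile_mount)
                    return 1;               /* caller must really sync        */
            if (upper_wb_err == ofs->sampled_err)
                    return 0;               /* no unseen writeback error      */
            return -EIO;                    /* error happened since the mount */
    }

    int main(void)
    {
            struct toy_ovl regular = { .volatile_mount = 0, .sampled_err = 0 };
            struct toy_ovl volat   = { .volatile_mount = 1, .sampled_err = 3 };

            printf("regular: %d\n", toy_sync_status(&regular, 3));  /* 1  */
            printf("clean:   %d\n", toy_sync_status(&volat, 3));    /* 0  */
            printf("errored: %d\n", toy_sync_status(&volat, 4));    /* -5 */
            return 0;
    }
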
index c5989cfd564d45c7b6e7690af69a1e8effe1fdcd..39c96845a72fb15e19844248d206e81ccf448001 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1206,6 +1206,7 @@ const struct file_operations pipefifo_fops = {
        .unlocked_ioctl = pipe_ioctl,
        .release        = pipe_release,
        .fasync         = pipe_fasync,
+       .splice_write   = iter_file_splice_write,
 };
 
 /*
index 317899222d7fdf35b564fc7664311f47194db644..d2018f70d1fae25630fb1a847a11a65e86d155b2 100644 (file)
@@ -1770,6 +1770,12 @@ static int process_sysctl_arg(char *param, char *val,
                        return 0;
        }
 
+       if (!val)
+               return -EINVAL;
+       len = strlen(val);
+       if (len == 0)
+               return -EINVAL;
+
        /*
         * To set sysctl options, we use a temporary mount of proc, look up the
         * respective sys/ file and write to it. To avoid mounting it when no
@@ -1811,7 +1817,6 @@ static int process_sysctl_arg(char *param, char *val,
                                file, param, val);
                goto out;
        }
-       len = strlen(val);
        wret = kernel_write(file, val, len, &pos);
        if (wret < 0) {
                err = wret;
index 5bef3a68395d8d32f4f686370846e7b6909ca728..d0df217f4712a5f6b7ca875239fa7a9a0d361800 100644 (file)
@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
        struct buffer_head *bh = NULL;
        int nsr = 0;
        struct udf_sb_info *sbi;
+       loff_t session_offset;
 
        sbi = UDF_SB(sb);
        if (sb->s_blocksize < sizeof(struct volStructDesc))
@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
        else
                sectorsize = sb->s_blocksize;
 
-       sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+       session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
+       sector += session_offset;
 
        udf_debug("Starting at sector %u (%lu byte sectors)\n",
                  (unsigned int)(sector >> sb->s_blocksize_bits),
@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)
 
        if (nsr > 0)
                return 1;
-       else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
-                       VSD_FIRST_SECTOR_OFFSET)
+       else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
                return -1;
        else
                return 0;
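
Beyond removing the duplicated expression, session_offset keeps the (loff_t) widening in one place for both users; shifting the 32-bit session start before widening would truncate byte offsets past 4 GiB. A self-contained illustration with made-up numbers:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t session = 2295104;         /* hypothetical session start (blocks) */
            unsigned int blocksize_bits = 11;   /* 2048-byte sectors */

            /* Shift performed in 32 bits, then widened: high bits already lost. */
            uint64_t truncated = (uint64_t)(session << blocksize_bits);
            /* Widen first, then shift: the full byte offset survives. */
            uint64_t correct   = (uint64_t)session << blocksize_bits;

            printf("truncated=%" PRIu64 " correct=%" PRIu64 "\n", truncated, correct);
            return 0;
    }
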
index f5e92fe9151c389a9e55a2c6f857eb91279a2fdb..bd1c39907b924a4f0ab5ad1db20a4d3002f11a88 100644 (file)
@@ -783,6 +783,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
 
 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
 
+int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
 
 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
 
index 3c3e16c0aadbfd585c0ed0fcd8e9db296a69a1d8..dc605c4bc22491cb3b302008619db20e99386e64 100644 (file)
@@ -2,9 +2,8 @@
 #ifndef __DT_APQ8016_LPASS_H
 #define __DT_APQ8016_LPASS_H
 
-#define MI2S_PRIMARY   0
-#define MI2S_SECONDARY 1
-#define MI2S_TERTIARY  2
-#define MI2S_QUATERNARY        3
+#include <dt-bindings/sound/qcom,lpass.h>
+
+/* NOTE: Use qcom,lpass.h to define any AIF IDs for LPASS */
 
 #endif /* __DT_APQ8016_LPASS_H */
diff --git a/include/dt-bindings/sound/qcom,lpass.h b/include/dt-bindings/sound/qcom,lpass.h
new file mode 100644 (file)
index 0000000..7b0b80b
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_QCOM_LPASS_H
+#define __DT_QCOM_LPASS_H
+
+#define MI2S_PRIMARY   0
+#define MI2S_SECONDARY 1
+#define MI2S_TERTIARY  2
+#define MI2S_QUATERNARY        3
+#define MI2S_QUINARY   4
+
+#define LPASS_DP_RX    5
+
+#define LPASS_MCLK0    0
+
+#endif /* __DT_QCOM_LPASS_H */
index 56ecaafd2dc682d05ef88164b4ba4057b6b15ab8..5c1ee8b36b1972c6c025778a69b9c625834d7f58 100644 (file)
@@ -2,10 +2,8 @@
 #ifndef __DT_SC7180_LPASS_H
 #define __DT_SC7180_LPASS_H
 
-#define MI2S_PRIMARY   0
-#define MI2S_SECONDARY 1
-#define LPASS_DP_RX    2
+#include <dt-bindings/sound/qcom,lpass.h>
 
-#define LPASS_MCLK0    0
+/* NOTE: Use qcom,lpass.h to define any AIF IDs for LPASS */
 
 #endif /* __DT_APQ8016_LPASS_H */
index 89bb8b84173e4ad0c8ca683384419b6027e890b8..1779f90eeb4cb4b3daf2a99b2246148de0c407a7 100644 (file)
@@ -609,6 +609,18 @@ static inline const char *dev_name(const struct device *dev)
        return kobject_name(&dev->kobj);
 }
 
+/**
+ * dev_bus_name - Return a device's bus/class name, if at all possible
+ * @dev: struct device to get the bus/class name of
+ *
+ * Will return the name of the bus/class the device is attached to.  If it is
+ * not attached to a bus/class, an empty string will be returned.
+ */
+static inline const char *dev_bus_name(const struct device *dev)
+{
+       return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
+}
+
 __printf(2, 3) int dev_set_name(struct device *dev, const char *name, ...);
 
 #ifdef CONFIG_NUMA
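
Because dev_bus_name() above falls back from the bus name to the class name and finally to an empty string, callers can feed its result straight into printf-style formatting without a NULL check. A compilable userspace mock of the same fallback order; the stub structs below only stand in for the kernel types:

    #include <stdio.h>

    struct bus_type { const char *name; };
    struct class    { const char *name; };
    struct device   { const struct bus_type *bus; const struct class *class; };

    /* Same expression as the new helper: bus name, else class name, else "". */
    static const char *dev_bus_name(const struct device *dev)
    {
            return dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "");
    }

    int main(void)
    {
            struct bus_type pci = { "pci" };
            struct class    rtc = { "rtc" };
            struct device on_bus   = { .bus = &pci };
            struct device in_class = { .class = &rtc };
            struct device bare     = { 0 };

            printf("'%s' '%s' '%s'\n", dev_bus_name(&on_bus),
                   dev_bus_name(&in_class), dev_bus_name(&bare)); /* 'pci' 'rtc' '' */
            return 0;
    }
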
index ebca2ef022127519ce9ec2c5c58cbe33b6a198af..b5807f23caf80458a6238d286c4deec6fd21fe07 100644 (file)
@@ -770,6 +770,8 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
 }
 #endif
 
+void set_page_huge_active(struct page *page);
+
 #else  /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
 
index b3f0e2018c6238a67a2b1cda397c3dab00689755..efa96263b81b3053d3dc1ec460e019ffb841ec64 100644 (file)
@@ -616,7 +616,10 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
 
 static inline void *dev_iommu_priv_get(struct device *dev)
 {
-       return dev->iommu->priv;
+       if (dev->iommu)
+               return dev->iommu->priv;
+       else
+               return NULL;
 }
 
 static inline void dev_iommu_priv_set(struct device *dev, void *priv)
index fe1ae73ff8b574a8ee31e03b07f06cb6d5ca2473..0aea9e2a2a01d18d4036997967518d56774f371e 100644 (file)
@@ -333,6 +333,13 @@ static inline void *kasan_reset_tag(const void *addr)
        return (void *)arch_kasan_reset_tag(addr);
 }
 
+/**
+ * kasan_report - print a report about a bad memory access detected by KASAN
+ * @addr: address of the bad access
+ * @size: size of the bad access
+ * @is_write: whether the bad access is a write or a read
+ * @ip: instruction pointer for the accessibility check or the bad access itself
+ */
 bool kasan_report(unsigned long addr, size_t size,
                bool is_write, unsigned long ip);
 
index b3a36b0cfc81a3742b9c588b0efa8bb1b9ec8389..1883a4a9f16a7fc70bf533cd2206d046c6722b14 100644 (file)
@@ -266,7 +266,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
 extern bool arch_within_kprobe_blacklist(unsigned long addr);
 extern int arch_populate_kprobe_blacklist(void);
 extern bool arch_kprobe_on_func_entry(unsigned long offset);
-extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
+extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
 
 extern bool within_kprobe_blacklist(unsigned long addr);
 extern int kprobe_add_ksym_blacklist(unsigned long entry);
index 65b81e0c494d207dc27c9dd526c7146262eae5d9..2484ed97e72f586b7258ec76c392343f7ef4bf03 100644 (file)
@@ -33,6 +33,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                                          unsigned int cpu,
                                          const char *namefmt);
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
 /**
  * kthread_run - create and wake a thread.
  * @threadfn: the function to run until signal_pending(current).
index a12b5523cc18e699fea0f36ca49b65dbfdfeb015..73f20deb497d5a86f58b3823615e2b0cd7920201 100644 (file)
@@ -230,6 +230,5 @@ static inline ktime_t ms_to_ktime(u64 ms)
 }
 
 # include <linux/timekeeping.h>
-# include <linux/timekeeping32.h>
 
 #endif
index 5bcfbd972e9709234926b9b7148df45f3092f2f0..dbf8506decca0f5e4f6527b7a070a1c3c8335f43 100644 (file)
  * Objtool generates debug info for both FUNC & CODE, but needs special
  * annotations for each CODE's start (to describe the actual stack frame).
  *
+ * Objtool requires that all code must be contained in an ELF symbol. Symbol
+ * names that have a .L prefix do not emit symbol table entries. .L

+ * prefixed symbols can be used within a code region, but should be avoided for
+ * denoting a range of code via ``SYM_*_START/END`` annotations.
+ *
  * ALIAS -- does not generate debug info -- the aliased function will
  */
 
index 5d71e8a8500f5ed13872c19b01cf7a8af9d62c11..aca4dc037b70b7284e6f997264c4c66d2701d8ac 100644 (file)
@@ -35,6 +35,9 @@ struct mdiobb_ctrl {
        const struct mdiobb_ops *ops;
 };
 
+int mdiobb_read(struct mii_bus *bus, int phy, int reg);
+int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
 /* The returned bus is not yet registered with the phy layer. */
 struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl);
 
index f93bfe7473aa7c15e3da3630fffd5786bf21ed3f..4672e12f1aa52cfb7daa2a87824e9715247d36fd 100644 (file)
@@ -1209,22 +1209,4 @@ static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
        return val.vbool;
 }
 
-/**
- * mlx5_core_net - Provide net namespace of the mlx5_core_dev
- * @dev: mlx5 core device
- *
- * mlx5_core_net() returns the net namespace of mlx5 core device.
- * This can be called only in below described limited context.
- * (a) When a devlink instance for mlx5_core is registered and
- *     when devlink reload operation is disabled.
- *     or
- * (b) during devlink reload reload_down() and reload_up callbacks
- *     where it is ensured that devlink instance's net namespace is
- *     stable.
- */
-static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
-{
-       return devlink_net(priv_to_devlink(dev));
-}
-
 #endif /* MLX5_DRIVER_H */
index d925359976873ad4215ddd6f63ed4551d7bd4d91..bfed36e342ccb262dc3625a9d89867725d4be11d 100644 (file)
@@ -116,6 +116,9 @@ enum {
        NVME_REG_BPMBL  = 0x0048,       /* Boot Partition Memory Buffer
                                         * Location
                                         */
+       NVME_REG_CMBMSC = 0x0050,       /* Controller Memory Buffer Memory
+                                        * Space Control
+                                        */
        NVME_REG_PMRCAP = 0x0e00,       /* Persistent Memory Capabilities */
        NVME_REG_PMRCTL = 0x0e04,       /* Persistent Memory Region Control */
        NVME_REG_PMRSTS = 0x0e08,       /* Persistent Memory Region Status */
@@ -135,6 +138,7 @@ enum {
 #define NVME_CAP_CSS(cap)      (((cap) >> 37) & 0xff)
 #define NVME_CAP_MPSMIN(cap)   (((cap) >> 48) & 0xf)
 #define NVME_CAP_MPSMAX(cap)   (((cap) >> 52) & 0xf)
+#define NVME_CAP_CMBS(cap)     (((cap) >> 57) & 0x1)
 
 #define NVME_CMB_BIR(cmbloc)   ((cmbloc) & 0x7)
 #define NVME_CMB_OFST(cmbloc)  (((cmbloc) >> 12) & 0xfffff)
@@ -192,6 +196,8 @@ enum {
        NVME_CSTS_SHST_OCCUR    = 1 << 2,
        NVME_CSTS_SHST_CMPLT    = 2 << 2,
        NVME_CSTS_SHST_MASK     = 3 << 2,
+       NVME_CMBMSC_CRE         = 1 << 0,
+       NVME_CMBMSC_CMSE        = 1 << 1,
 };
 
 struct nvme_id_power_state {
index 2024944fd2f78197792fc3020c04485477e56de0..20e84a84fb779ac549bca8c76bdd36a2df2522ef 100644 (file)
@@ -331,6 +331,12 @@ regulator_get_exclusive(struct device *dev, const char *id)
        return ERR_PTR(-ENODEV);
 }
 
+static inline struct regulator *__must_check
+devm_regulator_get_exclusive(struct device *dev, const char *id)
+{
+       return ERR_PTR(-ENODEV);
+}
+
 static inline struct regulator *__must_check
 regulator_get_optional(struct device *dev, const char *id)
 {
@@ -486,6 +492,11 @@ static inline int regulator_get_voltage(struct regulator *regulator)
        return -EINVAL;
 }
 
+static inline int regulator_sync_voltage(struct regulator *regulator)
+{
+       return -EINVAL;
+}
+
 static inline int regulator_is_supported_voltage(struct regulator *regulator,
                                   int min_uV, int max_uV)
 {
@@ -578,6 +589,25 @@ static inline int devm_regulator_unregister_notifier(struct regulator *regulator
        return 0;
 }
 
+static inline int regulator_suspend_enable(struct regulator_dev *rdev,
+                                          suspend_state_t state)
+{
+       return -EINVAL;
+}
+
+static inline int regulator_suspend_disable(struct regulator_dev *rdev,
+                                           suspend_state_t state)
+{
+       return -EINVAL;
+}
+
+static inline int regulator_set_suspend_voltage(struct regulator *regulator,
+                                               int min_uV, int max_uV,
+                                               suspend_state_t state)
+{
+       return -EINVAL;
+}
+
 static inline void *regulator_get_drvdata(struct regulator *regulator)
 {
        return NULL;
index 19b6dea27367b500456deeda0586978ee6135c38..b26213ae8c1ab011ba757a81367ac865b042982b 100644 (file)
@@ -25,8 +25,7 @@ struct rpc_rqst;
 #define XDR_QUADLEN(l)         (((l) + 3) >> 2)
 
 /*
- * Generic opaque `network object.' At the kernel level, this type
- * is used only by lockd.
+ * Generic opaque `network object.'
  */
 #define XDR_MAX_NETOBJ         1024
 struct xdr_netobj {
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
deleted file mode 100644 (file)
index 266017f..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef _LINUX_TIMEKEEPING32_H
-#define _LINUX_TIMEKEEPING32_H
-/*
- * These interfaces are all based on the old timespec type
- * and should get replaced with the timespec64 based versions
- * over time so we can remove the file here.
- */
-
-static inline unsigned long get_seconds(void)
-{
-       return ktime_get_real_seconds();
-}
-
-#endif
index 0f21617f1a6689395d9caa86d8d2982ef9a3ea84..966ed898032746b58ad79caead930f66292a9148 100644 (file)
@@ -307,11 +307,13 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                                                                        \
                it_func_ptr =                                           \
                        rcu_dereference_raw((&__tracepoint_##_name)->funcs); \
-               do {                                                    \
-                       it_func = (it_func_ptr)->func;                  \
-                       __data = (it_func_ptr)->data;                   \
-                       ((void(*)(void *, proto))(it_func))(__data, args); \
-               } while ((++it_func_ptr)->func);                        \
+               if (it_func_ptr) {                                      \
+                       do {                                            \
+                               it_func = (it_func_ptr)->func;          \
+                               __data = (it_func_ptr)->data;           \
+                               ((void(*)(void *, proto))(it_func))(__data, args); \
+                       } while ((++it_func_ptr)->func);                \
+               }                                                       \
                return 0;                                               \
        }                                                               \
        DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
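
The tracepoint hunk above makes the probe iterator tolerate a NULL ->funcs array instead of dereferencing it before the first check; the walk over the NULL-func-terminated entries is otherwise unchanged. A small userspace model of the corrected loop (the types and names below are stand-ins, not the kernel's):

    #include <stdio.h>

    struct tp_func {
            void (*func)(void *data, int arg);
            void *data;
    };

    /* Fixed-style iterator: tolerate a NULL funcs array, then walk entries
     * until the NULL-func terminator, exactly like the patched macro. */
    static void call_probes(struct tp_func *it, int arg)
    {
            if (!it)
                    return;
            do {
                    it->func(it->data, arg);
            } while ((++it)->func);
    }

    static void probe(void *data, int arg)
    {
            printf("%s: %d\n", (const char *)data, arg);
    }

    int main(void)
    {
            struct tp_func funcs[] = {
                    { probe, "first"  },
                    { probe, "second" },
                    { NULL,  NULL     },    /* terminator */
            };

            call_probes(funcs, 42);
            call_probes(NULL, 42);          /* no probes: silently does nothing */
            return 0;
    }
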
index c873f475f0a762da8205c7cb4be684d5a1cd5677..37803f3e6d49eb7581e7168ea9807d351bf29fe2 100644 (file)
@@ -421,6 +421,7 @@ extern void tty_kclose(struct tty_struct *tty);
 extern int tty_dev_name_to_number(const char *name, dev_t *number);
 extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
 extern void tty_ldisc_unlock(struct tty_struct *tty);
+extern ssize_t redirected_tty_write(struct kiocb *, struct iov_iter *);
 #else
 static inline void tty_kref_put(struct tty_struct *tty)
 { }
index 88a7673894d5e624e7ffa51eb3c16d9043080e88..cfbfd6fe01dfad22f81c8090c4a9dfdcb1d6a28a 100644 (file)
@@ -81,6 +81,8 @@ struct usbnet {
 #              define EVENT_LINK_CHANGE        11
 #              define EVENT_SET_RX_MODE        12
 #              define EVENT_NO_IP_ALIGN        13
+       u32                     rx_speed;       /* in bps - NOT Mbps */
+       u32                     tx_speed;       /* in bps - NOT Mbps */
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 80c0181c411dff2a2f165d8e26e774841f80f9a6..cedcda6593f6134b239e640c44d3dbb5e000e5a0 100644 (file)
@@ -24,7 +24,8 @@ struct notifier_block;                /* in notifier.h */
 #define VM_UNINITIALIZED       0x00000020      /* vm_struct is not fully initialized */
 #define VM_NO_GUARD            0x00000040      /* don't add guard page */
 #define VM_KASAN               0x00000080      /* has allocated kasan shadow memory */
-#define VM_MAP_PUT_PAGES       0x00000100      /* put pages and free array in vfree */
+#define VM_FLUSH_RESET_PERMS   0x00000100      /* reset direct map and flush TLB on unmap, can't be freed in atomic context */
+#define VM_MAP_PUT_PAGES       0x00000200      /* put pages and free array in vfree */
 
 /*
  * VM_KASAN is used slighly differently depending on CONFIG_KASAN_VMALLOC.
@@ -37,12 +38,6 @@ struct notifier_block;               /* in notifier.h */
  * determine which allocations need the module shadow freed.
  */
 
-/*
- * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
- * vfree_atomic().
- */
-#define VM_FLUSH_RESET_PERMS   0x00000100      /* Reset direct map and flush TLB on unmap */
-
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
index be36cbdcc1bd22ad10ec2f7ec559b0789c666dcc..3eb202259e8ccd617d6b51875bc66917e0588436 100644 (file)
@@ -520,7 +520,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
                        u32 width, u32 height);
 
 /**
- * v4l2_get_link_rate - Get link rate from transmitter
+ * v4l2_get_link_freq - Get link rate from transmitter
  *
  * @handler: The transmitter's control handler
  * @mul: The multiplier between pixel rate and link frequency. Bits per pixel on
@@ -537,7 +537,7 @@ int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
  *     -ENOENT: Link frequency or pixel rate control not found
  *     -EINVAL: Invalid link frequency value
  */
-s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul,
+s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul,
                       unsigned int div);
 
 static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
index 9a4bbccddc7f7fff5a86109eef2dd2f831a3b5ad..0d6f7ec860615c6146d677bda5a652ad205c03f2 100644 (file)
@@ -1756,7 +1756,7 @@ struct cfg80211_sar_specs {
 
 
 /**
- * @struct cfg80211_sar_chan_ranges - sar frequency ranges
+ * struct cfg80211_sar_freq_ranges - sar frequency ranges
  * @start_freq:  start range edge frequency
  * @end_freq:    end range edge frequency
  */
@@ -3972,6 +3972,8 @@ struct mgmt_frame_regs {
  *     This callback may sleep.
  * @reset_tid_config: Reset TID specific configuration for the peer, for the
  *     given TIDs. This callback may sleep.
+ *
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct cfg80211_ops {
        int     (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4929,6 +4931,7 @@ struct wiphy_iftype_akm_suites {
  * @max_data_retry_count: maximum supported per TID retry count for
  *     configuration through the %NL80211_TID_CONFIG_ATTR_RETRY_SHORT and
  *     %NL80211_TID_CONFIG_ATTR_RETRY_LONG attributes
+ * @sar_capa: SAR control capabilities
  */
 struct wiphy {
        /* assign these fields before you register the wiphy */
index 7338b3865a2a3d278dc27c0167bba1b966bbda9f..111d7771b208150d4e6f445b2675648b4de0eb7d 100644 (file)
@@ -76,6 +76,8 @@ struct inet_connection_sock_af_ops {
  * @icsk_ext_hdr_len:     Network protocol overhead (IP/IPv6 options)
  * @icsk_ack:             Delayed ACK control data
  * @icsk_mtup;            MTU probing control data
+ * @icsk_probes_tstamp:    Probe timestamp (cleared by non-zero window ack)
+ * @icsk_user_timeout:    TCP_USER_TIMEOUT value
  */
 struct inet_connection_sock {
        /* inet_sock has to be the first member! */
@@ -129,6 +131,7 @@ struct inet_connection_sock {
 
                u32               probe_timestamp;
        } icsk_mtup;
+       u32                       icsk_probes_tstamp;
        u32                       icsk_user_timeout;
 
        u64                       icsk_ca_priv[104 / sizeof(u64)];
index ccc3d1f020b0ca480994a05fff675d590059241e..eee73442a1ba1c2628ae605eb0b3c86cebbe80e6 100644 (file)
@@ -92,6 +92,7 @@ struct lapb_cb {
        unsigned short          n2, n2count;
        unsigned short          t1, t2;
        struct timer_list       t1timer, t2timer;
+       bool                    t1timer_stop, t2timer_stop;
 
        /* Internal control information */
        struct sk_buff_head     write_queue;
@@ -103,6 +104,7 @@ struct lapb_cb {
        struct lapb_frame       frmr_data;
        unsigned char           frmr_type;
 
+       spinlock_t              lock;
        refcount_t              refcnt;
 };
 
index d315740581f1ebc46de1e4180cc6ad9023cf1897..2bdbf62f4ecd7b4e7e62e45263176733fc5db02a 100644 (file)
@@ -3880,6 +3880,7 @@ enum ieee80211_reconfig_type {
  *     This callback may sleep.
  * @sta_set_4addr: Called to notify the driver when a station starts/stops using
  *     4-address mode
+ * @set_sar_specs: Update the SAR (TX power) settings.
  */
 struct ieee80211_ops {
        void (*tx)(struct ieee80211_hw *hw,
index f4af8362d2348475031143630d559e39796515ec..4b6ecf532623898ab9c9e2436e3a5c202f624b0f 100644 (file)
@@ -721,6 +721,8 @@ void *nft_set_elem_init(const struct nft_set *set,
                        const struct nft_set_ext_tmpl *tmpl,
                        const u32 *key, const u32 *key_end, const u32 *data,
                        u64 timeout, u64 expiration, gfp_t gfp);
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+                           struct nft_expr *expr_array[]);
 void nft_set_elem_destroy(const struct nft_set *set, void *elem,
                          bool destroy_expr);
 
index 639e465a108f4476c5d9dbbf45f7b425b250ee7d..5b490b5591df58fda9400d68cebe2c183cba7e73 100644 (file)
@@ -1143,7 +1143,7 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
        old = *pold;
        *pold = new;
        if (old != NULL)
-               qdisc_tree_flush_backlog(old);
+               qdisc_purge_queue(old);
        sch_tree_unlock(sch);
 
        return old;
index bdc4323ce53c957c30809b0773b2d558dce555b7..129d200bccb46581f9959a8ee553acbed94466cf 100644 (file)
@@ -1921,10 +1921,13 @@ static inline void sk_set_txhash(struct sock *sk)
        sk->sk_txhash = net_tx_rndhash();
 }
 
-static inline void sk_rethink_txhash(struct sock *sk)
+static inline bool sk_rethink_txhash(struct sock *sk)
 {
-       if (sk->sk_txhash)
+       if (sk->sk_txhash) {
                sk_set_txhash(sk);
+               return true;
+       }
+       return false;
 }
 
 static inline struct dst_entry *
@@ -1947,12 +1950,10 @@ sk_dst_get(struct sock *sk)
        return dst;
 }
 
-static inline void dst_negative_advice(struct sock *sk)
+static inline void __dst_negative_advice(struct sock *sk)
 {
        struct dst_entry *ndst, *dst = __sk_dst_get(sk);
 
-       sk_rethink_txhash(sk);
-
        if (dst && dst->ops->negative_advice) {
                ndst = dst->ops->negative_advice(dst);
 
@@ -1964,6 +1965,12 @@ static inline void dst_negative_advice(struct sock *sk)
        }
 }
 
+static inline void dst_negative_advice(struct sock *sk)
+{
+       sk_rethink_txhash(sk);
+       __dst_negative_advice(sk);
+}
+
 static inline void
 __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
index 78d13c88720fda50e3f1880ac741cea1985ef3e9..25bbada379c46add16fb7239733bd6571f10f680 100644 (file)
@@ -630,6 +630,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)
 
 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
 unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
 
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
@@ -2060,7 +2061,7 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
                                u32 reo_wnd);
-extern void tcp_rack_mark_lost(struct sock *sk);
+extern bool tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
                             u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
index 877832bed4713a011a514a2f6f522728c8c89e20..01351ba25b874d261ace47151919cf7d8466b288 100644 (file)
@@ -178,7 +178,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
-                                 netdev_features_t features);
+                                 netdev_features_t features, bool is_ipv6);
 
 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
 {
index 2336bf9243e185cd0e8b84b4afe3add26faf59b6..2e1200d17d0cbe5e8a5ef23ffbe1b7f4e17a94f2 100644 (file)
@@ -229,7 +229,7 @@ typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params,
 struct snd_pcm_hw_rule {
        unsigned int cond;
        int var;
-       int deps[4];
+       int deps[5];
 
        snd_pcm_hw_rule_func_t func;
        void *private;
index 5039af667645d64813b0fa151393163f8918cb2f..cbe3e152d24c007c89ae6e6a2e03ca10d9aab390 100644 (file)
@@ -366,7 +366,7 @@ TRACE_EVENT(sched_process_wait,
 );
 
 /*
- * Tracepoint for do_fork:
+ * Tracepoint for kernel_clone:
  */
 TRACE_EVENT(sched_process_fork,
 
index 58994e01302216d9b935f6ac3a3bb4e7fb794077..6f89c27265f5895587416a2ff1aae928c18543c0 100644 (file)
@@ -1424,13 +1424,61 @@ TRACE_EVENT(rpcb_unregister,
        )
 );
 
+/* Record an xdr_buf containing a fully-formed RPC message */
+DECLARE_EVENT_CLASS(svc_xdr_msg_class,
+       TP_PROTO(
+               const struct xdr_buf *xdr
+       ),
+
+       TP_ARGS(xdr),
+
+       TP_STRUCT__entry(
+               __field(u32, xid)
+               __field(const void *, head_base)
+               __field(size_t, head_len)
+               __field(const void *, tail_base)
+               __field(size_t, tail_len)
+               __field(unsigned int, page_len)
+               __field(unsigned int, msg_len)
+       ),
+
+       TP_fast_assign(
+               __be32 *p = (__be32 *)xdr->head[0].iov_base;
+
+               __entry->xid = be32_to_cpu(*p);
+               __entry->head_base = p;
+               __entry->head_len = xdr->head[0].iov_len;
+               __entry->tail_base = xdr->tail[0].iov_base;
+               __entry->tail_len = xdr->tail[0].iov_len;
+               __entry->page_len = xdr->page_len;
+               __entry->msg_len = xdr->len;
+       ),
+
+       TP_printk("xid=0x%08x head=[%p,%zu] page=%u tail=[%p,%zu] len=%u",
+               __entry->xid,
+               __entry->head_base, __entry->head_len, __entry->page_len,
+               __entry->tail_base, __entry->tail_len, __entry->msg_len
+       )
+);
+
+#define DEFINE_SVCXDRMSG_EVENT(name)                                   \
+               DEFINE_EVENT(svc_xdr_msg_class,                         \
+                               svc_xdr_##name,                         \
+                               TP_PROTO(                               \
+                                       const struct xdr_buf *xdr       \
+                               ),                                      \
+                               TP_ARGS(xdr))
+
+DEFINE_SVCXDRMSG_EVENT(recvfrom);
+
+/* Record an xdr_buf containing arbitrary data, tagged with an XID */
 DECLARE_EVENT_CLASS(svc_xdr_buf_class,
        TP_PROTO(
-               const struct svc_rqst *rqst,
+               __be32 xid,
                const struct xdr_buf *xdr
        ),
 
-       TP_ARGS(rqst, xdr),
+       TP_ARGS(xid, xdr),
 
        TP_STRUCT__entry(
                __field(u32, xid)
@@ -1443,7 +1491,7 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
        ),
 
        TP_fast_assign(
-               __entry->xid = be32_to_cpu(rqst->rq_xid);
+               __entry->xid = be32_to_cpu(xid);
                __entry->head_base = xdr->head[0].iov_base;
                __entry->head_len = xdr->head[0].iov_len;
                __entry->tail_base = xdr->tail[0].iov_base;
@@ -1463,12 +1511,11 @@ DECLARE_EVENT_CLASS(svc_xdr_buf_class,
                DEFINE_EVENT(svc_xdr_buf_class,                         \
                                svc_xdr_##name,                         \
                                TP_PROTO(                               \
-                                       const struct svc_rqst *rqst,    \
+                                       __be32 xid,                     \
                                        const struct xdr_buf *xdr       \
                                ),                                      \
-                               TP_ARGS(rqst, xdr))
+                               TP_ARGS(xid, xdr))
 
-DEFINE_SVCXDRBUF_EVENT(recvfrom);
 DEFINE_SVCXDRBUF_EVENT(sendto);
 
 /*
index 9744773de5ffa71203ca31672ddd3b4e449d90f3..bd4424de56ff57944617e3ef3ceb7bed7b2fb66a 100644 (file)
@@ -71,90 +71,4 @@ enum br_mrp_sub_tlv_header_type {
        BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR = 0x3,
 };
 
-struct br_mrp_tlv_hdr {
-       __u8 type;
-       __u8 length;
-};
-
-struct br_mrp_sub_tlv_hdr {
-       __u8 type;
-       __u8 length;
-};
-
-struct br_mrp_end_hdr {
-       struct br_mrp_tlv_hdr hdr;
-};
-
-struct br_mrp_common_hdr {
-       __be16 seq_id;
-       __u8 domain[MRP_DOMAIN_UUID_LENGTH];
-};
-
-struct br_mrp_ring_test_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 state;
-       __be16 transitions;
-       __be32 timestamp;
-};
-
-struct br_mrp_ring_topo_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 interval;
-};
-
-struct br_mrp_ring_link_hdr {
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 interval;
-       __be16 blocked;
-};
-
-struct br_mrp_sub_opt_hdr {
-       __u8 type;
-       __u8 manufacture_data[MRP_MANUFACTURE_DATA_LENGTH];
-};
-
-struct br_mrp_test_mgr_nack_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 other_prio;
-       __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_test_prop_hdr {
-       __be16 prio;
-       __u8 sa[ETH_ALEN];
-       __be16 other_prio;
-       __u8 other_sa[ETH_ALEN];
-};
-
-struct br_mrp_oui_hdr {
-       __u8 oui[MRP_OUI_LENGTH];
-};
-
-struct br_mrp_in_test_hdr {
-       __be16 id;
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 state;
-       __be16 transitions;
-       __be32 timestamp;
-};
-
-struct br_mrp_in_topo_hdr {
-       __u8 sa[ETH_ALEN];
-       __be16 id;
-       __be16 interval;
-};
-
-struct br_mrp_in_link_hdr {
-       __u8 sa[ETH_ALEN];
-       __be16 port_role;
-       __be16 id;
-       __be16 interval;
-};
-
 #endif
index 6e449e78426054c86f919ab7a19adb27ef2c888c..36e3efb81b014c4cd4fd03b021c38b2744c949f5 100644 (file)
 #define RKISP1_CIF_ISP_CTK_COEFF_MAX            0x100
 #define RKISP1_CIF_ISP_CTK_OFFSET_MAX           0x800
 
-#define RKISP1_CIF_ISP_AE_MEAN_MAX              25
-#define RKISP1_CIF_ISP_HIST_BIN_N_MAX           16
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V10         25
+#define RKISP1_CIF_ISP_AE_MEAN_MAX_V12         81
+#define RKISP1_CIF_ISP_AE_MEAN_MAX             RKISP1_CIF_ISP_AE_MEAN_MAX_V12
+
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10      16
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12      32
+#define RKISP1_CIF_ISP_HIST_BIN_N_MAX          RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
+
 #define RKISP1_CIF_ISP_AFM_MAX_WINDOWS          3
 #define RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE       17
 
@@ -86,7 +92,9 @@
  * Gamma out
  */
 /* Maximum number of color samples supported */
-#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES       17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10   17
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12   34
+#define RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES       RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
 
 /*
  * Lens shade correction
 /*
  * Histogram calculation
  */
-/* Last 3 values unused. */
-#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE 28
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10 25
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12 81
+#define RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE     RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
 
 /*
  * Defect Pixel Cluster Correction
 #define RKISP1_CIF_ISP_STAT_AFM           (1U << 2)
 #define RKISP1_CIF_ISP_STAT_HIST          (1U << 3)
 
+/**
+ * enum rkisp1_cif_isp_version - ISP variants
+ *
+ * @RKISP1_V10: used at least in rk3288 and rk3399
+ * @RKISP1_V11: declared in the original vendor code, but not used
+ * @RKISP1_V12: used at least in rk3326 and px30
+ * @RKISP1_V13: used at least in rk1808
+ */
+enum rkisp1_cif_isp_version {
+       RKISP1_V10 = 10,
+       RKISP1_V11,
+       RKISP1_V12,
+       RKISP1_V13,
+};
+
 enum rkisp1_cif_isp_histogram_mode {
        RKISP1_CIF_ISP_HISTOGRAM_MODE_DISABLE,
        RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
@@ -510,6 +534,15 @@ enum rkisp1_cif_isp_goc_mode {
  *
  * @mode: goc mode (from enum rkisp1_cif_isp_goc_mode)
  * @gamma_y: gamma out curve y-axis for all color components
+ *
+ * The number of entries of @gamma_y depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12
+ * entries. RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES is equal to the maximum
+ * of the two.
  */
 struct rkisp1_cif_isp_goc_config {
        __u32 mode;
@@ -524,6 +557,15 @@ struct rkisp1_cif_isp_goc_config {
  *                       skipped
  * @meas_window: coordinates of the measure window
  * @hist_weight: weighting factor for sub-windows
+ *
+ * The number of entries of @hist_weight depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V10
+ * entries, versions >= V12 have RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE_V12
+ * entries. RKISP1_CIF_ISP_HISTOGRAM_WEIGHT_GRIDS_SIZE is equal to the maximum
+ * of the two.
  */
 struct rkisp1_cif_isp_hst_config {
        __u32 mode;
@@ -811,7 +853,15 @@ struct rkisp1_cif_isp_bls_meas_val {
  * @exp_mean: Mean luminance value of block xx
  * @bls_val:  BLS measured values
  *
- * Image is divided into 5x5 blocks.
+ * The number of entries of @exp_mean depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
+ *
+ * Versions <= V11 have RKISP1_CIF_ISP_AE_MEAN_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_AE_MEAN_MAX_V12 entries.
+ * RKISP1_CIF_ISP_AE_MEAN_MAX is equal to the maximum of the two.
+ *
+ * Image is divided into 5x5 blocks on V10 and 9x9 blocks on V12.
  */
 struct rkisp1_cif_isp_ae_stat {
        __u8 exp_mean[RKISP1_CIF_ISP_AE_MEAN_MAX];
@@ -844,13 +894,29 @@ struct rkisp1_cif_isp_af_stat {
 /**
  * struct rkisp1_cif_isp_hist_stat - statistics histogram data
  *
- * @hist_bins: measured bin counters
+ * @hist_bins: measured bin counters. Each bin is a 20 bits unsigned fixed point
+ *            type. Bits 0-4 are the fractional part and bits 5-19 are the
+ *            integer part.
+ *
+ * The window of the measurements area is divided to 5x5 sub-windows for
+ * V10/V11 and to 9x9 sub-windows for V12. The histogram is then computed for
+ * each sub-window independently and the final result is a weighted average of
+ * the histogram measurements on all sub-windows. The window of the
+ * measurements area and the weight of each sub-window are configurable using
+ * struct @rkisp1_cif_isp_hst_config.
+ *
+ * The histogram contains 16 bins in V10/V11 and 32 bins in V12/V13.
+ *
+ * The number of entries of @hist_bins depends on the hardware revision
+ * as is reported by the hw_revision field of the struct media_device_info
+ * that is returned by ioctl MEDIA_IOC_DEVICE_INFO.
  *
- * Measurement window divided into 25 sub-windows, set
- * with ISP_HIST_XXX
+ * Versions <= V11 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10 entries,
+ * versions >= V12 have RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 entries.
+ * RKISP1_CIF_ISP_HIST_BIN_N_MAX is equal to the maximum of the two.
  */
 struct rkisp1_cif_isp_hist_stat {
-       __u16 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
+       __u32 hist_bins[RKISP1_CIF_ISP_HIST_BIN_N_MAX];
 };
 
 /**
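
The rkisp1 arrays above are sized for the largest supported revision, so userspace has to consult the media device's hw_revision before deciding how many entries are actually valid. A hedged sketch of that selection: it assumes the UAPI header is installed as <linux/rkisp1-config.h> and that hw_revision carries the enum rkisp1_cif_isp_version value directly, both of which should be checked against the rkisp1 driver rather than taken from this patch.

    #include <linux/rkisp1-config.h>
    #include <stdio.h>

    /* Hypothetical helpers: pick the number of valid entries from the ISP
     * revision, following the "<= V11 uses V10 sizes, >= V12 uses V12 sizes"
     * rule documented in the kerneldoc above. */
    static unsigned int rkisp1_ae_mean_cells(unsigned int hw_revision)
    {
            return hw_revision >= RKISP1_V12 ? RKISP1_CIF_ISP_AE_MEAN_MAX_V12
                                             : RKISP1_CIF_ISP_AE_MEAN_MAX_V10;
    }

    static unsigned int rkisp1_hist_bins(unsigned int hw_revision)
    {
            return hw_revision >= RKISP1_V12 ? RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12
                                             : RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10;
    }

    int main(void)
    {
            printf("V10: %u mean cells, %u bins\n",
                   rkisp1_ae_mean_cells(RKISP1_V10), rkisp1_hist_bins(RKISP1_V10));
            printf("V12: %u mean cells, %u bins\n",
                   rkisp1_ae_mean_cells(RKISP1_V12), rkisp1_hist_bins(RKISP1_V12));
            return 0;
    }
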
index 1dccb55cf8c647baa1ffda34b9ff442259e8efea..708adddf9f1389c039d7f94f05bef00714034213 100644 (file)
@@ -28,10 +28,10 @@ struct ipv6_rpl_sr_hdr {
                pad:4,
                reserved1:16;
 #elif defined(__BIG_ENDIAN_BITFIELD)
-       __u32   reserved:20,
+       __u32   cmpri:4,
+               cmpre:4,
                pad:4,
-               cmpri:4,
-               cmpre:4;
+               reserved:20;
 #else
 #error  "Please fix <asm/byteorder.h>"
 #endif
index 00850b98078a27495401c2045a8596d14e02c112..a38454d9e0f54eaf85af7a23c8e35554d7421430 100644 (file)
@@ -176,7 +176,7 @@ struct v4l2_subdev_capability {
 };
 
 /* The v4l2 sub-device video device node is registered in read-only mode. */
-#define V4L2_SUBDEV_CAP_RO_SUBDEV              BIT(0)
+#define V4L2_SUBDEV_CAP_RO_SUBDEV              0x00000001
 
 /* Backwards compatibility define --- to be removed */
 #define v4l2_subdev_edid v4l2_edid
index f8b638c73371d86287df15e70a2cafd16944d050..901a4fd72c09f8710a913274a1146c86a456b9ce 100644 (file)
@@ -133,6 +133,13 @@ enum pvrdma_wc_flags {
        PVRDMA_WC_FLAGS_MAX             = PVRDMA_WC_WITH_NETWORK_HDR_TYPE,
 };
 
+enum pvrdma_network_type {
+       PVRDMA_NETWORK_IB,
+       PVRDMA_NETWORK_ROCE_V1 = PVRDMA_NETWORK_IB,
+       PVRDMA_NETWORK_IPV4,
+       PVRDMA_NETWORK_IPV6
+};
+
 struct pvrdma_alloc_ucontext_resp {
        __u32 qp_tab_size;
        __u32 reserved;
index b77c60f8b963d4ae43140cce57b4c19af2e28732..29ad6832502880b8485f5cb76bd4348e7894e0de 100644 (file)
@@ -76,7 +76,6 @@ config CC_HAS_ASM_INLINE
 
 config CONSTRUCTORS
        bool
-       depends on !UML
 
 config IRQ_WORK
        bool
index 8a992d73e6fb7027e0b7b642b3935d81cb518fec..3711cdaafed2fb47ea655f98dd860c588d10fe1e 100644 (file)
@@ -198,7 +198,8 @@ struct task_struct init_task
        .lockdep_recursion = 0,
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       .ret_stack      = NULL,
+       .ret_stack              = NULL,
+       .tracing_graph_pause    = ATOMIC_INIT(0),
 #endif
 #if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
        .trace_recursion = 0,
index c68d784376ca1058d58e3a6dfb664420c6ded461..a626e78dbf0611df73be6e5f20ce464c8b110d38 100644 (file)
@@ -1066,7 +1066,13 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
 /* Call all constructor functions linked into the kernel. */
 static void __init do_ctors(void)
 {
-#ifdef CONFIG_CONSTRUCTORS
+/*
+ * For UML, the constructors have already been called by the
+ * normal setup code as it's just a normal ELF binary, so we
+ * cannot do it again - but we do need CONFIG_CONSTRUCTORS
+ * even on UML for modules.
+ */
+#if defined(CONFIG_CONSTRUCTORS) && !defined(CONFIG_UML)
        ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
 
        for (; fn < (ctor_fn_t *) __ctors_end; fn++)
index 6edff97ad594bd5d6e98a8971723a4c07436db1b..6639640523c0bb1afb6a51d9e3e141fc30d8be8a 100644 (file)
@@ -125,8 +125,12 @@ static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
 
        fd = *(int *)key;
        f = fget_raw(fd);
-       if (!f || !inode_storage_ptr(f->f_inode))
+       if (!f)
+               return -EBADF;
+       if (!inode_storage_ptr(f->f_inode)) {
+               fput(f);
                return -EBADF;
+       }
 
        sdata = bpf_local_storage_update(f->f_inode,
                                         (struct bpf_local_storage_map *)map,
@@ -176,14 +180,14 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
-       if (!inode_storage_ptr(inode))
+       if (!inode || !inode_storage_ptr(inode))
                return (unsigned long)NULL;
 
        sdata = inode_storage_lookup(inode, map, true);
        if (sdata)
                return (unsigned long)sdata->data;
 
-       /* This helper must only called from where the inode is gurranteed
+       /* This helper must only be called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
@@ -200,7 +204,10 @@ BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
 BPF_CALL_2(bpf_inode_storage_delete,
           struct bpf_map *, map, struct inode *, inode)
 {
-       /* This helper must only called from where the inode is gurranteed
+       if (!inode)
+               return -EINVAL;
+
+       /* This helper must only be called from where the inode is guaranteed
         * to have a refcount and cannot be freed.
         */
        return inode_storage_delete(inode, map);
index 70e5e0b6d69d0d591b1cfd9c7ff516bc927c0fcf..1622a44d1617e16d97a4a2470280696ba7e47812 100644 (file)
@@ -149,7 +149,11 @@ BTF_ID(func, bpf_lsm_file_ioctl)
 BTF_ID(func, bpf_lsm_file_lock)
 BTF_ID(func, bpf_lsm_file_open)
 BTF_ID(func, bpf_lsm_file_receive)
+
+#ifdef CONFIG_SECURITY_NETWORK
 BTF_ID(func, bpf_lsm_inet_conn_established)
+#endif /* CONFIG_SECURITY_NETWORK */
+
 BTF_ID(func, bpf_lsm_inode_create)
 BTF_ID(func, bpf_lsm_inode_free_security)
 BTF_ID(func, bpf_lsm_inode_getattr)
@@ -166,7 +170,11 @@ BTF_ID(func, bpf_lsm_inode_symlink)
 BTF_ID(func, bpf_lsm_inode_unlink)
 BTF_ID(func, bpf_lsm_kernel_module_request)
 BTF_ID(func, bpf_lsm_kernfs_init_security)
+
+#ifdef CONFIG_KEYS
 BTF_ID(func, bpf_lsm_key_free)
+#endif /* CONFIG_KEYS */
+
 BTF_ID(func, bpf_lsm_mmap_file)
 BTF_ID(func, bpf_lsm_netlink_send)
 BTF_ID(func, bpf_lsm_path_notify)
@@ -181,6 +189,8 @@ BTF_ID(func, bpf_lsm_sb_show_options)
 BTF_ID(func, bpf_lsm_sb_statfs)
 BTF_ID(func, bpf_lsm_sb_umount)
 BTF_ID(func, bpf_lsm_settime)
+
+#ifdef CONFIG_SECURITY_NETWORK
 BTF_ID(func, bpf_lsm_socket_accept)
 BTF_ID(func, bpf_lsm_socket_bind)
 BTF_ID(func, bpf_lsm_socket_connect)
@@ -195,6 +205,8 @@ BTF_ID(func, bpf_lsm_socket_recvmsg)
 BTF_ID(func, bpf_lsm_socket_sendmsg)
 BTF_ID(func, bpf_lsm_socket_shutdown)
 BTF_ID(func, bpf_lsm_socket_socketpair)
+#endif /* CONFIG_SECURITY_NETWORK */
+
 BTF_ID(func, bpf_lsm_syslog)
 BTF_ID(func, bpf_lsm_task_alloc)
 BTF_ID(func, bpf_lsm_task_getsecid)
index 4ef1959a78f27fcc610b1d31dcc6f4ca489f2f72..e0da0258b732dd318304a6357984ec6f50947f42 100644 (file)
@@ -218,7 +218,7 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
         * bpf_local_storage_update expects the owner to have a
         * valid storage pointer.
         */
-       if (!task_storage_ptr(task))
+       if (!task || !task_storage_ptr(task))
                return (unsigned long)NULL;
 
        sdata = task_storage_lookup(task, map, true);
@@ -243,6 +243,9 @@ BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
 BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
           task)
 {
+       if (!task)
+               return -EINVAL;
+
        /* This helper must only be called from places where the lifetime of the task
         * is guaranteed. Either by being refcounted or by being protected
         * by an RCU read-side critical section.
index 8d6bdb4f4d61819c873a437a8d3ef50eebe767fc..84a36ee4a4c20fa894a2b82cf05820e2576e7a03 100644 (file)
@@ -4172,7 +4172,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
                return -ENOTSUPP;
        }
 
-       if (btf_data_size == hdr->hdr_len) {
+       if (!btf->base_btf && btf_data_size == hdr->hdr_len) {
                btf_verifier_log(env, "No data");
                return -EINVAL;
        }
index 6ec088a96302f980ee7e56feaf626ad078d03a77..6aa9e10c6335a220315eb2d7ee48676f03670bcb 100644 (file)
@@ -1391,12 +1391,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                if (ctx.optlen != 0) {
                        *optlen = ctx.optlen;
                        *kernel_optval = ctx.optval;
+                       /* export and don't free sockopt buf */
+                       return 0;
                }
        }
 
 out:
-       if (ret)
-               sockopt_free_buf(&ctx);
+       sockopt_free_buf(&ctx);
        return ret;
 }
 
@@ -1441,6 +1442,11 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                        goto out;
                }
 
+               if (ctx.optlen < 0) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+
                if (copy_from_user(ctx.optval, optval,
                                   min(ctx.optlen, max_optlen)) != 0) {
                        ret = -EFAULT;
@@ -1458,7 +1464,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                goto out;
        }
 
-       if (ctx.optlen > max_optlen) {
+       if (ctx.optlen > max_optlen || ctx.optlen < 0) {
                ret = -EFAULT;
                goto out;
        }
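
The getsockopt path now rejects a negative ctx.optlen both before copying the user buffer in and before copying the result back out. The underlying rule is generic: validate a caller-supplied length against both bounds before any memcpy. A small sketch with invented names, only meant to illustrate that rule:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_OPTLEN 64

    /* Copy src into a fixed buffer only if the claimed length is sane. */
    static int copy_opt(char *dst, const char *src, int optlen)
    {
        if (optlen < 0 || optlen > MAX_OPTLEN)
            return -EFAULT;             /* reject before touching memory */

        memcpy(dst, src, (size_t)optlen);
        return 0;
    }

    int main(void)
    {
        char buf[MAX_OPTLEN];

        printf("%d\n", copy_opt(buf, "abc", 3));    /* 0 */
        printf("%d\n", copy_opt(buf, "abc", -1));   /* -EFAULT */
        return 0;
    }
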
index bd8a3183d0302efd7147e84b30054cf9aa6c10e5..41ca280b1dc19428d626ef16ed8db8eab58892df 100644 (file)
@@ -108,7 +108,7 @@ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
 }
 
 const struct bpf_func_proto bpf_map_peek_elem_proto = {
-       .func           = bpf_map_pop_elem,
+       .func           = bpf_map_peek_elem,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_CONST_MAP_PTR,
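
The helpers.c change is a copy-paste bug: the peek proto pointed at the pop implementation. Ops tables built from designated initializers compile happily with the wrong handler, so a cheap self-check can be worthwhile; a tiny illustrative sketch (all names made up):

    #include <assert.h>
    #include <stdio.h>

    static int do_pop(void)  { return 1; }
    static int do_peek(void) { return 2; }

    struct ops {
        const char *name;
        int (*func)(void);
    };

    static const struct ops peek_ops = {
        .name = "peek",
        .func = do_peek,        /* a stray "do_pop" here would still compile */
    };

    int main(void)
    {
        /* Cheap self-check that the table binds the intended handler. */
        assert(peek_ops.func == do_peek);
        printf("%s -> %d\n", peek_ops.name, peek_ops.func());
        return 0;
    }
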
index 23ee310b6eb49c7404667e5f9c94cadbabf0d005..1951332dd15f5c420eccdcd8754620a04dac39d6 100644 (file)
@@ -4,8 +4,11 @@ LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
 LIBBPF_A = $(obj)/libbpf.a
 LIBBPF_OUT = $(abspath $(obj))
 
+# Although $(O) is not used by libbpf's Makefile, set it so that the "dummy" test
+# in tools/scripts/Makefile.include always succeeds when building the kernel
+# with $(O) pointing to a relative path, as in "make O=build bindeb-pkg".
 $(LIBBPF_A):
-       $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
+       $(Q)$(MAKE) -C $(LIBBPF_SRCS) O=$(LIBBPF_OUT)/ OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
 
 userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
        -I $(srctree)/tools/lib/ -Wno-unused-result
index c3bb03c8371fc7aabc60245780582a6fed05b12a..e5999d86c76ea17ca49a4cf8851cf51cc20d9a3d 100644 (file)
@@ -2712,7 +2712,6 @@ out_unlock:
 out_put_prog:
        if (tgt_prog_fd && tgt_prog)
                bpf_prog_put(tgt_prog);
-       bpf_prog_put(prog);
        return err;
 }
 
@@ -2825,7 +2824,10 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
                        tp_name = prog->aux->attach_func_name;
                        break;
                }
-               return bpf_tracing_prog_attach(prog, 0, 0);
+               err = bpf_tracing_prog_attach(prog, 0, 0);
+               if (err >= 0)
+                       return err;
+               goto out_put_prog;
        case BPF_PROG_TYPE_RAW_TRACEPOINT:
        case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
                if (strncpy_from_user(buf,
index 17270b8404f173ff8e164fa542d11fa73b571cf9..e7368c5eacb7bdc508099841bc447612624faa7c 100644 (file)
@@ -2217,6 +2217,8 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
        case PTR_TO_RDWR_BUF:
        case PTR_TO_RDWR_BUF_OR_NULL:
        case PTR_TO_PERCPU_BTF_ID:
+       case PTR_TO_MEM:
+       case PTR_TO_MEM_OR_NULL:
                return true;
        default:
                return false;
@@ -5311,7 +5313,7 @@ static bool signed_add_overflows(s64 a, s64 b)
        return res < a;
 }
 
-static bool signed_add32_overflows(s64 a, s64 b)
+static bool signed_add32_overflows(s32 a, s32 b)
 {
        /* Do the add in u32, where overflow is well-defined */
        s32 res = (s32)((u32)a + (u32)b);
@@ -5321,7 +5323,7 @@ static bool signed_add32_overflows(s64 a, s64 b)
        return res < a;
 }
 
-static bool signed_sub_overflows(s32 a, s32 b)
+static bool signed_sub_overflows(s64 a, s64 b)
 {
        /* Do the sub in u64, where overflow is well-defined */
        s64 res = (s64)((u64)a - (u64)b);
@@ -5333,7 +5335,7 @@ static bool signed_sub_overflows(s32 a, s32 b)
 
 static bool signed_sub32_overflows(s32 a, s32 b)
 {
-       /* Do the sub in u64, where overflow is well-defined */
+       /* Do the sub in u32, where overflow is well-defined */
        s32 res = (s32)((u32)a - (u32)b);
 
        if (b < 0)
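
The verifier hunks fix the parameter widths of the 32- and 64-bit signed overflow helpers; the detection technique itself (do the arithmetic in the unsigned type, where wrap is defined, then compare against an operand's sign) can be exercised standalone. A sketch mirroring the 32-bit helpers, assuming two's-complement wrap on conversion, as the kernel does:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Do the add in u32, where wrap is well-defined, then check the sign of b. */
    static bool signed_add32_overflows(int32_t a, int32_t b)
    {
        int32_t res = (int32_t)((uint32_t)a + (uint32_t)b);

        if (b < 0)
            return res > a;
        return res < a;
    }

    static bool signed_sub32_overflows(int32_t a, int32_t b)
    {
        int32_t res = (int32_t)((uint32_t)a - (uint32_t)b);

        if (b < 0)
            return res < a;
        return res > a;
    }

    int main(void)
    {
        printf("%d\n", signed_add32_overflows(INT32_MAX, 1));   /* 1 */
        printf("%d\n", signed_add32_overflows(1, 2));           /* 0 */
        printf("%d\n", signed_sub32_overflows(INT32_MIN, 1));   /* 1 */
        return 0;
    }
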
index b1496e744c687f399cdd02a3e62d86bebe8c0f2c..1b1b8ff875cb3bbd26caef07aa55bc3a1026b431 100644 (file)
@@ -147,8 +147,10 @@ static int do_map_benchmark(struct map_benchmark_data *map)
        atomic64_set(&map->sum_sq_unmap, 0);
        atomic64_set(&map->loops, 0);
 
-       for (i = 0; i < threads; i++)
+       for (i = 0; i < threads; i++) {
+               get_task_struct(tsk[i]);
                wake_up_process(tsk[i]);
+       }
 
        msleep_interruptible(map->bparam.seconds * 1000);
 
@@ -183,6 +185,8 @@ static int do_map_benchmark(struct map_benchmark_data *map)
        }
 
 out:
+       for (i = 0; i < threads; i++)
+               put_task_struct(tsk[i]);
        put_device(map->dev);
        kfree(tsk);
        return ret;
index 378341642f94cd0dfe96f2f6ec554d2109344824..6dd82be60df81698dbe8e20a73a03d653b0bd04c 100644 (file)
@@ -222,7 +222,7 @@ static inline bool report_single_step(unsigned long work)
  */
 static inline bool report_single_step(unsigned long work)
 {
-       if (!(work & SYSCALL_WORK_SYSCALL_EMU))
+       if (work & SYSCALL_WORK_SYSCALL_EMU)
                return false;
 
        return !!(current_thread_info()->flags & _TIF_SINGLESTEP);
index 37720a6d04eaa8c61cdae87092df4a75daa59c75..d66cd1014211b92de6e9f881089175395845265c 100644 (file)
@@ -819,9 +819,8 @@ void __init fork_init(void)
        init_task.signal->rlim[RLIMIT_SIGPENDING] =
                init_task.signal->rlim[RLIMIT_NPROC];
 
-       for (i = 0; i < UCOUNT_COUNTS; i++) {
+       for (i = 0; i < UCOUNT_COUNTS; i++)
                init_user_ns.ucount_max[i] = max_threads/2;
-       }
 
 #ifdef CONFIG_VMAP_STACK
        cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "fork:vm_stack_cache",
@@ -1654,9 +1653,8 @@ static inline void init_task_pid_links(struct task_struct *task)
 {
        enum pid_type type;
 
-       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
+       for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type)
                INIT_HLIST_NODE(&task->pid_links[type]);
-       }
 }
 
 static inline void
index c47d1015d75919b5436dfaab60c75d31aa14aa68..45a13eb8894e5667481965f219e275b6bd3e30e2 100644 (file)
@@ -763,6 +763,29 @@ static struct futex_pi_state *alloc_pi_state(void)
        return pi_state;
 }
 
+static void pi_state_update_owner(struct futex_pi_state *pi_state,
+                                 struct task_struct *new_owner)
+{
+       struct task_struct *old_owner = pi_state->owner;
+
+       lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
+
+       if (old_owner) {
+               raw_spin_lock(&old_owner->pi_lock);
+               WARN_ON(list_empty(&pi_state->list));
+               list_del_init(&pi_state->list);
+               raw_spin_unlock(&old_owner->pi_lock);
+       }
+
+       if (new_owner) {
+               raw_spin_lock(&new_owner->pi_lock);
+               WARN_ON(!list_empty(&pi_state->list));
+               list_add(&pi_state->list, &new_owner->pi_state_list);
+               pi_state->owner = new_owner;
+               raw_spin_unlock(&new_owner->pi_lock);
+       }
+}
+
 static void get_pi_state(struct futex_pi_state *pi_state)
 {
        WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount));
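
pi_state_update_owner() above centralizes moving a pi_state between the old and the new owner's pi_state_list, each step under that owner's pi_lock. A heavily simplified userspace sketch of that "move a node between per-owner lists under per-owner locks" shape, using pthread mutexes and invented types (the kernel additionally asserts that wait_lock is held and WARNs on list state, both omitted here):

    #include <pthread.h>
    #include <stdio.h>

    struct owner {
        pthread_mutex_t lock;
        struct state *head;             /* singly linked list of owned states */
    };

    struct state {
        struct owner *owner;
        struct state *next;
    };

    /* Detach st from its current owner (if any) and attach it to new_owner. */
    static void state_update_owner(struct state *st, struct owner *new_owner)
    {
        struct owner *old = st->owner;

        if (old) {
            pthread_mutex_lock(&old->lock);
            /* unlink from the old owner's list */
            struct state **pp = &old->head;
            while (*pp && *pp != st)
                pp = &(*pp)->next;
            if (*pp)
                *pp = st->next;
            pthread_mutex_unlock(&old->lock);
        }

        if (new_owner) {
            pthread_mutex_lock(&new_owner->lock);
            st->next = new_owner->head;
            new_owner->head = st;
            st->owner = new_owner;
            pthread_mutex_unlock(&new_owner->lock);
        }
    }

    int main(void)
    {
        struct owner a = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
        struct owner b = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };
        struct state st = { .owner = NULL, .next = NULL };

        state_update_owner(&st, &a);
        state_update_owner(&st, &b);
        printf("owned by b: %d\n",
               st.owner == &b && b.head == &st && a.head == NULL);
        return 0;
    }
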
@@ -785,17 +808,11 @@ static void put_pi_state(struct futex_pi_state *pi_state)
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
-               struct task_struct *owner;
                unsigned long flags;
 
                raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
-               owner = pi_state->owner;
-               if (owner) {
-                       raw_spin_lock(&owner->pi_lock);
-                       list_del_init(&pi_state->list);
-                       raw_spin_unlock(&owner->pi_lock);
-               }
-               rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
+               pi_state_update_owner(pi_state, NULL);
+               rt_mutex_proxy_unlock(&pi_state->pi_mutex);
                raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
        }
 
@@ -941,7 +958,8 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
  *     FUTEX_OWNER_DIED bit. See [4]
  *
  * [10] There is no transient state which leaves owner and user space
- *     TID out of sync.
+ *     TID out of sync. Except one error case where the kernel is denied
+ *     write access to the user address, see fixup_pi_state_owner().
  *
  *
  * Serialization and lifetime rules:
@@ -1521,26 +1539,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
                        ret = -EINVAL;
        }
 
-       if (ret)
-               goto out_unlock;
-
-       /*
-        * This is a point of no return; once we modify the uval there is no
-        * going back and subsequent operations must not fail.
-        */
-
-       raw_spin_lock(&pi_state->owner->pi_lock);
-       WARN_ON(list_empty(&pi_state->list));
-       list_del_init(&pi_state->list);
-       raw_spin_unlock(&pi_state->owner->pi_lock);
-
-       raw_spin_lock(&new_owner->pi_lock);
-       WARN_ON(!list_empty(&pi_state->list));
-       list_add(&pi_state->list, &new_owner->pi_state_list);
-       pi_state->owner = new_owner;
-       raw_spin_unlock(&new_owner->pi_lock);
-
-       postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+       if (!ret) {
+               /*
+                * This is a point of no return; once we modified the uval
+                * there is no going back and subsequent operations must
+                * not fail.
+                */
+               pi_state_update_owner(pi_state, new_owner);
+               postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+       }
 
 out_unlock:
        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -2323,18 +2330,13 @@ static void unqueue_me_pi(struct futex_q *q)
        spin_unlock(q->lock_ptr);
 }
 
-static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                               struct task_struct *argowner)
+static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+                                 struct task_struct *argowner)
 {
        struct futex_pi_state *pi_state = q->pi_state;
-       u32 uval, curval, newval;
        struct task_struct *oldowner, *newowner;
-       u32 newtid;
-       int ret, err = 0;
-
-       lockdep_assert_held(q->lock_ptr);
-
-       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+       u32 uval, curval, newval, newtid;
+       int err = 0;
 
        oldowner = pi_state->owner;
 
@@ -2368,14 +2370,12 @@ retry:
                         * We raced against a concurrent self; things are
                         * already fixed up. Nothing to do.
                         */
-                       ret = 0;
-                       goto out_unlock;
+                       return 0;
                }
 
                if (__rt_mutex_futex_trylock(&pi_state->pi_mutex)) {
-                       /* We got the lock after all, nothing to fix. */
-                       ret = 0;
-                       goto out_unlock;
+                       /* We got the lock. pi_state is correct. Tell caller. */
+                       return 1;
                }
 
                /*
@@ -2402,8 +2402,7 @@ retry:
                         * We raced against a concurrent self; things are
                         * already fixed up. Nothing to do.
                         */
-                       ret = 0;
-                       goto out_unlock;
+                       return 1;
                }
                newowner = argowner;
        }
@@ -2433,22 +2432,9 @@ retry:
         * We fixed up user space. Now we need to fix the pi_state
         * itself.
         */
-       if (pi_state->owner != NULL) {
-               raw_spin_lock(&pi_state->owner->pi_lock);
-               WARN_ON(list_empty(&pi_state->list));
-               list_del_init(&pi_state->list);
-               raw_spin_unlock(&pi_state->owner->pi_lock);
-       }
+       pi_state_update_owner(pi_state, newowner);
 
-       pi_state->owner = newowner;
-
-       raw_spin_lock(&newowner->pi_lock);
-       WARN_ON(!list_empty(&pi_state->list));
-       list_add(&pi_state->list, &newowner->pi_state_list);
-       raw_spin_unlock(&newowner->pi_lock);
-       raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-
-       return 0;
+       return argowner == current;
 
        /*
         * In order to reschedule or handle a page fault, we need to drop the
@@ -2469,17 +2455,16 @@ handle_err:
 
        switch (err) {
        case -EFAULT:
-               ret = fault_in_user_writeable(uaddr);
+               err = fault_in_user_writeable(uaddr);
                break;
 
        case -EAGAIN:
                cond_resched();
-               ret = 0;
+               err = 0;
                break;
 
        default:
                WARN_ON_ONCE(1);
-               ret = err;
                break;
        }
 
@@ -2489,17 +2474,44 @@ handle_err:
        /*
         * Check if someone else fixed it for us:
         */
-       if (pi_state->owner != oldowner) {
-               ret = 0;
-               goto out_unlock;
-       }
+       if (pi_state->owner != oldowner)
+               return argowner == current;
 
-       if (ret)
-               goto out_unlock;
+       /* Retry if err was -EAGAIN or the fault-in succeeded */
+       if (!err)
+               goto retry;
 
-       goto retry;
+       /*
+        * fault_in_user_writeable() failed so user state is immutable. At
+        * best we can make the kernel state consistent, but user state will
+        * most likely be hosed and any subsequent unlock operation will be
+        * rejected due to PI futex rule [10].
+        *
+        * Ensure that the rtmutex owner is also the pi_state owner despite
+        * the user space value claiming something different. There is no
+        * point in unlocking the rtmutex if current is the owner as it
+        * would need to wait until the next waiter has taken the rtmutex
+        * to guarantee consistent state. Keep it simple. Userspace asked
+        * for this wrecked state.
+        *
+        * The rtmutex has an owner - either current or some other
+        * task. See the EAGAIN loop above.
+        */
+       pi_state_update_owner(pi_state, rt_mutex_owner(&pi_state->pi_mutex));
 
-out_unlock:
+       return err;
+}
+
+static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
+                               struct task_struct *argowner)
+{
+       struct futex_pi_state *pi_state = q->pi_state;
+       int ret;
+
+       lockdep_assert_held(q->lock_ptr);
+
+       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+       ret = __fixup_pi_state_owner(uaddr, q, argowner);
        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
        return ret;
 }
@@ -2523,8 +2535,6 @@ static long futex_wait_restart(struct restart_block *restart);
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
-       int ret = 0;
-
        if (locked) {
                /*
                 * Got the lock. We might not be the anticipated owner if we
@@ -2535,8 +2545,8 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * stable state, anything else needs more attention.
                 */
                if (q->pi_state->owner != current)
-                       ret = fixup_pi_state_owner(uaddr, q, current);
-               return ret ? ret : locked;
+                       return fixup_pi_state_owner(uaddr, q, current);
+               return 1;
        }
 
        /*
@@ -2547,23 +2557,17 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
         * Another speculative read; pi_state->owner == current is unstable
         * but needs our attention.
         */
-       if (q->pi_state->owner == current) {
-               ret = fixup_pi_state_owner(uaddr, q, NULL);
-               return ret;
-       }
+       if (q->pi_state->owner == current)
+               return fixup_pi_state_owner(uaddr, q, NULL);
 
        /*
         * Paranoia check. If we did not take the lock, then we should not be
-        * the owner of the rt_mutex.
+        * the owner of the rt_mutex. Warn and establish consistent state.
         */
-       if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) {
-               printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
-                               "pi-state %p\n", ret,
-                               q->pi_state->pi_mutex.owner,
-                               q->pi_state->owner);
-       }
+       if (WARN_ON_ONCE(rt_mutex_owner(&q->pi_state->pi_mutex) == current))
+               return fixup_pi_state_owner(uaddr, q, current);
 
-       return ret;
+       return 0;
 }
 
 /**
@@ -2771,7 +2775,6 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
                         ktime_t *time, int trylock)
 {
        struct hrtimer_sleeper timeout, *to;
-       struct futex_pi_state *pi_state = NULL;
        struct task_struct *exiting = NULL;
        struct rt_mutex_waiter rt_waiter;
        struct futex_hash_bucket *hb;
@@ -2907,23 +2910,8 @@ no_block:
        if (res)
                ret = (res < 0) ? res : 0;
 
-       /*
-        * If fixup_owner() faulted and was unable to handle the fault, unlock
-        * it and return the fault to userspace.
-        */
-       if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) {
-               pi_state = q.pi_state;
-               get_pi_state(pi_state);
-       }
-
        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);
-
-       if (pi_state) {
-               rt_mutex_futex_unlock(&pi_state->pi_mutex);
-               put_pi_state(pi_state);
-       }
-
        goto out;
 
 out_unlock_put_key:
@@ -3183,7 +3171,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                                 u32 __user *uaddr2)
 {
        struct hrtimer_sleeper timeout, *to;
-       struct futex_pi_state *pi_state = NULL;
        struct rt_mutex_waiter rt_waiter;
        struct futex_hash_bucket *hb;
        union futex_key key2 = FUTEX_KEY_INIT;
@@ -3261,16 +3248,17 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
                        ret = fixup_pi_state_owner(uaddr2, &q, current);
-                       if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
-                               pi_state = q.pi_state;
-                               get_pi_state(pi_state);
-                       }
                        /*
                         * Drop the reference to the pi state which
                         * the requeue_pi() code acquired for us.
                         */
                        put_pi_state(q.pi_state);
                        spin_unlock(q.lock_ptr);
+                       /*
+                        * Adjust the return value. It's either -EFAULT or
+                        * success (1) but the caller expects 0 for success.
+                        */
+                       ret = ret < 0 ? ret : 0;
                }
        } else {
                struct rt_mutex *pi_mutex;
@@ -3301,25 +3289,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                if (res)
                        ret = (res < 0) ? res : 0;
 
-               /*
-                * If fixup_pi_state_owner() faulted and was unable to handle
-                * the fault, unlock the rt_mutex and return the fault to
-                * userspace.
-                */
-               if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
-                       pi_state = q.pi_state;
-                       get_pi_state(pi_state);
-               }
-
                /* Unqueue and drop the lock. */
                unqueue_me_pi(&q);
        }
 
-       if (pi_state) {
-               rt_mutex_futex_unlock(&pi_state->pi_mutex);
-               put_pi_state(pi_state);
-       }
-
        if (ret == -EINTR) {
                /*
                 * We've already been requeued, but cannot restart by calling
index 3110c77230c7f7729b7a4488680ee0003e1797f0..f62de2dea8a3178cbf5bdc4ae697f69a946cb038 100644 (file)
@@ -4,7 +4,7 @@ menu "GCOV-based kernel profiling"
 config GCOV_KERNEL
        bool "Enable gcov-based kernel profiling"
        depends on DEBUG_FS
-       select CONSTRUCTORS if !UML
+       select CONSTRUCTORS
        default n
        help
        This option enables gcov-based code profiling (e.g. for code coverage
index ab8567f32501f6e0c2dbab9065fa6946f30bc65e..dec3f73e8db92e87e7cf2647d75ad5f7afaf2c62 100644 (file)
@@ -2859,3 +2859,4 @@ bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
        rcu_read_unlock();
        return res;
 }
+EXPORT_SYMBOL_GPL(irq_check_status_bit);
index 2c0c4d6d0f83afcc0d70b696e4b216ed06086106..dc0e2d7fbdfd927c98aade002389549f89477a0c 100644 (file)
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
        struct msi_domain_ops *ops = info->ops;
        struct irq_data *irq_data;
        struct msi_desc *desc;
-       msi_alloc_info_t arg;
+       msi_alloc_info_t arg = { };
        int i, ret, virq;
        bool can_reserve;
 
index 4f8efc278aa75bc22fc4387e11fe6b8d48548135..aa919585c24b4adb914433fcfdd06999b3a212e6 100644 (file)
@@ -1134,7 +1134,6 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
        if (kexec_image->preserve_context) {
-               lock_system_sleep();
                pm_prepare_console();
                error = freeze_processes();
                if (error) {
@@ -1197,7 +1196,6 @@ int kernel_kexec(void)
                thaw_processes();
  Restore_console:
                pm_restore_console();
-               unlock_system_sleep();
        }
 #endif
 
index f7fb5d135930fa46e8f57a85ce67b856a6e23347..d5a3eb74a6574aade1fb23c3959a7ababee96d66 100644 (file)
@@ -1954,28 +1954,48 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
        return !offset;
 }
 
-bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
+/**
+ * kprobe_on_func_entry() -- check whether given address is function entry
+ * @addr: Target address
+ * @sym:  Target symbol name
+ * @offset: The offset from the symbol or the address
+ *
+ * This checks whether the given @addr+@offset or @sym+@offset is on the
+ * function entry address or not.
+ * This returns 0 if it is the function entry, or -EINVAL if it is not.
+ * It returns -ENOENT if the symbol or address lookup fails.
+ * The caller must pass either @addr or @sym (the other must be NULL);
+ * otherwise this returns -EINVAL.
+ */
+int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
 {
        kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
 
        if (IS_ERR(kp_addr))
-               return false;
+               return PTR_ERR(kp_addr);
 
-       if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
-                                               !arch_kprobe_on_func_entry(offset))
-               return false;
+       if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
+               return -ENOENT;
 
-       return true;
+       if (!arch_kprobe_on_func_entry(offset))
+               return -EINVAL;
+
+       return 0;
 }
 
 int register_kretprobe(struct kretprobe *rp)
 {
-       int ret = 0;
+       int ret;
        struct kretprobe_instance *inst;
        int i;
        void *addr;
 
-       if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
+       ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
+       if (ret)
+               return ret;
+
+       /* If only rp->kp.addr is specified, check whether the kprobe is being re-registered */
+       if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
                return -EINVAL;
 
        if (kretprobe_blacklist_size) {
index a5eceecd4513c431f24eb72b33c45089037b3413..1578973c57409cceab891f4f36820a2025dc6b1c 100644 (file)
@@ -294,7 +294,7 @@ static int kthread(void *_create)
        do_exit(ret);
 }
 
-/* called from do_fork() to get node information for about to be created task */
+/* called from kernel_clone() to get node information for the task about to be created */
 int tsk_fork_get_node(struct task_struct *tsk)
 {
 #ifdef CONFIG_NUMA
@@ -493,11 +493,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
                return p;
        kthread_bind(p, cpu);
        /* CPU hotplug need to bind once again when unparking the thread. */
-       set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
        to_kthread(p)->cpu = cpu;
        return p;
 }
 
+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return;
+
+       WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+       if (cpu < 0) {
+               clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+               return;
+       }
+
+       kthread->cpu = cpu;
+       set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+       struct kthread *kthread = to_kthread(k);
+       if (!kthread)
+               return false;
+
+       return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
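
kthread_set_per_cpu()/kthread_is_per_cpu() above are a set/clear/test trio over a single flag bit in struct kthread. The same pattern expressed with C11 atomics and invented userspace names, as a sketch only (the kernel variant also records the CPU and warns if the task can change affinity):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_IS_PER_CPU (1u << 0)

    struct worker {
        atomic_uint flags;
        int cpu;
    };

    static void worker_set_per_cpu(struct worker *w, int cpu)
    {
        if (cpu < 0) {
            atomic_fetch_and(&w->flags, ~FLAG_IS_PER_CPU);  /* clear */
            return;
        }
        w->cpu = cpu;
        atomic_fetch_or(&w->flags, FLAG_IS_PER_CPU);        /* set */
    }

    static bool worker_is_per_cpu(struct worker *w)
    {
        return atomic_load(&w->flags) & FLAG_IS_PER_CPU;    /* test */
    }

    int main(void)
    {
        struct worker w = { .flags = 0, .cpu = -1 };

        worker_set_per_cpu(&w, 2);
        printf("%d\n", worker_is_per_cpu(&w));  /* 1 */
        worker_set_per_cpu(&w, -1);
        printf("%d\n", worker_is_per_cpu(&w));  /* 0 */
        return 0;
    }
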
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k:         thread created by kthread_create().
index c1418b47f625a25fc32d8b18852057dc48b6cc93..bdaf4829098c026e096e88be59df9d198d5b1c9b 100644 (file)
@@ -79,7 +79,7 @@ module_param(lock_stat, int, 0644);
 DEFINE_PER_CPU(unsigned int, lockdep_recursion);
 EXPORT_PER_CPU_SYMBOL_GPL(lockdep_recursion);
 
-static inline bool lockdep_enabled(void)
+static __always_inline bool lockdep_enabled(void)
 {
        if (!debug_locks)
                return false;
@@ -5271,12 +5271,15 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
 /*
  * Check whether we follow the irq-flags state precisely:
  */
-static void check_flags(unsigned long flags)
+static noinstr void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
        if (!debug_locks)
                return;
 
+       /* Get the warning out..  */
+       instrumentation_begin();
+
        if (irqs_disabled_flags(flags)) {
                if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
                        printk("possible reason: unannotated irqs-off.\n");
@@ -5304,6 +5307,8 @@ static void check_flags(unsigned long flags)
 
        if (!debug_locks)
                print_irqtrace_events(current);
+
+       instrumentation_end();
 #endif
 }
 
index cfdd5b93264d7e17ca120aa3d5fafe139a2c33f9..2f8cd616d3b29aa93498db95e426acd4abbb153b 100644 (file)
@@ -1716,8 +1716,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
  * possible because it belongs to the pi_state which is about to be freed
  * and it is not longer visible to other tasks.
  */
-void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-                          struct task_struct *proxy_owner)
+void rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
        debug_rt_mutex_proxy_unlock(lock);
        rt_mutex_set_owner(lock, NULL);
index d1d62f942be2285a7e11588201ac199dc700a805..ca6fb489007b6baced167ea52ccc998179b18eec 100644 (file)
@@ -133,8 +133,7 @@ enum rtmutex_chainwalk {
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
                                       struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-                                 struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
 extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                                     struct rt_mutex_waiter *waiter,
index c73f2e295167d141cdc68fd77cc8ac60eb371226..72e33054a2e1b72d7b9c529d59105a51ea9dc9e6 100644 (file)
@@ -497,10 +497,10 @@ static int swap_writer_finish(struct swap_map_handle *handle,
                unsigned int flags, int error)
 {
        if (!error) {
-               flush_swap_writer(handle);
                pr_info("S");
                error = mark_swapfiles(handle, flags);
                pr_cont("|\n");
+               flush_swap_writer(handle);
        }
 
        if (error)
index ffdd0dc7ec6df809101f3a618cb3a65e31e434d9..5a95c688621fa5483b2031efd703180b1edda573 100644 (file)
@@ -1291,11 +1291,16 @@ static size_t info_print_prefix(const struct printk_info  *info, bool syslog,
  * done:
  *
  *   - Add prefix for each line.
+ *   - Drop truncated lines that no longer fit into the buffer.
  *   - Add the trailing newline that has been removed in vprintk_store().
- *   - Drop truncated lines that do not longer fit into the buffer.
+ *   - Add a string terminator.
+ *
+ * Since the produced string is always terminated, the maximum possible
+ * return value is @r->text_buf_size - 1.
  *
  * Return: The length of the updated/prepared text, including the added
- * prefixes and the newline. The dropped line(s) are not counted.
+ * prefixes and the newline. The terminator is not counted. The dropped
+ * line(s) are not counted.
  */
 static size_t record_print_text(struct printk_record *r, bool syslog,
                                bool time)
@@ -1338,26 +1343,31 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
 
                /*
                 * Truncate the text if there is not enough space to add the
-                * prefix and a trailing newline.
+                * prefix and a trailing newline and a terminator.
                 */
-               if (len + prefix_len + text_len + 1 > buf_size) {
+               if (len + prefix_len + text_len + 1 + 1 > buf_size) {
                        /* Drop even the current line if no space. */
-                       if (len + prefix_len + line_len + 1 > buf_size)
+                       if (len + prefix_len + line_len + 1 + 1 > buf_size)
                                break;
 
-                       text_len = buf_size - len - prefix_len - 1;
+                       text_len = buf_size - len - prefix_len - 1 - 1;
                        truncated = true;
                }
 
                memmove(text + prefix_len, text, text_len);
                memcpy(text, prefix, prefix_len);
 
+               /*
+                * Increment the prepared length to include the text and
+                * prefix that were just moved+copied. Also increment for the
+                * newline at the end of this line. If this is the last line,
+                * there is no newline, but it will be added immediately below.
+                */
                len += prefix_len + line_len + 1;
-
                if (text_len == line_len) {
                        /*
-                        * Add the trailing newline removed in
-                        * vprintk_store().
+                        * This is the last line. Add the trailing newline
+                        * removed in vprintk_store().
                         */
                        text[prefix_len + line_len] = '\n';
                        break;
@@ -1382,6 +1392,14 @@ static size_t record_print_text(struct printk_record *r, bool syslog,
                text_len -= line_len + 1;
        }
 
+       /*
+        * If a buffer was provided, it will be terminated. Space for the
+        * string terminator is guaranteed to be available. The terminator is
+        * not counted in the return value.
+        */
+       if (buf_size > 0)
+               r->text_buf[len] = 0;
+
        return len;
 }
 
@@ -3427,7 +3445,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
        while (prb_read_valid_info(prb, seq, &info, &line_count)) {
                if (r.info->seq >= dumper->next_seq)
                        break;
-               l += get_record_print_text_size(&info, line_count, true, time);
+               l += get_record_print_text_size(&info, line_count, syslog, time);
                seq = r.info->seq + 1;
        }
 
@@ -3437,7 +3455,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
                                                &info, &line_count)) {
                if (r.info->seq >= dumper->next_seq)
                        break;
-               l -= get_record_print_text_size(&info, line_count, true, time);
+               l -= get_record_print_text_size(&info, line_count, syslog, time);
                seq = r.info->seq + 1;
        }
 
index 6704f06e0417134cb661d42463a36f9321a65d15..8a7b7362c0dd4765ef414ed1f0945d0470f9ed30 100644 (file)
@@ -1718,7 +1718,7 @@ static bool copy_data(struct prb_data_ring *data_ring,
 
        /* Caller interested in the line count? */
        if (line_count)
-               *line_count = count_lines(data, data_size);
+               *line_count = count_lines(data, len);
 
        /* Caller interested in the data content? */
        if (!buf || !buf_size)
index 15d2562118d1727aa197bd5f9cc6314cfebace6c..ff74fca39ed21693428e2f5276839581808c693b 100644 (file)
@@ -1796,13 +1796,28 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
+       /* When not in the task's cpumask, no point in looking further. */
        if (!cpumask_test_cpu(cpu, p->cpus_ptr))
                return false;
 
-       if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+       /* migrate_disabled() must be allowed to finish. */
+       if (is_migration_disabled(p))
                return cpu_online(cpu);
 
-       return cpu_active(cpu);
+       /* Non-kernel threads are not allowed during either online or offline. */
+       if (!(p->flags & PF_KTHREAD))
+               return cpu_active(cpu);
+
+       /* KTHREAD_IS_PER_CPU is always allowed. */
+       if (kthread_is_per_cpu(p))
+               return cpu_online(cpu);
+
+       /* Regular kernel threads don't get to stay during offline. */
+       if (cpu_rq(cpu)->balance_push)
+               return false;
+
+       /* But are allowed during online. */
+       return cpu_online(cpu);
 }
 
 /*
@@ -2327,7 +2342,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
                /*
-                * Kernel threads are allowed on online && !active CPUs.
+                * Kernel threads are allowed on online && !active CPUs,
+                * however, during cpu-hot-unplug, even these might get pushed
+                * away if not KTHREAD_IS_PER_CPU.
                 *
                 * Specifically, migration_disabled() tasks must not fail the
                 * cpumask_any_and_distribute() pick below, esp. so on
@@ -2371,16 +2388,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
        __do_set_cpus_allowed(p, new_mask, flags);
 
-       if (p->flags & PF_KTHREAD) {
-               /*
-                * For kernel threads that do indeed end up on online &&
-                * !active we want to ensure they are strict per-CPU threads.
-                */
-               WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-                       !cpumask_intersects(new_mask, cpu_active_mask) &&
-                       p->nr_cpus_allowed != 1);
-       }
-
        return affine_move_task(rq, p, &rf, dest_cpu, flags);
 
 out:
@@ -3121,6 +3128,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 
 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
+       /*
+        * Do not complicate things with the async wake_list while the CPU is
+        * in hotplug state.
+        */
+       if (!cpu_active(cpu))
+               return false;
+
        /*
         * If the CPU does not share cache, then queue the task on the
         * remote rqs wakelist to avoid accessing remote data.
@@ -7276,8 +7290,14 @@ static void balance_push(struct rq *rq)
        /*
         * Both the cpu-hotplug and stop task are in this case and are
         * required to complete the hotplug process.
+        *
+        * XXX: the idle task does not match kthread_is_per_cpu() due to
+        * histerical raisins.
         */
-       if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+       if (rq->idle == push_task ||
+           ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+           is_migration_disabled(push_task)) {
+
                /*
                 * If this is the idle task on the outgoing CPU try to wake
                 * up the hotplug control thread which might wait for the
@@ -7309,7 +7329,7 @@ static void balance_push(struct rq *rq)
        /*
         * At this point need_resched() is true and we'll take the loop in
         * schedule(). The next pick is obviously going to be the stop task
-        * which is_per_cpu_kthread() and will push this task away.
+        * which kthread_is_per_cpu() and will push this task away.
         */
        raw_spin_lock(&rq->lock);
 }
@@ -7320,10 +7340,13 @@ static void balance_push_set(int cpu, bool on)
        struct rq_flags rf;
 
        rq_lock_irqsave(rq, &rf);
-       if (on)
+       rq->balance_push = on;
+       if (on) {
+               WARN_ON_ONCE(rq->balance_callback);
                rq->balance_callback = &balance_push_callback;
-       else
+       } else if (rq->balance_callback == &balance_push_callback) {
                rq->balance_callback = NULL;
+       }
        rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7441,6 +7464,10 @@ int sched_cpu_activate(unsigned int cpu)
        struct rq *rq = cpu_rq(cpu);
        struct rq_flags rf;
 
+       /*
+        * Make sure that when the hotplug state machine does a roll-back
+        * we clear balance_push. Ideally that would happen earlier...
+        */
        balance_push_set(cpu, false);
 
 #ifdef CONFIG_SCHED_SMT
@@ -7483,17 +7510,27 @@ int sched_cpu_deactivate(unsigned int cpu)
        int ret;
 
        set_cpu_active(cpu, false);
+
+       /*
+        * From this point forward, this CPU will refuse to run any task that
+        * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+        * push those tasks away until this gets cleared, see
+        * sched_cpu_dying().
+        */
+       balance_push_set(cpu, true);
+
        /*
-        * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-        * users of this state to go away such that all new such users will
-        * observe it.
+        * We've cleared cpu_active_mask / set balance_push, wait for all
+        * preempt-disabled and RCU users of this state to go away such that
+        * all new such users will observe it.
+        *
+        * Specifically, we rely on ttwu to no longer target this CPU, see
+        * ttwu_queue_cond() and is_cpu_allowed().
         *
         * Do sync before park smpboot threads to take care the rcu boost case.
         */
        synchronize_rcu();
 
-       balance_push_set(cpu, true);
-
        rq_lock_irqsave(rq, &rf);
        if (rq->rd) {
                update_rq_clock(rq);
@@ -7574,6 +7611,25 @@ static void calc_load_migrate(struct rq *rq)
                atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+       struct task_struct *g, *p;
+       int cpu = cpu_of(rq);
+
+       lockdep_assert_held(&rq->lock);
+
+       printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+       for_each_process_thread(g, p) {
+               if (task_cpu(p) != cpu)
+                       continue;
+
+               if (!task_on_rq_queued(p))
+                       continue;
+
+               printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+       }
+}
+
 int sched_cpu_dying(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
@@ -7583,9 +7639,18 @@ int sched_cpu_dying(unsigned int cpu)
        sched_tick_stop(cpu);
 
        rq_lock_irqsave(rq, &rf);
-       BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+       if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+               WARN(true, "Dying CPU not properly vacated!");
+               dump_rq_tasks(rq, KERN_WARNING);
+       }
        rq_unlock_irqrestore(rq, &rf);
 
+       /*
+        * Now that the CPU is offline, make sure we're welcome
+        * to new tasks once we come back up.
+        */
+       balance_push_set(cpu, false);
+
        calc_load_migrate(rq);
        update_max_interval();
        nohz_balance_exit_idle(rq);
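
The reworked is_cpu_allowed() above is a priority-ordered ladder of task and CPU predicates: the cpumask first, then migration-disabled tasks, user tasks, per-CPU kthreads, and finally the balance_push state. Restated as a pure function over booleans (a sketch with stand-in fields, not the scheduler's types), the ordering is easier to see:

    #include <stdbool.h>
    #include <stdio.h>

    struct task {
        bool in_cpumask;            /* cpu set in p->cpus_ptr */
        bool migration_disabled;
        bool is_kthread;            /* PF_KTHREAD */
        bool is_per_cpu_kthread;    /* KTHREAD_IS_PER_CPU */
    };

    struct cpu {
        bool online;
        bool active;
        bool balance_push;          /* CPU is being cleared of regular kthreads */
    };

    static bool is_cpu_allowed(const struct task *p, const struct cpu *c)
    {
        if (!p->in_cpumask)
            return false;
        if (p->migration_disabled)
            return c->online;       /* must be allowed to finish */
        if (!p->is_kthread)
            return c->active;       /* user tasks need an active CPU */
        if (p->is_per_cpu_kthread)
            return c->online;       /* always allowed while online */
        if (c->balance_push)
            return false;           /* regular kthreads get pushed away */
        return c->online;
    }

    int main(void)
    {
        struct task kworker = { true, false, true, false };
        struct cpu going_down = { true, false, true };

        printf("%d\n", is_cpu_allowed(&kworker, &going_down)); /* 0 */
        return 0;
    }
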
index 12ada79d40f338209e40244b156a69883ffbe0e9..bb09988451a041e526957d4de46a09b99d83fa63 100644 (file)
@@ -975,6 +975,7 @@ struct rq {
        unsigned long           cpu_capacity_orig;
 
        struct callback_head    *balance_callback;
+       unsigned char           balance_push;
 
        unsigned char           nohz_idle_balance;
        unsigned char           idle_balance;
index 5736c55aaa1afc60d21294c79a0c8ad5d70efc09..5ad8566534e7604d275e60cd4bd2ba78a1eb0b3e 100644 (file)
@@ -2550,6 +2550,9 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
+       if (unlikely(current->task_works))
+               task_work_run();
+
        /*
         * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
         * that the arch handlers don't all have to do it. If we get here
@@ -3701,7 +3704,8 @@ static bool access_pidfd_pidns(struct pid *pid)
        return true;
 }
 
-static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
+static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
+               siginfo_t __user *info)
 {
 #ifdef CONFIG_COMPAT
        /*
index 2efe1e206167ccfbade03bf0b76b8e5cb657c207..f25208e8df8365e090cedf887638b3e0b00e92bc 100644 (file)
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
                kfree(td);
                return PTR_ERR(tsk);
        }
+       kthread_set_per_cpu(tsk, cpu);
        /*
         * Park the thread so that it could start right on the CPU
         * when it is available.
index 7404d38315276a96fa69f78d67a856c4867dbf95..87389b9e21abaa5f4a876eeec52840b5ad65e403 100644 (file)
@@ -498,7 +498,7 @@ out:
 static void sync_hw_clock(struct work_struct *work);
 static DECLARE_WORK(sync_work, sync_hw_clock);
 static struct hrtimer sync_hrtimer;
-#define SYNC_PERIOD_NS (11UL * 60 * NSEC_PER_SEC)
+#define SYNC_PERIOD_NS (11ULL * 60 * NSEC_PER_SEC)
 
 static enum hrtimer_restart sync_timer_callback(struct hrtimer *timer)
 {
@@ -512,7 +512,7 @@ static void sched_sync_hw_clock(unsigned long offset_nsec, bool retry)
        ktime_t exp = ktime_set(ktime_get_real_seconds(), 0);
 
        if (retry)
-               exp = ktime_add_ns(exp, 2 * NSEC_PER_SEC - offset_nsec);
+               exp = ktime_add_ns(exp, 2ULL * NSEC_PER_SEC - offset_nsec);
        else
                exp = ktime_add_ns(exp, SYNC_PERIOD_NS - offset_nsec);
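
Both ntp changes above swap UL constants for ULL because unsigned long is 32 bits on 32-bit kernels, so 11UL * 60 * NSEC_PER_SEC wraps. The effect is easy to reproduce with fixed-width types in a standalone program (the values in the comments assume the usual modulo-2^32 wrap):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000

    int main(void)
    {
        /* What a 32-bit "unsigned long" computes: wraps modulo 2^32. */
        uint32_t bad  = (uint32_t)(11u * 60 * NSEC_PER_SEC);
        /* Forcing 64-bit arithmetic, as the ULL suffix does in the patch. */
        uint64_t good = 11ull * 60 * NSEC_PER_SEC;

        printf("32-bit: %" PRIu32 "\n", bad);   /* 2870003712, ~2.87 s, not 11 min */
        printf("64-bit: %" PRIu64 "\n", good);  /* 660000000000 */
        return 0;
    }

On 64-bit builds both expressions happen to agree, which is why the bug only shows up on 32-bit configurations.
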
 
index a45cedda93a7cc26b9ba8e92b7f763a5c8c0700e..6aee5768c86ff7dbd4acd58cc96d4fac949f9437 100644 (file)
@@ -991,8 +991,7 @@ EXPORT_SYMBOL_GPL(ktime_get_seconds);
 /**
  * ktime_get_real_seconds - Get the seconds portion of CLOCK_REALTIME
  *
- * Returns the wall clock seconds since 1970. This replaces the
- * get_seconds() interface which is not y2038 safe on 32bit systems.
+ * Returns the wall clock seconds since 1970.
  *
  * For 64bit systems the fast access to tk->xtime_sec is preserved. On
  * 32bit systems the access must be protected with the sequence
index 73edb9e4f3548e0331ea0d38c351750a05a418d9..29a6ebeebc9e1eb75f803046c92cb7fef97c418d 100644 (file)
@@ -394,7 +394,6 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                }
 
                if (t->ret_stack == NULL) {
-                       atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
                        t->curr_ret_depth = -1;
@@ -489,7 +488,6 @@ static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
 static void
 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 {
-       atomic_set(&t->tracing_graph_pause, 0);
        atomic_set(&t->trace_overrun, 0);
        t->ftrace_timestamp = 0;
        /* make curr_ret_stack visible before we add the ret_stack */
index d06aab4dcbb8f41fe9929197b509bfad94f6e99b..6756379b661fee56125f1ea81e8106ddbb1f6887 100644 (file)
@@ -562,6 +562,8 @@ static int __irqsoff_tracer_init(struct trace_array *tr)
        /* non overwrite screws up the latency tracers */
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
+       /* without pause, we will produce garbage if another latency occurs */
+       set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);
 
        tr->max_latency = 0;
        irqsoff_trace = tr;
@@ -583,11 +585,13 @@ static void __irqsoff_tracer_reset(struct trace_array *tr)
 {
        int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
        int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+       int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;
 
        stop_irqsoff_tracer(tr, is_graph(tr));
 
        set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
        set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
+       set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
        ftrace_reset_array_ops(tr);
 
        irqsoff_busy = false;
index e6fba1798771b401eaf97d08b2c4d5926eb81995..56c7fbff7bd7f7186bef4f9a5506063767b578aa 100644 (file)
@@ -221,9 +221,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
        struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
-       return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
+       return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
-                       tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
+                       tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
 }
 
 bool trace_kprobe_error_injectable(struct trace_event_call *call)
@@ -828,9 +828,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
                }
                if (is_return)
                        flags |= TPARG_FL_RETURN;
-               if (kprobe_on_func_entry(NULL, symbol, offset))
+               ret = kprobe_on_func_entry(NULL, symbol, offset);
+               if (ret == 0)
                        flags |= TPARG_FL_FENTRY;
-               if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
+               /* Defer the ENOENT case until the kprobe is registered */
+               if (ret == -EINVAL && is_return) {
                        trace_probe_log_err(0, BAD_RETPROBE);
                        goto parse_error;
                }
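
trace_kprobe.c is adapted above to the new kprobe_on_func_entry() convention: 0 means "function entry", -EINVAL means "resolved but not an entry", and -ENOENT means "lookup failed, decide later". A sketch of the same bool-to-errno conversion and of a caller that propagates the distinction (all names invented, not kernel APIs):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Old style: bool is_entry(...)  loses the reason for failure.
     * New style: int check_entry(...) returns 0, -ENOENT or -EINVAL.
     */
    static int check_entry(const char *sym, unsigned long offset)
    {
        if (!sym || strcmp(sym, "known_func") != 0)
            return -ENOENT;     /* lookup failed, maybe try again later */
        if (offset != 0)
            return -EINVAL;     /* resolved, but not the entry point */
        return 0;
    }

    static int register_probe(const char *sym, unsigned long offset)
    {
        int ret = check_entry(sym, offset);

        if (ret)
            return ret;         /* propagate the precise error */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               register_probe("known_func", 0),     /* 0 */
               register_probe("known_func", 4),     /* -EINVAL */
               register_probe("missing", 0));       /* -ENOENT */
        return 0;
    }
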
index 9880b6c0e2721fe5c0758eda58d82c1f36576d19..894bb885b40b146ef5c13d007427bb0156490bf5 100644 (file)
@@ -1848,12 +1848,6 @@ static void worker_attach_to_pool(struct worker *worker,
 {
        mutex_lock(&wq_pool_attach_mutex);
 
-       /*
-        * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
-        * online CPUs.  It'll be re-applied when any of the CPUs come up.
-        */
-       set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
        /*
         * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
         * stable across this function.  See the comments above the flag
@@ -1861,6 +1855,11 @@ static void worker_attach_to_pool(struct worker *worker,
         */
        if (pool->flags & POOL_DISASSOCIATED)
                worker->flags |= WORKER_UNBOUND;
+       else
+               kthread_set_per_cpu(worker->task, pool->cpu);
+
+       if (worker->rescue_wq)
+               set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
 
        list_add_tail(&worker->node, &pool->workers);
        worker->pool = pool;
@@ -1883,6 +1882,7 @@ static void worker_detach_from_pool(struct worker *worker)
 
        mutex_lock(&wq_pool_attach_mutex);
 
+       kthread_set_per_cpu(worker->task, -1);
        list_del(&worker->node);
        worker->pool = NULL;
 
@@ -4919,8 +4919,10 @@ static void unbind_workers(int cpu)
 
                raw_spin_unlock_irq(&pool->lock);
 
-               for_each_pool_worker(worker, pool)
-                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_active_mask) < 0);
+               for_each_pool_worker(worker, pool) {
+                       kthread_set_per_cpu(worker->task, -1);
+                       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+               }
 
                mutex_unlock(&wq_pool_attach_mutex);
 
@@ -4972,9 +4974,11 @@ static void rebind_workers(struct worker_pool *pool)
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
-       for_each_pool_worker(worker, pool)
+       for_each_pool_worker(worker, pool) {
+               kthread_set_per_cpu(worker->task, pool->cpu);
                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
                                                  pool->attrs->cpumask) < 0);
+       }
 
        raw_spin_lock_irq(&pool->lock);
 
index 8b635fd75fe4187e4a96f2e1908fb367f6c44ecb..3a0b1c930733aff12981e925eda1600556ac2854 100644 (file)
@@ -123,6 +123,7 @@ config UBSAN_SIGNED_OVERFLOW
 config UBSAN_UNSIGNED_OVERFLOW
        bool "Perform checking for unsigned arithmetic overflow"
        depends on $(cc-option,-fsanitize=unsigned-integer-overflow)
+       depends on !X86_32 # avoid excessive stack usage on x86-32/clang
        help
          This option enables -fsanitize=unsigned-integer-overflow which checks
          for overflow of any arithmetic operations with unsigned integers. This
index 3e3352f3d0da7b9e9f764bf7eb106463872f0a3e..bec38c64d6a624fe5680cff59bdc1a0b9dc7d803 100644 (file)
@@ -427,3 +427,34 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
        ubsan_epilogue();
 }
 EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+                                        unsigned long align,
+                                        unsigned long offset);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+                                        unsigned long align,
+                                        unsigned long offset)
+{
+       struct alignment_assumption_data *data = _data;
+       unsigned long real_ptr;
+
+       if (suppress_report(&data->location))
+               return;
+
+       ubsan_prologue(&data->location, "alignment-assumption");
+
+       if (offset)
+               pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+                      align, offset, data->type->type_name);
+       else
+               pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+                      align, data->type->type_name);
+
+       real_ptr = ptr - offset;
+       pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+              offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+              real_ptr & (align - 1));
+
+       ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
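
The new UBSAN handler reports both the pointer's actual alignment (BIT(__ffs(real_ptr))) and the misalignment remainder (real_ptr & (align - 1)). The same two bit tricks in a standalone program, assuming power-of-two alignments and using a GCC/Clang builtin for the trailing-zero count:

    #include <stdio.h>

    /* Largest power-of-two alignment the address satisfies (0 reports as 1). */
    static unsigned long natural_alignment(unsigned long addr)
    {
        return addr ? (1UL << __builtin_ctzl(addr)) : 1UL;
    }

    /* How far addr is past the previous 'align'-aligned boundary. */
    static unsigned long misalignment(unsigned long addr, unsigned long align)
    {
        return addr & (align - 1);      /* valid for power-of-two align */
    }

    int main(void)
    {
        unsigned long addr = 0x1004;

        printf("address is %lu-byte aligned, misalignment vs 16 is %lu\n",
               natural_alignment(addr), misalignment(addr, 16));
        /* prints: address is 4-byte aligned, misalignment vs 16 is 4 */
        return 0;
    }
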
index 7b56c09473a98e7fcff4c8e6640459d1d178c49c..9a0b71c5ff9fbf45492c0043f24a8578f06acaf6 100644 (file)
@@ -78,6 +78,12 @@ struct invalid_value_data {
        struct type_descriptor *type;
 };
 
+struct alignment_assumption_data {
+       struct source_location location;
+       struct source_location assumption_location;
+       struct type_descriptor *type;
+};
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128)
 typedef __int128 s_max;
 typedef unsigned __int128 u_max;
index e5acb9714436307f3ecc3e42b53301f2c8c92475..190ccdaa6c192f641a456c15bd6623b85178abce 100644 (file)
@@ -1342,7 +1342,7 @@ fast_isolate_freepages(struct compact_control *cc)
 {
        unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
        unsigned int nr_scanned = 0;
-       unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
+       unsigned long low_pfn, min_pfn, highest = 0;
        unsigned long nr_isolated = 0;
        unsigned long distance;
        struct page *page = NULL;
@@ -1387,6 +1387,7 @@ fast_isolate_freepages(struct compact_control *cc)
                struct page *freepage;
                unsigned long flags;
                unsigned int order_scanned = 0;
+               unsigned long high_pfn = 0;
 
                if (!area->nr_free)
                        continue;
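
The compaction fix above moves high_pfn into the per-order loop so a candidate found in one iteration cannot leak into the next. A minimal illustration of why a per-iteration "best candidate" must be reset inside the loop (invented data, nothing to do with the mm code):

    #include <stdio.h>

    /* Return, for each row, the first value above the threshold (0 if none). */
    static void first_above(const int rows[][3], int nrows, int thresh, int out[])
    {
        for (int i = 0; i < nrows; i++) {
            int found = 0;      /* reset per row; hoisting it out of the loop */
                                /* would leak row 0's hit into row 1 */
            for (int j = 0; j < 3; j++) {
                if (rows[i][j] > thresh) {
                    found = rows[i][j];
                    break;
                }
            }
            out[i] = found;
        }
    }

    int main(void)
    {
        const int rows[2][3] = { { 1, 9, 2 }, { 3, 4, 5 } };
        int out[2];

        first_above(rows, 2, 8, out);
        printf("%d %d\n", out[0], out[1]);  /* 9 0 */
        return 0;
    }
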
index 5c9d564317a5c5eb2e7f5c4edc6e5e3571bd9c2d..aa0e0fb046700a0b5a468abfe57f91ca01970829 100644 (file)
@@ -835,6 +835,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
        XA_STATE(xas, &mapping->i_pages, offset);
        int huge = PageHuge(page);
        int error;
+       bool charged = false;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapBacked(page), page);
@@ -848,6 +849,7 @@ noinline int __add_to_page_cache_locked(struct page *page,
                error = mem_cgroup_charge(page, current->mm, gfp);
                if (error)
                        goto error;
+               charged = true;
        }
 
        gfp &= GFP_RECLAIM_MASK;
@@ -896,6 +898,8 @@ unlock:
 
        if (xas_error(&xas)) {
                error = xas_error(&xas);
+               if (charged)
+                       mem_cgroup_uncharge(page);
                goto error;
        }
 
index c3a9ea7875ef86856e1a50967fc2bf363fdfb09c..874b732b120ce2c81945c8582971f645fba5a9f8 100644 (file)
@@ -473,6 +473,11 @@ static inline void *arch_kmap_local_high_get(struct page *page)
 }
 #endif
 
+#ifndef arch_kmap_local_set_pte
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+       set_pte_at(mm, vaddr, ptep, ptev)
+#endif
+
 /* Unmap a local mapping which was obtained by kmap_high_get() */
 static inline bool kmap_high_unmap_local(unsigned long vaddr)
 {
@@ -515,7 +520,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte - idx)));
        pteval = pfn_pte(pfn, prot);
-       set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+       arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
        arch_kmap_local_post_map(vaddr, pteval);
        current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
        preempt_enable();
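The mm/highmem.c change above uses the usual #ifndef-fallback idiom: the generic arch_kmap_local_set_pte() definition takes effect only if the architecture did not already define the hook, so an arch header can substitute its own body without touching generic code. A tiny standalone illustration of the idiom follows; the macro and variable names are invented, not kernel symbols.

#include <stdio.h>

/* An "arch" header included before this point could define the hook
 * itself; uncommenting the next line makes the override win. */
/* #define arch_store_value(slot, v)	(*(slot) = (v) * 2) */

/* Generic fallback, used only when no override was provided. */
#ifndef arch_store_value
#define arch_store_value(slot, v)	(*(slot) = (v))
#endif

int main(void)
{
	int cell = 0;

	arch_store_value(&cell, 21);	/* expands to whichever definition won */
	printf("cell = %d\n", cell);
	return 0;
}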
index 9237976abe72b7a7aa6afe50fce36c0b34007939..91ca9b103ee527283f0dc2cef8816d43beb9c6bc 100644 (file)
@@ -2202,7 +2202,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        spinlock_t *ptl;
        struct mmu_notifier_range range;
-       bool was_locked = false;
+       bool do_unlock_page = false;
        pmd_t _pmd;
 
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2218,7 +2218,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        VM_BUG_ON(freeze && !page);
        if (page) {
                VM_WARN_ON_ONCE(!PageLocked(page));
-               was_locked = true;
                if (page != pmd_page(*pmd))
                        goto out;
        }
@@ -2227,19 +2226,29 @@ repeat:
        if (pmd_trans_huge(*pmd)) {
                if (!page) {
                        page = pmd_page(*pmd);
-                       if (unlikely(!trylock_page(page))) {
-                               get_page(page);
-                               _pmd = *pmd;
-                               spin_unlock(ptl);
-                               lock_page(page);
-                               spin_lock(ptl);
-                               if (unlikely(!pmd_same(*pmd, _pmd))) {
-                                       unlock_page(page);
+                       /*
+                        * An anonymous page must be locked, to ensure that a
+                        * concurrent reuse_swap_page() sees stable mapcount;
+                        * but reuse_swap_page() is not used on shmem or file,
+                        * and page lock must not be taken when zap_pmd_range()
+                        * calls __split_huge_pmd() while i_mmap_lock is held.
+                        */
+                       if (PageAnon(page)) {
+                               if (unlikely(!trylock_page(page))) {
+                                       get_page(page);
+                                       _pmd = *pmd;
+                                       spin_unlock(ptl);
+                                       lock_page(page);
+                                       spin_lock(ptl);
+                                       if (unlikely(!pmd_same(*pmd, _pmd))) {
+                                               unlock_page(page);
+                                               put_page(page);
+                                               page = NULL;
+                                               goto repeat;
+                                       }
                                        put_page(page);
-                                       page = NULL;
-                                       goto repeat;
                                }
-                               put_page(page);
+                               do_unlock_page = true;
                        }
                }
                if (PageMlocked(page))
@@ -2249,7 +2258,7 @@ repeat:
        __split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
        spin_unlock(ptl);
-       if (!was_locked && page)
+       if (do_unlock_page)
                unlock_page(page);
        /*
         * No need to double call mmu_notifier->invalidate_range() callback.
index 18f6ee3179002a7f218dfafb4e7724685ff1c215..4bdb58ab14cbbd659a1f9ca2e531a995fed03077 100644 (file)
@@ -79,6 +79,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
 static int num_fault_mutexes;
 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
 
+static inline bool PageHugeFreed(struct page *head)
+{
+       return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+       set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+       set_page_private(head + 4, 0);
+}
+
 /* Forward declaration */
 static int hugetlb_acct_memory(struct hstate *h, long delta);
 
@@ -1028,6 +1043,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
+       SetPageHugeFreed(page);
 }
 
 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
@@ -1044,6 +1060,7 @@ static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
 
                list_move(&page->lru, &h->hugepage_activelist);
                set_page_refcounted(page);
+               ClearPageHugeFreed(page);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                return page;
@@ -1344,12 +1361,11 @@ struct hstate *size_to_hstate(unsigned long size)
  */
 bool page_huge_active(struct page *page)
 {
-       VM_BUG_ON_PAGE(!PageHuge(page), page);
-       return PageHead(page) && PagePrivate(&page[1]);
+       return PageHeadHuge(page) && PagePrivate(&page[1]);
 }
 
 /* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        SetPagePrivate(&page[1]);
@@ -1505,6 +1521,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
        spin_lock(&hugetlb_lock);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
+       ClearPageHugeFreed(page);
        spin_unlock(&hugetlb_lock);
 }
 
@@ -1755,6 +1772,7 @@ int dissolve_free_huge_page(struct page *page)
 {
        int rc = -EBUSY;
 
+retry:
        /* Not to disrupt normal path by vainly holding hugetlb_lock */
        if (!PageHuge(page))
                return 0;
@@ -1771,6 +1789,26 @@ int dissolve_free_huge_page(struct page *page)
                int nid = page_to_nid(head);
                if (h->free_huge_pages - h->resv_huge_pages == 0)
                        goto out;
+
+               /*
+                * We should make sure that the page is already on the free list
+                * when it is dissolved.
+                */
+               if (unlikely(!PageHugeFreed(head))) {
+                       spin_unlock(&hugetlb_lock);
+                       cond_resched();
+
+                       /*
+                        * Theoretically we should return -EBUSY when we
+                        * encounter this race, but the race window is quite
+                        * small, so a retry has a good chance of dissolving
+                        * the page successfully. Retrying here is therefore
+                        * just an optimization that raises the success rate
+                        * of dissolving the page.
+                        */
+                       goto retry;
+               }
+
                /*
                 * Move PageHWPoison flag from head page to the raw error page,
                 * which makes any subpages rather than the error page reusable.
@@ -2009,13 +2047,16 @@ retry:
 
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               int zeroed;
+
                if ((--needed) < 0)
                        break;
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
-               VM_BUG_ON_PAGE(!put_page_testzero(page), page);
+               zeroed = put_page_testzero(page);
+               VM_BUG_ON_PAGE(!zeroed, page);
                enqueue_huge_page(h, page);
        }
 free:
@@ -5555,9 +5596,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 {
        bool ret = true;
 
-       VM_BUG_ON_PAGE(!PageHead(page), page);
        spin_lock(&hugetlb_lock);
-       if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+       if (!PageHeadHuge(page) || !page_huge_active(page) ||
+           !get_page_unless_zero(page)) {
                ret = false;
                goto unlock;
        }
index 55bd6f09c70ff842bf58b49e9c699e1ec766e886..e529428e7a1110600badb017ff468c219d944def 100644 (file)
 
 #include "kasan.h"
 
-enum kasan_arg_mode {
-       KASAN_ARG_MODE_DEFAULT,
-       KASAN_ARG_MODE_OFF,
-       KASAN_ARG_MODE_PROD,
-       KASAN_ARG_MODE_FULL,
+enum kasan_arg {
+       KASAN_ARG_DEFAULT,
+       KASAN_ARG_OFF,
+       KASAN_ARG_ON,
 };
 
 enum kasan_arg_stacktrace {
@@ -38,7 +37,7 @@ enum kasan_arg_fault {
        KASAN_ARG_FAULT_PANIC,
 };
 
-static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
+static enum kasan_arg kasan_arg __ro_after_init;
 static enum kasan_arg_stacktrace kasan_arg_stacktrace __ro_after_init;
 static enum kasan_arg_fault kasan_arg_fault __ro_after_init;
 
@@ -52,26 +51,24 @@ DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 /* Whether panic or disable tag checking on fault. */
 bool kasan_flag_panic __ro_after_init;
 
-/* kasan.mode=off/prod/full */
-static int __init early_kasan_mode(char *arg)
+/* kasan=off/on */
+static int __init early_kasan_flag(char *arg)
 {
        if (!arg)
                return -EINVAL;
 
        if (!strcmp(arg, "off"))
-               kasan_arg_mode = KASAN_ARG_MODE_OFF;
-       else if (!strcmp(arg, "prod"))
-               kasan_arg_mode = KASAN_ARG_MODE_PROD;
-       else if (!strcmp(arg, "full"))
-               kasan_arg_mode = KASAN_ARG_MODE_FULL;
+               kasan_arg = KASAN_ARG_OFF;
+       else if (!strcmp(arg, "on"))
+               kasan_arg = KASAN_ARG_ON;
        else
                return -EINVAL;
 
        return 0;
 }
-early_param("kasan.mode", early_kasan_mode);
+early_param("kasan", early_kasan_flag);
 
-/* kasan.stack=off/on */
+/* kasan.stacktrace=off/on */
 static int __init early_kasan_flag_stacktrace(char *arg)
 {
        if (!arg)
@@ -113,8 +110,8 @@ void kasan_init_hw_tags_cpu(void)
         * as this function is only called for MTE-capable hardware.
         */
 
-       /* If KASAN is disabled, do nothing. */
-       if (kasan_arg_mode == KASAN_ARG_MODE_OFF)
+       /* If KASAN is disabled via command line, don't initialize it. */
+       if (kasan_arg == KASAN_ARG_OFF)
                return;
 
        hw_init_tags(KASAN_TAG_MAX);
@@ -124,43 +121,28 @@ void kasan_init_hw_tags_cpu(void)
 /* kasan_init_hw_tags() is called once on boot CPU. */
 void __init kasan_init_hw_tags(void)
 {
-       /* If hardware doesn't support MTE, do nothing. */
+       /* If hardware doesn't support MTE, don't initialize KASAN. */
        if (!system_supports_mte())
                return;
 
-       /* Choose KASAN mode if kasan boot parameter is not provided. */
-       if (kasan_arg_mode == KASAN_ARG_MODE_DEFAULT) {
-               if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
-                       kasan_arg_mode = KASAN_ARG_MODE_FULL;
-               else
-                       kasan_arg_mode = KASAN_ARG_MODE_PROD;
-       }
-
-       /* Preset parameter values based on the mode. */
-       switch (kasan_arg_mode) {
-       case KASAN_ARG_MODE_DEFAULT:
-               /* Shouldn't happen as per the check above. */
-               WARN_ON(1);
-               return;
-       case KASAN_ARG_MODE_OFF:
-               /* If KASAN is disabled, do nothing. */
+       /* If KASAN is disabled via command line, don't initialize it. */
+       if (kasan_arg == KASAN_ARG_OFF)
                return;
-       case KASAN_ARG_MODE_PROD:
-               static_branch_enable(&kasan_flag_enabled);
-               break;
-       case KASAN_ARG_MODE_FULL:
-               static_branch_enable(&kasan_flag_enabled);
-               static_branch_enable(&kasan_flag_stacktrace);
-               break;
-       }
 
-       /* Now, optionally override the presets. */
+       /* Enable KASAN. */
+       static_branch_enable(&kasan_flag_enabled);
 
        switch (kasan_arg_stacktrace) {
        case KASAN_ARG_STACKTRACE_DEFAULT:
+               /*
+                * Default to enabling stack trace collection for
+                * debug kernels.
+                */
+               if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
+                       static_branch_enable(&kasan_flag_stacktrace);
                break;
        case KASAN_ARG_STACKTRACE_OFF:
-               static_branch_disable(&kasan_flag_stacktrace);
+               /* Do nothing, kasan_flag_stacktrace keeps its default value. */
                break;
        case KASAN_ARG_STACKTRACE_ON:
                static_branch_enable(&kasan_flag_stacktrace);
@@ -169,11 +151,16 @@ void __init kasan_init_hw_tags(void)
 
        switch (kasan_arg_fault) {
        case KASAN_ARG_FAULT_DEFAULT:
+               /*
+                * Default to no panic on report.
+                * Do nothing, kasan_flag_panic keeps its default value.
+                */
                break;
        case KASAN_ARG_FAULT_REPORT:
-               kasan_flag_panic = false;
+               /* Do nothing, kasan_flag_panic keeps its default value. */
                break;
        case KASAN_ARG_FAULT_PANIC:
+               /* Enable panic on report. */
                kasan_flag_panic = true;
                break;
        }
index 7ca0b92d5886dca033e6b2a050441fd01b71c048..c4605ac9837b011460d21c000fdc3c76db764348 100644 (file)
@@ -373,9 +373,10 @@ static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
 
                if (kasan_pte_table(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
-                           IS_ALIGNED(next, PMD_SIZE))
+                           IS_ALIGNED(next, PMD_SIZE)) {
                                pmd_clear(pmd);
-                       continue;
+                               continue;
+                       }
                }
                pte = pte_offset_kernel(pmd, addr);
                kasan_remove_pte_table(pte, addr, next);
@@ -398,9 +399,10 @@ static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
 
                if (kasan_pmd_table(*pud)) {
                        if (IS_ALIGNED(addr, PUD_SIZE) &&
-                           IS_ALIGNED(next, PUD_SIZE))
+                           IS_ALIGNED(next, PUD_SIZE)) {
                                pud_clear(pud);
-                       continue;
+                               continue;
+                       }
                }
                pmd = pmd_offset(pud, addr);
                pmd_base = pmd_offset(pud, 0);
@@ -424,9 +426,10 @@ static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
 
                if (kasan_pud_table(*p4d)) {
                        if (IS_ALIGNED(addr, P4D_SIZE) &&
-                           IS_ALIGNED(next, P4D_SIZE))
+                           IS_ALIGNED(next, P4D_SIZE)) {
                                p4d_clear(p4d);
-                       continue;
+                               continue;
+                       }
                }
                pud = pud_offset(p4d, addr);
                kasan_remove_pud_table(pud, addr, next);
@@ -457,9 +460,10 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
 
                if (kasan_p4d_table(*pgd)) {
                        if (IS_ALIGNED(addr, PGDIR_SIZE) &&
-                           IS_ALIGNED(next, PGDIR_SIZE))
+                           IS_ALIGNED(next, PGDIR_SIZE)) {
                                pgd_clear(pgd);
-                       continue;
+                               continue;
+                       }
                }
 
                p4d = p4d_offset(pgd, addr);
@@ -482,7 +486,6 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
 
        ret = kasan_populate_early_shadow(shadow_start, shadow_end);
        if (ret)
-               kasan_remove_zero_shadow(shadow_start,
-                                       size >> KASAN_SHADOW_SCALE_SHIFT);
+               kasan_remove_zero_shadow(start, size);
        return ret;
 }
index cc4d9e1d49b1d046d579863ec36461152f4a2d12..8c706e7652f2b1fa026b7da658382cdcc4b8b8ea 100644 (file)
@@ -209,7 +209,7 @@ bool check_memory_region(unsigned long addr, size_t size, bool write,
 
 static inline bool addr_has_metadata(const void *addr)
 {
-       return true;
+       return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
 }
 
 #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
index d24bcfa88d2f1bd42d96f4e52a6f3e8c2cd91396..8d9b5f1e70409e1ea0ba95b3c576185cb80def05 100644 (file)
@@ -275,14 +275,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
  * Return:
  * Found address on success, 0 on failure.
  */
@@ -291,8 +283,6 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                        phys_addr_t end, int nid,
                                        enum memblock_flags flags)
 {
-       phys_addr_t kernel_end, ret;
-
        /* pump up @end */
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
            end == MEMBLOCK_ALLOC_KASAN)
@@ -301,40 +291,13 @@ static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
        /* avoid allocating the first page */
        start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
-       kernel_end = __pa_symbol(_end);
-
-       /*
-        * try bottom-up allocation only when bottom-up mode
-        * is set and @end is above the kernel image.
-        */
-       if (memblock_bottom_up() && end > kernel_end) {
-               phys_addr_t bottom_up_start;
-
-               /* make sure we will allocate above the kernel */
-               bottom_up_start = max(start, kernel_end);
 
-               /* ok, try bottom-up allocation first */
-               ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-                                                     size, align, nid, flags);
-               if (ret)
-                       return ret;
-
-               /*
-                * we always limit bottom-up allocation above the kernel,
-                * but top-down allocation doesn't have the limit, so
-                * retrying top-down allocation may succeed when bottom-up
-                * allocation failed.
-                *
-                * bottom-up allocation is expected to be fail very rarely,
-                * so we use WARN_ONCE() here to see the stack trace if
-                * fail happens.
-                */
-               WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
-                         "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
-       }
-
-       return __memblock_find_range_top_down(start, end, size, align, nid,
-                                             flags);
+       if (memblock_bottom_up())
+               return __memblock_find_range_bottom_up(start, end, size, align,
+                                                      nid, flags);
+       else
+               return __memblock_find_range_top_down(start, end, size, align,
+                                                     nid, flags);
 }
 
 /**
@@ -1427,7 +1390,7 @@ phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
 }
 
 /**
- * memblock_phys_alloc_try_nid - allocate a memory block from specified MUMA node
+ * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
  * @size: size of memory block to be allocated in bytes
  * @align: alignment of the region and block's size
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
index 605f671203efbbe13cdd88163f4c71b68863acf8..e2de77b5bcc2fb2afe109447ec754e17b4b17d7c 100644 (file)
@@ -3115,9 +3115,7 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages)
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                page_counter_uncharge(&memcg->kmem, nr_pages);
 
-       page_counter_uncharge(&memcg->memory, nr_pages);
-       if (do_memsw_account())
-               page_counter_uncharge(&memcg->memsw, nr_pages);
+       refill_stock(memcg, nr_pages);
 }
 
 /**
index 04d9f154a130d28f38d26dfb0a897656d747cd71..e9481632fcd1b5e83fff6c87f4ad7354bb0b40bf 100644 (file)
@@ -1885,6 +1885,12 @@ static int soft_offline_free_page(struct page *page)
        return rc;
 }
 
+static void put_ref_page(struct page *page)
+{
+       if (page)
+               put_page(page);
+}
+
 /**
  * soft_offline_page - Soft offline a page.
  * @pfn: pfn to soft-offline
@@ -1910,20 +1916,26 @@ static int soft_offline_free_page(struct page *page)
 int soft_offline_page(unsigned long pfn, int flags)
 {
        int ret;
-       struct page *page;
        bool try_again = true;
+       struct page *page, *ref_page = NULL;
+
+       WARN_ON_ONCE(!pfn_valid(pfn) && (flags & MF_COUNT_INCREASED));
 
        if (!pfn_valid(pfn))
                return -ENXIO;
+       if (flags & MF_COUNT_INCREASED)
+               ref_page = pfn_to_page(pfn);
+
        /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
        page = pfn_to_online_page(pfn);
-       if (!page)
+       if (!page) {
+               put_ref_page(ref_page);
                return -EIO;
+       }
 
        if (PageHWPoison(page)) {
                pr_info("%s: %#lx page already poisoned\n", __func__, pfn);
-               if (flags & MF_COUNT_INCREASED)
-                       put_page(page);
+               put_ref_page(ref_page);
                return 0;
        }
 
index ee5e612b4cd87bcab7b72948bee803d4fb346615..20ca887ea76943c97236872c2b046ffa7647a613 100644 (file)
@@ -402,6 +402,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        struct zone *oldzone, *newzone;
        int dirty;
        int expected_count = expected_page_refs(mapping, page) + extra_count;
+       int nr = thp_nr_pages(page);
 
        if (!mapping) {
                /* Anonymous page without mapping */
@@ -437,7 +438,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         */
        newpage->index = page->index;
        newpage->mapping = page->mapping;
-       page_ref_add(newpage, thp_nr_pages(page)); /* add cache reference */
+       page_ref_add(newpage, nr); /* add cache reference */
        if (PageSwapBacked(page)) {
                __SetPageSwapBacked(newpage);
                if (PageSwapCache(page)) {
@@ -459,7 +460,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        if (PageTransHuge(page)) {
                int i;
 
-               for (i = 1; i < HPAGE_PMD_NR; i++) {
+               for (i = 1; i < nr; i++) {
                        xas_next(&xas);
                        xas_store(&xas, newpage);
                }
@@ -470,7 +471,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
         * to one less reference.
         * We know this isn't the last reference.
         */
-       page_ref_unfreeze(page, expected_count - thp_nr_pages(page));
+       page_ref_unfreeze(page, expected_count - nr);
 
        xas_unlock(&xas);
        /* Leave irq disabled to prevent preemption while updating stats */
@@ -493,17 +494,17 @@ int migrate_page_move_mapping(struct address_space *mapping,
                old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
                new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
 
-               __dec_lruvec_state(old_lruvec, NR_FILE_PAGES);
-               __inc_lruvec_state(new_lruvec, NR_FILE_PAGES);
+               __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+               __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
                if (PageSwapBacked(page) && !PageSwapCache(page)) {
-                       __dec_lruvec_state(old_lruvec, NR_SHMEM);
-                       __inc_lruvec_state(new_lruvec, NR_SHMEM);
+                       __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+                       __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
                }
                if (dirty && mapping_can_writeback(mapping)) {
-                       __dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
-                       __dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
-                       __inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
-                       __inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
+                       __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+                       __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
+                       __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+                       __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
                }
        }
        local_irq_enable();
@@ -1279,6 +1280,12 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                return -ENOSYS;
        }
 
+       if (page_count(hpage) == 1) {
+               /* page was freed from under us. So we are done. */
+               putback_active_hugepage(hpage);
+               return MIGRATEPAGE_SUCCESS;
+       }
+
        new_hpage = get_new_page(hpage, private);
        if (!new_hpage)
                return -ENOMEM;
index 027f6481ba59bca36ff024dbbf62cfa3f7ef04e9..519a60d5b6f7d5017c788ced10c9fcca6637dd65 100644 (file)
@@ -1207,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
        /* s390's use of memset() could override KASAN redzones. */
        kasan_disable_current();
        for (i = 0; i < numpages; i++) {
+               u8 tag = page_kasan_tag(page + i);
                page_kasan_tag_reset(page + i);
                clear_highpage(page + i);
+               page_kasan_tag_set(page + i, tag);
        }
        kasan_enable_current();
 }
index d9e4e10683cc129c33c67864e75c926a5a7bf9d6..7ecbbbe5bc0c1fe65e3fc52b881205b18189b771 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2791,7 +2791,8 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
                                                   void *obj)
 {
        if (unlikely(slab_want_init_on_free(s)) && obj)
-               memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+               memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
+                       0, sizeof(void *));
 }
 
 /*
@@ -2883,7 +2884,7 @@ redo:
                stat(s, ALLOC_FASTPATH);
        }
 
-       maybe_wipe_obj_freeptr(s, kasan_reset_tag(object));
+       maybe_wipe_obj_freeptr(s, object);
 
        if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
                memset(kasan_reset_tag(object), 0, s->object_size);
@@ -3329,7 +3330,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                int j;
 
                for (j = 0; j < i; j++)
-                       memset(p[j], 0, s->object_size);
+                       memset(kasan_reset_tag(p[j]), 0, s->object_size);
        }
 
        /* memcg and kmem_cache debug support */
@@ -5624,10 +5625,8 @@ static int sysfs_slab_add(struct kmem_cache *s)
 
        s->kobj.kset = kset;
        err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
-       if (err) {
-               kobject_put(&s->kobj);
+       if (err)
                goto out;
-       }
 
        err = sysfs_create_group(&s->kobj, &slab_attr_group);
        if (err)
index c1c30a9f76f343e90570102e6eb9d8407ae563ea..8b796c499cbb243f7cfa9552d02703602316fd70 100644 (file)
@@ -272,7 +272,8 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
            kattr->test.repeat)
                return -EINVAL;
 
-       if (ctx_size_in < prog->aux->max_ctx_offset)
+       if (ctx_size_in < prog->aux->max_ctx_offset ||
+           ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
                return -EINVAL;
 
        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
index 1883118aae55b015e33b3702a3611bc5a60f8357..32a48e5418dac67af8daf4dcf9b4312ab89c321f 100644 (file)
@@ -88,4 +88,33 @@ int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
 int br_mrp_ring_port_open(struct net_device *dev, u8 loc);
 int br_mrp_in_port_open(struct net_device *dev, u8 loc);
 
+/* MRP protocol data units */
+struct br_mrp_tlv_hdr {
+       __u8 type;
+       __u8 length;
+};
+
+struct br_mrp_common_hdr {
+       __be16 seq_id;
+       __u8 domain[MRP_DOMAIN_UUID_LENGTH];
+};
+
+struct br_mrp_ring_test_hdr {
+       __be16 prio;
+       __u8 sa[ETH_ALEN];
+       __be16 port_role;
+       __be16 state;
+       __be16 transitions;
+       __be32 timestamp;
+} __attribute__((__packed__));
+
+struct br_mrp_in_test_hdr {
+       __be16 id;
+       __u8 sa[ETH_ALEN];
+       __be16 port_role;
+       __be16 state;
+       __be16 transitions;
+       __be32 timestamp;
+} __attribute__((__packed__));
+
 #endif /* _BR_PRIVATE_MRP_H */
index 9815cfe42af097a274957ee73cb4d4c2f3fe8985..ca44c327bacedb8a218d404503c59c2d6e60649a 100644 (file)
@@ -569,6 +569,34 @@ e_range:
        return -ERANGE;
 }
 
+static int decode_con_secret(void **p, void *end, u8 *con_secret,
+                            int *con_secret_len)
+{
+       int len;
+
+       ceph_decode_32_safe(p, end, len, bad);
+       ceph_decode_need(p, end, len, bad);
+
+       dout("%s len %d\n", __func__, len);
+       if (con_secret) {
+               if (len > CEPH_MAX_CON_SECRET_LEN) {
+                       pr_err("connection secret too big %d\n", len);
+                       goto bad_memzero;
+               }
+               memcpy(con_secret, *p, len);
+               *con_secret_len = len;
+       }
+       memzero_explicit(*p, len);
+       *p += len;
+       return 0;
+
+bad_memzero:
+       memzero_explicit(*p, len);
+bad:
+       pr_err("failed to decode connection secret\n");
+       return -EINVAL;
+}
+
 static int handle_auth_session_key(struct ceph_auth_client *ac,
                                   void **p, void *end,
                                   u8 *session_key, int *session_key_len,
@@ -612,17 +640,9 @@ static int handle_auth_session_key(struct ceph_auth_client *ac,
                dout("%s decrypted %d bytes\n", __func__, ret);
                dend = dp + ret;
 
-               ceph_decode_32_safe(&dp, dend, len, e_inval);
-               if (len > CEPH_MAX_CON_SECRET_LEN) {
-                       pr_err("connection secret too big %d\n", len);
-                       return -EINVAL;
-               }
-
-               dout("%s connection secret len %d\n", __func__, len);
-               if (con_secret) {
-                       memcpy(con_secret, dp, len);
-                       *con_secret_len = len;
-               }
+               ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+               if (ret)
+                       return ret;
        }
 
        /* service tickets */
@@ -828,7 +848,6 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
 {
        void *dp, *dend;
        u8 struct_v;
-       int len;
        int ret;
 
        dp = *p + ceph_x_encrypt_offset();
@@ -843,17 +862,9 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
        ceph_decode_64_safe(&dp, dend, *nonce_plus_one, e_inval);
        dout("%s nonce_plus_one %llu\n", __func__, *nonce_plus_one);
        if (struct_v >= 2) {
-               ceph_decode_32_safe(&dp, dend, len, e_inval);
-               if (len > CEPH_MAX_CON_SECRET_LEN) {
-                       pr_err("connection secret too big %d\n", len);
-                       return -EINVAL;
-               }
-
-               dout("%s connection secret len %d\n", __func__, len);
-               if (con_secret) {
-                       memcpy(con_secret, dp, len);
-                       *con_secret_len = len;
-               }
+               ret = decode_con_secret(&dp, dend, con_secret, con_secret_len);
+               if (ret)
+                       return ret;
        }
 
        return 0;
index 4f75df40fb121f3c955f916fac5a3bb7f64d7f40..92d89b3316459858668f9ad2adabb859552bdb8d 100644 (file)
@@ -96,6 +96,7 @@ int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
+       memzero_explicit(*p, key->len);
        *p += key->len;
        return ret;
 
@@ -134,7 +135,7 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
 void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
 {
        if (key) {
-               kfree(key->key);
+               kfree_sensitive(key->key);
                key->key = NULL;
                if (key->tfm) {
                        crypto_free_sync_skcipher(key->tfm);
index 04f653b3c89733bba8771d79399af0053243957c..2cb5ffdf071af76a679a0f1cdae2f7b1a13a909d 100644 (file)
@@ -1100,7 +1100,7 @@ static int read_partial_message(struct ceph_connection *con)
                if (ret < 0)
                        return ret;
 
-               BUG_ON(!con->in_msg ^ skip);
+               BUG_ON((!con->in_msg) ^ skip);
                if (skip) {
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
index c38d8de93836371f0beb51bee80c1ef9b81485f2..cc40ce4e02fbc44cfc3460fc5c52f5a04886295b 100644 (file)
@@ -689,11 +689,10 @@ static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
 }
 
 static int setup_crypto(struct ceph_connection *con,
-                       u8 *session_key, int session_key_len,
-                       u8 *con_secret, int con_secret_len)
+                       const u8 *session_key, int session_key_len,
+                       const u8 *con_secret, int con_secret_len)
 {
        unsigned int noio_flag;
-       void *p;
        int ret;
 
        dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
@@ -751,15 +750,14 @@ static int setup_crypto(struct ceph_connection *con,
                return ret;
        }
 
-       p = con_secret;
-       WARN_ON((unsigned long)p & crypto_aead_alignmask(con->v2.gcm_tfm));
-       ret = crypto_aead_setkey(con->v2.gcm_tfm, p, CEPH_GCM_KEY_LEN);
+       WARN_ON((unsigned long)con_secret &
+               crypto_aead_alignmask(con->v2.gcm_tfm));
+       ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
        if (ret) {
                pr_err("failed to set gcm key: %d\n", ret);
                return ret;
        }
 
-       p += CEPH_GCM_KEY_LEN;
        WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
        ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
        if (ret) {
@@ -777,8 +775,11 @@ static int setup_crypto(struct ceph_connection *con,
        aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  crypto_req_done, &con->v2.gcm_wait);
 
-       memcpy(&con->v2.in_gcm_nonce, p, CEPH_GCM_IV_LEN);
-       memcpy(&con->v2.out_gcm_nonce, p + CEPH_GCM_IV_LEN, CEPH_GCM_IV_LEN);
+       memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
+              CEPH_GCM_IV_LEN);
+       memcpy(&con->v2.out_gcm_nonce,
+              con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
+              CEPH_GCM_IV_LEN);
        return 0;  /* auth_x, secure mode */
 }
 
@@ -800,7 +801,7 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
        desc->tfm = con->v2.hmac_tfm;
        ret = crypto_shash_init(desc);
        if (ret)
-               return ret;
+               goto out;
 
        for (i = 0; i < kvec_cnt; i++) {
                WARN_ON((unsigned long)kvecs[i].iov_base &
@@ -808,15 +809,14 @@ static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
                ret = crypto_shash_update(desc, kvecs[i].iov_base,
                                          kvecs[i].iov_len);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        ret = crypto_shash_final(desc, hmac);
-       if (ret)
-               return ret;
 
+out:
        shash_desc_zero(desc);
-       return 0;  /* auth_x, both plain and secure modes */
+       return ret;  /* auth_x, both plain and secure modes */
 }
 
 static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
@@ -2072,27 +2072,32 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
        if (con->state != CEPH_CON_S_V2_AUTH) {
                dout("%s con %p state changed to %d\n", __func__, con,
                     con->state);
-               return -EAGAIN;
+               ret = -EAGAIN;
+               goto out;
        }
 
        dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);
        if (ret)
-               return ret;
+               goto out;
 
        ret = setup_crypto(con, session_key, session_key_len, con_secret,
                           con_secret_len);
        if (ret)
-               return ret;
+               goto out;
 
        reset_out_kvecs(con);
        ret = prepare_auth_signature(con);
        if (ret) {
                pr_err("prepare_auth_signature failed: %d\n", ret);
-               return ret;
+               goto out;
        }
 
        con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;
-       return 0;
+
+out:
+       memzero_explicit(session_key_buf, sizeof(session_key_buf));
+       memzero_explicit(con_secret_buf, sizeof(con_secret_buf));
+       return ret;
 
 bad:
        pr_err("failed to decode auth_done\n");
@@ -3436,6 +3441,8 @@ void ceph_con_v2_reset_protocol(struct ceph_connection *con)
        }
 
        con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
+       memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
+       memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
 
        if (con->v2.hmac_tfm) {
                crypto_free_shash(con->v2.hmac_tfm);
index b9d54ed9f33841d546bca9b79c04bbef6654b70d..195ceb8afb061ce2938f0f61ce21b88ee7174cf4 100644 (file)
@@ -1433,7 +1433,7 @@ static int mon_handle_auth_bad_method(struct ceph_connection *con,
 /*
  * handle incoming message
  */
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void mon_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mon_client *monc = con->private;
        int type = le16_to_cpu(msg->hdr.type);
@@ -1565,21 +1565,21 @@ static void mon_fault(struct ceph_connection *con)
  * will come from the messenger workqueue, which is drained prior to
  * mon_client destruction.
  */
-static struct ceph_connection *con_get(struct ceph_connection *con)
+static struct ceph_connection *mon_get_con(struct ceph_connection *con)
 {
        return con;
 }
 
-static void con_put(struct ceph_connection *con)
+static void mon_put_con(struct ceph_connection *con)
 {
 }
 
 static const struct ceph_connection_operations mon_con_ops = {
-       .get = con_get,
-       .put = con_put,
-       .dispatch = dispatch,
-       .fault = mon_fault,
+       .get = mon_get_con,
+       .put = mon_put_con,
        .alloc_msg = mon_alloc_msg,
+       .dispatch = mon_dispatch,
+       .fault = mon_fault,
        .get_auth_request = mon_get_auth_request,
        .handle_auth_reply_more = mon_handle_auth_reply_more,
        .handle_auth_done = mon_handle_auth_done,
index 61229c5e22cb84e6943978671b3b8bb1749243e6..ff8624a7c96438df6b691cdc536a7b196919df95 100644 (file)
@@ -5412,7 +5412,7 @@ void ceph_osdc_cleanup(void)
 /*
  * handle incoming message
  */
-static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+static void osd_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
@@ -5534,9 +5534,9 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
        return m;
 }
 
-static struct ceph_msg *alloc_msg(struct ceph_connection *con,
-                                 struct ceph_msg_header *hdr,
-                                 int *skip)
+static struct ceph_msg *osd_alloc_msg(struct ceph_connection *con,
+                                     struct ceph_msg_header *hdr,
+                                     int *skip)
 {
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);
@@ -5560,7 +5560,7 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
 /*
  * Wrappers to refcount containing ceph_osd struct
  */
-static struct ceph_connection *get_osd_con(struct ceph_connection *con)
+static struct ceph_connection *osd_get_con(struct ceph_connection *con)
 {
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
@@ -5568,7 +5568,7 @@ static struct ceph_connection *get_osd_con(struct ceph_connection *con)
        return NULL;
 }
 
-static void put_osd_con(struct ceph_connection *con)
+static void osd_put_con(struct ceph_connection *con)
 {
        struct ceph_osd *osd = con->private;
        put_osd(osd);
@@ -5582,8 +5582,8 @@ static void put_osd_con(struct ceph_connection *con)
  * Note: returned pointer is the address of a structure that's
  * managed separately.  Caller must *not* attempt to free it.
  */
-static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
-                                       int *proto, int force_new)
+static struct ceph_auth_handshake *
+osd_get_authorizer(struct ceph_connection *con, int *proto, int force_new)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5599,7 +5599,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
        return auth;
 }
 
-static int add_authorizer_challenge(struct ceph_connection *con,
+static int osd_add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
 {
        struct ceph_osd *o = con->private;
@@ -5610,7 +5610,7 @@ static int add_authorizer_challenge(struct ceph_connection *con,
                                            challenge_buf, challenge_buf_len);
 }
 
-static int verify_authorizer_reply(struct ceph_connection *con)
+static int osd_verify_authorizer_reply(struct ceph_connection *con)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5622,7 +5622,7 @@ static int verify_authorizer_reply(struct ceph_connection *con)
                NULL, NULL, NULL, NULL);
 }
 
-static int invalidate_authorizer(struct ceph_connection *con)
+static int osd_invalidate_authorizer(struct ceph_connection *con)
 {
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
@@ -5731,18 +5731,18 @@ static int osd_check_message_signature(struct ceph_msg *msg)
 }
 
 static const struct ceph_connection_operations osd_con_ops = {
-       .get = get_osd_con,
-       .put = put_osd_con,
-       .dispatch = dispatch,
-       .get_authorizer = get_authorizer,
-       .add_authorizer_challenge = add_authorizer_challenge,
-       .verify_authorizer_reply = verify_authorizer_reply,
-       .invalidate_authorizer = invalidate_authorizer,
-       .alloc_msg = alloc_msg,
+       .get = osd_get_con,
+       .put = osd_put_con,
+       .alloc_msg = osd_alloc_msg,
+       .dispatch = osd_dispatch,
+       .fault = osd_fault,
        .reencode_message = osd_reencode_message,
+       .get_authorizer = osd_get_authorizer,
+       .add_authorizer_challenge = osd_add_authorizer_challenge,
+       .verify_authorizer_reply = osd_verify_authorizer_reply,
+       .invalidate_authorizer = osd_invalidate_authorizer,
        .sign_message = osd_sign_message,
        .check_message_signature = osd_check_message_signature,
-       .fault = osd_fault,
        .get_auth_request = osd_get_auth_request,
        .handle_auth_reply_more = osd_handle_auth_reply_more,
        .handle_auth_done = osd_handle_auth_done,
index c360bb5367e240c228efadacf0e2c2ba4250e13a..a979b86dbacda9dfe31dd8b269024f7f0f5a8ef1 100644 (file)
@@ -9672,6 +9672,11 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
                }
        }
 
+       if ((features & NETIF_F_HW_TLS_RX) && !(features & NETIF_F_RXCSUM)) {
+               netdev_dbg(dev, "Dropping TLS RX HW offload feature since no RXCSUM feature.\n");
+               features &= ~NETIF_F_HW_TLS_RX;
+       }
+
        return features;
 }
 
index ee828e4b1007e9809e2495a759b869dafb8daa14..738d4344d6799a0dda3051e265da84fcabc0d86c 100644 (file)
@@ -4146,7 +4146,7 @@ out:
 static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
                                              struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink_param_item *param_item;
        struct sk_buff *msg;
        int err;
@@ -4175,7 +4175,7 @@ static int devlink_nl_cmd_port_param_get_doit(struct sk_buff *skb,
 static int devlink_nl_cmd_port_param_set_doit(struct sk_buff *skb,
                                              struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
 
        return __devlink_nl_cmd_param_set_doit(devlink_port->devlink,
                                               devlink_port->index,
index 80dbf2f4016e26824bc968115503ca2072933f63..8e582e29a41e39809cc534865bb3c91c05b3d9f2 100644 (file)
@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
        u64 rate, brate;
 
        est_fetch_counters(est, &b);
-       brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
-       brate -= (est->avbps >> est->ewma_log);
+       brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+       brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
 
-       rate = (b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
-       rate -= (est->avpps >> est->ewma_log);
+       rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
+       rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
 
        write_seqcount_begin(&est->seq);
        est->avbps += brate;
@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
        if (parm->interval < -2 || parm->interval > 3)
                return -EINVAL;
 
+       if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+               return -EINVAL;
+
        est = kzalloc(sizeof(*est), GFP_KERNEL);
        if (!est)
                return -ENOBUFS;
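The net/core/gen_estimator.c change above applies the EWMA smoothing as two separate right shifts by ewma_log rather than folding ewma_log into a single left-shift count (which could go out of range for large ewma_log values), and the new check rejects ewma_log of 0 or 31 and above. The following is a rough userspace sketch of that fixed-point EWMA update, not the kernel function itself; names and constants are illustrative.

#include <stdint.h>
#include <stdio.h>

/*
 * Fixed-point EWMA: avg moves toward sample by roughly
 * (sample - avg) / 2^ewma_log per interval, using shifts only.
 * Shifting each term separately keeps every shift count small and
 * non-negative; unsigned wraparound keeps the subtraction correct
 * even when avg is larger than sample.
 */
static uint64_t ewma_update(uint64_t avg, uint64_t sample, unsigned int ewma_log)
{
	uint64_t delta = (sample >> ewma_log) - (avg >> ewma_log);

	return avg + delta;
}

int main(void)
{
	uint64_t avg = 0;
	unsigned int ewma_log = 3;	/* weight of 1/8 per interval */

	for (int i = 0; i < 8; i++) {
		avg = ewma_update(avg, 1024, ewma_log);
		printf("interval %d: avg = %llu\n", i, (unsigned long long)avg);
	}
	return 0;
}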
index 277ed854aef1c362df96bedfa19b07ef1b6d6b80..6d2d557442dc69b403cdbf98b4b8f5d593287121 100644 (file)
@@ -1245,13 +1245,14 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
        old    = neigh->nud_state;
        err    = -EPERM;
 
-       if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
-           (old & (NUD_NOARP | NUD_PERMANENT)))
-               goto out;
        if (neigh->dead) {
                NL_SET_ERR_MSG(extack, "Neighbor entry is now dead");
+               new = old;
                goto out;
        }
+       if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+           (old & (NUD_NOARP | NUD_PERMANENT)))
+               goto out;
 
        ext_learn_change = neigh_update_ext_learned(neigh, flags, &notify);
 
index c1a6f262636a15c828100c8e80cd5021bebe89f7..785daff48030d328b879afe58db35c8d96a58ea7 100644 (file)
@@ -437,7 +437,11 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 
        len += NET_SKB_PAD;
 
-       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+       /* If requested length is either too small or too big,
+        * we use kmalloc() for skb->head allocation.
+        */
+       if (len <= SKB_WITH_OVERHEAD(1024) ||
+           len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
index 4cac31d22a502077b959e7d734d24be7a57467f7..2193ae529e752623123dfbbaeb19f6962ec4d64e 100644 (file)
@@ -1035,7 +1035,7 @@ source_ok:
                        fld.saddr = dnet_select_source(dev_out, 0,
                                                       RT_SCOPE_HOST);
                        if (!fld.daddr)
-                               goto out;
+                               goto done;
                }
                fld.flowidn_oif = LOOPBACK_IFINDEX;
                res.type = RTN_LOCAL;
index 7dc92ce5a1340ebe379d9d7dae2111ba5961edaf..a9c30a608e35da7a823f959bebe75911430f3c28 100644 (file)
@@ -217,7 +217,10 @@ struct hsr_priv {
        u8 net_id;              /* for PRP, it occupies most significant 3 bits
                                 * of lan_id
                                 */
-       unsigned char           sup_multicast_addr[ETH_ALEN];
+       unsigned char           sup_multicast_addr[ETH_ALEN] __aligned(sizeof(u16));
+                               /* Align to u16 boundary to avoid unaligned access
+                                * in ether_addr_equal
+                                */
 #ifdef CONFIG_DEBUG_FS
        struct dentry *node_tbl_root;
 #endif
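The net/hsr/hsr_main.h change above forces the multicast address array onto a 2-byte boundary so that helpers which compare MAC addresses as 16-bit words (ether_addr_equal, per the comment) never perform unaligned loads. A standalone illustration of member alignment follows; the struct and field names are made up, and the kernel's __aligned() wrapper boils down to the same compiler attribute.

#include <stddef.h>
#include <stdio.h>

/* Made-up struct, only to show the effect of the alignment attribute. */
struct frame_meta {
	char		tag[3];
	/* Without the attribute this member would start at offset 3;
	 * aligning it to 2 bytes allows 16-bit word accesses. */
	unsigned char	addr[6] __attribute__((aligned(2)));
};

int main(void)
{
	printf("offsetof(addr) = %zu\n", offsetof(struct frame_meta, addr));
	printf("sizeof(struct frame_meta) = %zu\n", sizeof(struct frame_meta));
	return 0;
}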
index fd8b8800a2c3022666f46b9ba2ac984f7cf6b04d..6bd7ca09af03dd5385096f749cf05afecb4b7795 100644 (file)
@@ -851,6 +851,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff     = 0;
                newicsk->icsk_probes_out  = 0;
+               newicsk->icsk_probes_tstamp = 0;
 
                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
index 64594aa755f0549081185575c5108e753f50749c..76a420c76f16e701d36e0ffe235fc19e0ccd8235 100644 (file)
@@ -317,7 +317,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
        }
 
        dev->needed_headroom = t_hlen + hlen;
-       mtu -= (dev->hard_header_len + t_hlen);
+       mtu -= t_hlen;
 
        if (mtu < IPV4_MIN_MTU)
                mtu = IPV4_MIN_MTU;
@@ -347,7 +347,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
        nt = netdev_priv(dev);
        t_hlen = nt->hlen + sizeof(struct iphdr);
        dev->min_mtu = ETH_MIN_MTU;
-       dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+       dev->max_mtu = IP_MAX_MTU - t_hlen;
        ip_tunnel_add(itn, nt);
        return nt;
 
@@ -488,11 +488,10 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
        int mtu;
 
        tunnel_hlen = md ? tunnel_hlen : tunnel->hlen;
-       pkt_size = skb->len - tunnel_hlen - dev->hard_header_len;
+       pkt_size = skb->len - tunnel_hlen;
 
        if (df)
-               mtu = dst_mtu(&rt->dst) - dev->hard_header_len
-                                       - sizeof(struct iphdr) - tunnel_hlen;
+               mtu = dst_mtu(&rt->dst) - (sizeof(struct iphdr) + tunnel_hlen);
        else
                mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
 
@@ -972,7 +971,7 @@ int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
-       int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
+       int max_mtu = IP_MAX_MTU - t_hlen;
 
        if (new_mtu < ETH_MIN_MTU)
                return -EINVAL;
@@ -1149,10 +1148,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
 
        mtu = ip_tunnel_bind_dev(dev);
        if (tb[IFLA_MTU]) {
-               unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
+               unsigned int max = IP_MAX_MTU - (nt->hlen + sizeof(struct iphdr));
 
-               mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
-                           (unsigned int)(max - sizeof(struct iphdr)));
+               mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU, max);
        }
 
        err = dev_set_mtu(dev, mtu);
index cc23f1ce239c28ac9c12ef1a5ef5316407e2bee9..8cd3224d913e0ca5cc0c227e813b5f279fed5545 100644 (file)
@@ -76,7 +76,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
        flow.daddr = iph->saddr;
        flow.saddr = rpfilter_get_saddr(iph->daddr);
        flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-       flow.flowi4_tos = RT_TOS(iph->tos);
+       flow.flowi4_tos = iph->tos & IPTOS_RT_MASK;
        flow.flowi4_scope = RT_SCOPE_UNIVERSE;
        flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
 
index ed42d2193c5c76bc9d48f36c13e72ca5be8aee1f..32545ecf2ab105739e9a751b5202a7b3c9d6b22e 100644 (file)
@@ -2937,6 +2937,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
        icsk->icsk_backoff = 0;
        icsk->icsk_probes_out = 0;
+       icsk->icsk_probes_tstamp = 0;
        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        icsk->icsk_rto_min = TCP_RTO_MIN;
        icsk->icsk_delack_max = TCP_DELACK_MAX;
index c7e16b0ed791fcbd864860d6216339542e286929..9b44caa4b956221a8d911fa8d0d8a6c51bad5829 100644 (file)
@@ -2859,7 +2859,8 @@ static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
        } else if (tcp_is_rack(sk)) {
                u32 prior_retrans = tp->retrans_out;
 
-               tcp_rack_mark_lost(sk);
+               if (tcp_rack_mark_lost(sk))
+                       *ack_flag &= ~FLAG_SET_XMIT_TIMER;
                if (prior_retrans > tp->retrans_out)
                        *ack_flag |= FLAG_LOST_RETRANS;
        }
@@ -3384,6 +3385,7 @@ static void tcp_ack_probe(struct sock *sk)
                return;
        if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
                icsk->icsk_backoff = 0;
+               icsk->icsk_probes_tstamp = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
                /* Socket must be waked up by subsequent tcp_data_snd_check().
                 * This function is not for random using!
@@ -3391,8 +3393,8 @@ static void tcp_ack_probe(struct sock *sk)
        } else {
                unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
 
-               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-                                    when, TCP_RTO_MAX);
+               when = tcp_clamp_probe0_to_user_timeout(sk, when);
+               tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
        }
 }
 
@@ -3815,9 +3817,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
-       /* If needed, reset TLP/RTO timer; RACK may later override this. */
-       if (flag & FLAG_SET_XMIT_TIMER)
-               tcp_set_xmit_timer(sk);
 
        if (tcp_ack_is_dubious(sk, flag)) {
                if (!(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP))) {
@@ -3830,6 +3829,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                                      &rexmit);
        }
 
+       /* If needed, reset TLP/RTO timer when RACK hasn't set it. */
+       if (flag & FLAG_SET_XMIT_TIMER)
+               tcp_set_xmit_timer(sk);
+
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
                sk_dst_confirm(sk);
 
@@ -4396,10 +4399,9 @@ static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
         * The receiver remembers and reflects via DSACKs. Leverage the
         * DSACK state and change the txhash to re-route speculatively.
         */
-       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq) {
-               sk_rethink_txhash(sk);
+       if (TCP_SKB_CB(skb)->seq == tcp_sk(sk)->duplicate_sack[0].start_seq &&
+           sk_rethink_txhash(sk))
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
-       }
 }
 
 static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
index 58207c7769d05693b650e3c93e4ef405a5d4b23a..777306b5bc224d6e36da13641b5cdf4f0dba81fd 100644 (file)
@@ -1595,6 +1595,8 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                tcp_move_syn(newtp, req);
                ireq->ireq_opt = NULL;
        } else {
+               newinet->inet_opt = NULL;
+
                if (!req_unhash && found_dup_sk) {
                        /* This code path should only be executed in the
                         * syncookie case
@@ -1602,8 +1604,6 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                        bh_unlock_sock(newsk);
                        sock_put(newsk);
                        newsk = NULL;
-               } else {
-                       newinet->inet_opt = NULL;
                }
        }
        return newsk;
@@ -1760,6 +1760,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
        u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
+       u32 tail_gso_size, tail_gso_segs;
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ -1767,6 +1768,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
        unsigned int hdrlen;
        bool fragstolen;
        u32 gso_segs;
+       u32 gso_size;
        int delta;
 
        /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
@@ -1792,13 +1794,6 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
         */
        th = (const struct tcphdr *)skb->data;
        hdrlen = th->doff * 4;
-       shinfo = skb_shinfo(skb);
-
-       if (!shinfo->gso_size)
-               shinfo->gso_size = skb->len - hdrlen;
-
-       if (!shinfo->gso_segs)
-               shinfo->gso_segs = 1;
 
        tail = sk->sk_backlog.tail;
        if (!tail)
@@ -1821,6 +1816,15 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                goto no_coalesce;
 
        __skb_pull(skb, hdrlen);
+
+       shinfo = skb_shinfo(skb);
+       gso_size = shinfo->gso_size ?: skb->len;
+       gso_segs = shinfo->gso_segs ?: 1;
+
+       shinfo = skb_shinfo(tail);
+       tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
+       tail_gso_segs = shinfo->gso_segs ?: 1;
+
        if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
                TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -1847,11 +1851,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
                }
 
                /* Not as strict as GRO. We only need to carry mss max value */
-               skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
-                                                skb_shinfo(tail)->gso_size);
-
-               gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
-               skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+               shinfo->gso_size = max(gso_size, tail_gso_size);
+               shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
 
                sk->sk_backlog.len += delta;
                __NET_INC_STATS(sock_net(sk),
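The tcp_add_backlog() hunk above samples gso_size/gso_segs of both the incoming skb and the backlog tail before coalescing, treating zero as "one segment covering the whole payload", then keeps the larger mss and a segment count saturated at 0xFFFF. A minimal standalone sketch of that bookkeeping; struct gso_info and coalesce_gso() are made-up names, not kernel code:

#include <stdint.h>
#include <stdio.h>

struct gso_info {
	uint32_t gso_size;	/* 0 means "not a GSO packet" */
	uint32_t gso_segs;	/* 0 means "a single segment" */
};

static void coalesce_gso(struct gso_info *tail, uint32_t tail_payload,
			 const struct gso_info *skb, uint32_t skb_payload)
{
	uint32_t tail_size = tail->gso_size ? tail->gso_size : tail_payload;
	uint32_t tail_segs = tail->gso_segs ? tail->gso_segs : 1;
	uint32_t size = skb->gso_size ? skb->gso_size : skb_payload;
	uint32_t segs = skb->gso_segs ? skb->gso_segs : 1;
	uint32_t sum = tail_segs + segs;

	/* Carry only the max mss; the segment count saturates at 16 bits. */
	tail->gso_size = size > tail_size ? size : tail_size;
	tail->gso_segs = sum > 0xFFFF ? 0xFFFF : sum;
}

int main(void)
{
	struct gso_info tail = { 0, 0 }, skb = { 1448, 4 };

	coalesce_gso(&tail, 1000, &skb, 4 * 1448);
	printf("gso_size=%u gso_segs=%u\n", tail.gso_size, tail.gso_segs);
	return 0;
}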
index f322e798a3519153472434a0a4a85449a2da20ce..8478cf749821ade9c1abf52406634ed15be03a25 100644 (file)
@@ -4084,6 +4084,7 @@ void tcp_send_probe0(struct sock *sk)
                /* Cancel probe timer, if it is not required. */
                icsk->icsk_probes_out = 0;
                icsk->icsk_backoff = 0;
+               icsk->icsk_probes_tstamp = 0;
                return;
        }
 
@@ -4098,6 +4099,8 @@ void tcp_send_probe0(struct sock *sk)
                 */
                timeout = TCP_RESOURCE_PROBE_INTERVAL;
        }
+
+       timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
        tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
 }
 
index 177307a3081f9583e5e488e1a1d73035fb4bc3fd..6f1b4ac7fe99c4b256d45e23f7b96509dd597e77 100644 (file)
@@ -96,13 +96,13 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
        }
 }
 
-void tcp_rack_mark_lost(struct sock *sk)
+bool tcp_rack_mark_lost(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 timeout;
 
        if (!tp->rack.advanced)
-               return;
+               return false;
 
        /* Reset the advanced flag to avoid unnecessary queue scanning */
        tp->rack.advanced = 0;
@@ -112,6 +112,7 @@ void tcp_rack_mark_lost(struct sock *sk)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
+       return !!timeout;
 }
 
 /* Record the most recently (re)sent time among the (s)acked packets
index 6c62b9ea1320d9bbd26ed86b9f41de02fee6c491..4ef08079ccfa9d0e68f046b3cb329e7e3efdf1ff 100644 (file)
@@ -40,6 +40,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
        return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 }
 
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+       u32 remaining;
+       s32 elapsed;
+
+       if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
+               return when;
+
+       elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
+       if (unlikely(elapsed < 0))
+               elapsed = 0;
+       remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
+       remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
+
+       return min_t(u32, remaining, when);
+}
+
 /**
  *  tcp_write_err() - close socket and save error info
  *  @sk:  The socket the error has appeared on.
@@ -219,14 +237,8 @@ static int tcp_write_timeout(struct sock *sk)
        int retry_until;
 
        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
-               if (icsk->icsk_retransmits) {
-                       dst_negative_advice(sk);
-               } else {
-                       sk_rethink_txhash(sk);
-                       tp->timeout_rehash++;
-                       __NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPTIMEOUTREHASH);
-               }
+               if (icsk->icsk_retransmits)
+                       __dst_negative_advice(sk);
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
@@ -234,12 +246,7 @@ static int tcp_write_timeout(struct sock *sk)
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);
 
-                       dst_negative_advice(sk);
-               } else {
-                       sk_rethink_txhash(sk);
-                       tp->timeout_rehash++;
-                       __NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPTIMEOUTREHASH);
+                       __dst_negative_advice(sk);
                }
 
                retry_until = net->ipv4.sysctl_tcp_retries2;
@@ -270,6 +277,11 @@ static int tcp_write_timeout(struct sock *sk)
                return 1;
        }
 
+       if (sk_rethink_txhash(sk)) {
+               tp->timeout_rehash++;
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTREHASH);
+       }
+
        return 0;
 }
 
@@ -349,6 +361,7 @@ static void tcp_probe_timer(struct sock *sk)
 
        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
+               icsk->icsk_probes_tstamp = 0;
                return;
        }
 
@@ -360,13 +373,12 @@ static void tcp_probe_timer(struct sock *sk)
         * corresponding system limit. We also implement similar policy when
         * we use RTO to probe window in tcp_retransmit_timer().
         */
-       if (icsk->icsk_user_timeout) {
-               u32 elapsed = tcp_model_timeout(sk, icsk->icsk_probes_out,
-                                               tcp_probe0_base(sk));
-
-               if (elapsed >= icsk->icsk_user_timeout)
-                       goto abort;
-       }
+       if (!icsk->icsk_probes_tstamp)
+               icsk->icsk_probes_tstamp = tcp_jiffies32;
+       else if (icsk->icsk_user_timeout &&
+                (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
+                msecs_to_jiffies(icsk->icsk_user_timeout))
+               goto abort;
 
        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
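The tcp_timer.c changes above only affect sockets that set TCP_USER_TIMEOUT: zero-window probing is now bounded by the wall-clock user timeout instead of a modelled backoff. A minimal usage sketch of the socket option itself (Linux, value in milliseconds); the 30-second figure is just an example and error handling is kept to a minimum:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	unsigned int timeout_ms = 30 * 1000;	/* give up after 30s without progress */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
		       &timeout_ms, sizeof(timeout_ms)) < 0)
		perror("setsockopt(TCP_USER_TIMEOUT)");
	else
		printf("TCP_USER_TIMEOUT set to %u ms\n", timeout_ms);

	close(fd);
	return 0;
}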
index 7103b0a89756e24203261684e88432615c344581..69ea76578abb95c613ef20f4fc0751e1ff81ac7b 100644 (file)
@@ -2555,7 +2555,8 @@ int udp_v4_early_demux(struct sk_buff *skb)
                 */
                if (!inet_sk(sk)->inet_daddr && in_dev)
                        return ip_mc_validate_source(skb, iph->daddr,
-                                                    iph->saddr, iph->tos,
+                                                    iph->saddr,
+                                                    iph->tos & IPTOS_RT_MASK,
                                                     skb->dev, in_dev, &itag);
        }
        return 0;
index ff39e94781bfb825a6d24d7642424458550e6a12..cfc872689b9976d793ba5b893338ed323bf17c65 100644 (file)
@@ -187,8 +187,67 @@ out_unlock:
 }
 EXPORT_SYMBOL(skb_udp_tunnel_segment);
 
+static void __udpv4_gso_segment_csum(struct sk_buff *seg,
+                                    __be32 *oldip, __be32 *newip,
+                                    __be16 *oldport, __be16 *newport)
+{
+       struct udphdr *uh;
+       struct iphdr *iph;
+
+       if (*oldip == *newip && *oldport == *newport)
+               return;
+
+       uh = udp_hdr(seg);
+       iph = ip_hdr(seg);
+
+       if (uh->check) {
+               inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
+                                        true);
+               inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
+                                        false);
+               if (!uh->check)
+                       uh->check = CSUM_MANGLED_0;
+       }
+       *oldport = *newport;
+
+       csum_replace4(&iph->check, *oldip, *newip);
+       *oldip = *newip;
+}
+
+static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
+{
+       struct sk_buff *seg;
+       struct udphdr *uh, *uh2;
+       struct iphdr *iph, *iph2;
+
+       seg = segs;
+       uh = udp_hdr(seg);
+       iph = ip_hdr(seg);
+
+       if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
+           (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
+           (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
+           (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
+               return segs;
+
+       while ((seg = seg->next)) {
+               uh2 = udp_hdr(seg);
+               iph2 = ip_hdr(seg);
+
+               __udpv4_gso_segment_csum(seg,
+                                        &iph2->saddr, &iph->saddr,
+                                        &uh2->source, &uh->source);
+               __udpv4_gso_segment_csum(seg,
+                                        &iph2->daddr, &iph->daddr,
+                                        &uh2->dest, &uh->dest);
+       }
+
+       return segs;
+}
+
 static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
-                                             netdev_features_t features)
+                                             netdev_features_t features,
+                                             bool is_ipv6)
 {
        unsigned int mss = skb_shinfo(skb)->gso_size;
 
@@ -198,11 +257,11 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
 
        udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
 
-       return skb;
+       return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
 }
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
-                                 netdev_features_t features)
+                                 netdev_features_t features, bool is_ipv6)
 {
        struct sock *sk = gso_skb->sk;
        unsigned int sum_truesize = 0;
@@ -214,7 +273,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
        __be16 newlen;
 
        if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
-               return __udp_gso_segment_list(gso_skb, features);
+               return __udp_gso_segment_list(gso_skb, features, is_ipv6);
 
        mss = skb_shinfo(gso_skb)->gso_size;
        if (gso_skb->len <= sizeof(*uh) + mss)
@@ -328,7 +387,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                goto out;
 
        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-               return __udp_gso_segment(skb, features);
+               return __udp_gso_segment(skb, features, false);
 
        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
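__udpv4_gso_segment_csum() above fixes up per-segment addresses and ports with incremental checksum updates (inet_proto_csum_replace4()/csum_replace4()) rather than recomputing the whole checksum. A standalone sketch of that RFC 1624 arithmetic with invented helper names, showing that the incremental update matches a full recompute:

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over 16-bit words. */
static uint16_t csum_full(const uint16_t *p, int nwords)
{
	uint32_t sum = 0;

	while (nwords--)
		sum += *p++;
	return (uint16_t)~csum_fold(sum);
}

/* RFC 1624 incremental update: HC' = ~(~HC + ~m + m') */
static uint16_t csum_update(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;
	sum += new;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	uint16_t words[4] = { 0x1234, 0x5678, 0x9abc, 0x0000 };
	uint16_t check = csum_full(words, 4);
	uint16_t old = words[1];

	words[1] = 0x4242;	/* e.g. a rewritten port word */

	printf("full recompute: %04x, incremental update: %04x\n",
	       csum_full(words, 4), csum_update(check, old, words[1]));
	return 0;
}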
index eff2cacd52093962d17bb9798b21ed20d7b1a995..9edc5bb2d531aeffef448ef049ed8a745fe61544 100644 (file)
@@ -2467,8 +2467,9 @@ static void addrconf_add_mroute(struct net_device *dev)
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 8,
                .fc_flags = RTF_UP,
-               .fc_type = RTN_UNICAST,
+               .fc_type = RTN_MULTICAST,
                .fc_nlinfo.nl_net = dev_net(dev),
+               .fc_protocol = RTPROT_KERNEL,
        };
 
        ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
index c7bd7b1a04c13fb2379edcadfa99152e09d11ba3..faa823c242923ff36ebd4b048bdb2d84728df0e7 100644 (file)
@@ -42,7 +42,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
                        goto out;
 
                if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-                       return __udp_gso_segment(skb, features);
+                       return __udp_gso_segment(skb, features, true);
 
                mss = skb_shinfo(skb)->gso_size;
                if (unlikely(skb->len <= mss))
index c12dbc51ef5fe93b4a11c4b8e2a413c5e2dd0c85..ef9b4ac03e7b74e5ddad0ca2d3d337583fc25fee 100644 (file)
@@ -2902,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
                        break;
                if (!aalg->pfkey_supported)
                        continue;
-               if (aalg_tmpl_set(t, aalg) && aalg->available)
+               if (aalg_tmpl_set(t, aalg))
                        sz += sizeof(struct sadb_comb);
        }
        return sz + sizeof(struct sadb_prop);
@@ -2920,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                if (!ealg->pfkey_supported)
                        continue;
 
-               if (!(ealg_tmpl_set(t, ealg) && ealg->available))
+               if (!(ealg_tmpl_set(t, ealg)))
                        continue;
 
                for (k = 1; ; k++) {
@@ -2931,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
                        if (!aalg->pfkey_supported)
                                continue;
 
-                       if (aalg_tmpl_set(t, aalg) && aalg->available)
+                       if (aalg_tmpl_set(t, aalg))
                                sz += sizeof(struct sadb_comb);
                }
        }
index 40961889e9c01add01792690e61c16a04647cc4c..0511bbe4af7b4f6421de932a95bd72b55d4f8182 100644 (file)
@@ -122,6 +122,8 @@ static struct lapb_cb *lapb_create_cb(void)
 
        timer_setup(&lapb->t1timer, NULL, 0);
        timer_setup(&lapb->t2timer, NULL, 0);
+       lapb->t1timer_stop = true;
+       lapb->t2timer_stop = true;
 
        lapb->t1      = LAPB_DEFAULT_T1;
        lapb->t2      = LAPB_DEFAULT_T2;
@@ -129,6 +131,8 @@ static struct lapb_cb *lapb_create_cb(void)
        lapb->mode    = LAPB_DEFAULT_MODE;
        lapb->window  = LAPB_DEFAULT_WINDOW;
        lapb->state   = LAPB_STATE_0;
+
+       spin_lock_init(&lapb->lock);
        refcount_set(&lapb->refcnt, 1);
 out:
        return lapb;
@@ -178,11 +182,23 @@ int lapb_unregister(struct net_device *dev)
                goto out;
        lapb_put(lapb);
 
+       /* Wait for other refs to "lapb" to drop */
+       while (refcount_read(&lapb->refcnt) > 2)
+               usleep_range(1, 10);
+
+       spin_lock_bh(&lapb->lock);
+
        lapb_stop_t1timer(lapb);
        lapb_stop_t2timer(lapb);
 
        lapb_clear_queues(lapb);
 
+       spin_unlock_bh(&lapb->lock);
+
+       /* Wait for running timers to stop */
+       del_timer_sync(&lapb->t1timer);
+       del_timer_sync(&lapb->t2timer);
+
        __lapb_remove_cb(lapb);
 
        lapb_put(lapb);
@@ -201,6 +217,8 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        parms->t1      = lapb->t1 / HZ;
        parms->t2      = lapb->t2 / HZ;
        parms->n2      = lapb->n2;
@@ -219,6 +237,7 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms)
        else
                parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ;
 
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
        rc = LAPB_OK;
 out:
@@ -234,6 +253,8 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        rc = LAPB_INVALUE;
        if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1)
                goto out_put;
@@ -256,6 +277,7 @@ int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms)
 
        rc = LAPB_OK;
 out_put:
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
@@ -270,6 +292,8 @@ int lapb_connect_request(struct net_device *dev)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        rc = LAPB_OK;
        if (lapb->state == LAPB_STATE_1)
                goto out_put;
@@ -285,24 +309,18 @@ int lapb_connect_request(struct net_device *dev)
 
        rc = LAPB_OK;
 out_put:
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
 }
 EXPORT_SYMBOL(lapb_connect_request);
 
-int lapb_disconnect_request(struct net_device *dev)
+static int __lapb_disconnect_request(struct lapb_cb *lapb)
 {
-       struct lapb_cb *lapb = lapb_devtostruct(dev);
-       int rc = LAPB_BADTOKEN;
-
-       if (!lapb)
-               goto out;
-
        switch (lapb->state) {
        case LAPB_STATE_0:
-               rc = LAPB_NOTCONNECTED;
-               goto out_put;
+               return LAPB_NOTCONNECTED;
 
        case LAPB_STATE_1:
                lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev);
@@ -310,12 +328,10 @@ int lapb_disconnect_request(struct net_device *dev)
                lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
                lapb->state = LAPB_STATE_0;
                lapb_start_t1timer(lapb);
-               rc = LAPB_NOTCONNECTED;
-               goto out_put;
+               return LAPB_NOTCONNECTED;
 
        case LAPB_STATE_2:
-               rc = LAPB_OK;
-               goto out_put;
+               return LAPB_OK;
        }
 
        lapb_clear_queues(lapb);
@@ -328,8 +344,22 @@ int lapb_disconnect_request(struct net_device *dev)
        lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev);
        lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev);
 
-       rc = LAPB_OK;
-out_put:
+       return LAPB_OK;
+}
+
+int lapb_disconnect_request(struct net_device *dev)
+{
+       struct lapb_cb *lapb = lapb_devtostruct(dev);
+       int rc = LAPB_BADTOKEN;
+
+       if (!lapb)
+               goto out;
+
+       spin_lock_bh(&lapb->lock);
+
+       rc = __lapb_disconnect_request(lapb);
+
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
@@ -344,6 +374,8 @@ int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
        if (!lapb)
                goto out;
 
+       spin_lock_bh(&lapb->lock);
+
        rc = LAPB_NOTCONNECTED;
        if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4)
                goto out_put;
@@ -352,6 +384,7 @@ int lapb_data_request(struct net_device *dev, struct sk_buff *skb)
        lapb_kick(lapb);
        rc = LAPB_OK;
 out_put:
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
 out:
        return rc;
@@ -364,7 +397,9 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
        int rc = LAPB_BADTOKEN;
 
        if (lapb) {
+               spin_lock_bh(&lapb->lock);
                lapb_data_input(lapb, skb);
+               spin_unlock_bh(&lapb->lock);
                lapb_put(lapb);
                rc = LAPB_OK;
        }
@@ -435,6 +470,8 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
        if (!lapb)
                return NOTIFY_DONE;
 
+       spin_lock_bh(&lapb->lock);
+
        switch (event) {
        case NETDEV_UP:
                lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name);
@@ -454,7 +491,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
                break;
        case NETDEV_GOING_DOWN:
                if (netif_carrier_ok(dev))
-                       lapb_disconnect_request(dev);
+                       __lapb_disconnect_request(lapb);
                break;
        case NETDEV_DOWN:
                lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name);
@@ -489,6 +526,7 @@ static int lapb_device_event(struct notifier_block *this, unsigned long event,
                break;
        }
 
+       spin_unlock_bh(&lapb->lock);
        lapb_put(lapb);
        return NOTIFY_DONE;
 }
index 7a4d0715d1c32b842a7ba781da5f774d107ad92d..a966d29c772d9f09e73350718468e3e1a4331160 100644 (file)
@@ -82,7 +82,8 @@ void lapb_kick(struct lapb_cb *lapb)
                skb = skb_dequeue(&lapb->write_queue);
 
                do {
-                       if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
+                       skbn = skb_copy(skb, GFP_ATOMIC);
+                       if (!skbn) {
                                skb_queue_head(&lapb->write_queue, skb);
                                break;
                        }
index baa247fe4ed057a13b3a1fb257bf5685570f35f4..0230b272b7d1db9ef1864f4f6a57948593f61346 100644 (file)
@@ -40,6 +40,7 @@ void lapb_start_t1timer(struct lapb_cb *lapb)
        lapb->t1timer.function = lapb_t1timer_expiry;
        lapb->t1timer.expires  = jiffies + lapb->t1;
 
+       lapb->t1timer_stop = false;
        add_timer(&lapb->t1timer);
 }
 
@@ -50,16 +51,19 @@ void lapb_start_t2timer(struct lapb_cb *lapb)
        lapb->t2timer.function = lapb_t2timer_expiry;
        lapb->t2timer.expires  = jiffies + lapb->t2;
 
+       lapb->t2timer_stop = false;
        add_timer(&lapb->t2timer);
 }
 
 void lapb_stop_t1timer(struct lapb_cb *lapb)
 {
+       lapb->t1timer_stop = true;
        del_timer(&lapb->t1timer);
 }
 
 void lapb_stop_t2timer(struct lapb_cb *lapb)
 {
+       lapb->t2timer_stop = true;
        del_timer(&lapb->t2timer);
 }
 
@@ -72,16 +76,31 @@ static void lapb_t2timer_expiry(struct timer_list *t)
 {
        struct lapb_cb *lapb = from_timer(lapb, t, t2timer);
 
+       spin_lock_bh(&lapb->lock);
+       if (timer_pending(&lapb->t2timer)) /* A new timer has been set up */
+               goto out;
+       if (lapb->t2timer_stop) /* The timer has been stopped */
+               goto out;
+
        if (lapb->condition & LAPB_ACK_PENDING_CONDITION) {
                lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
                lapb_timeout_response(lapb);
        }
+
+out:
+       spin_unlock_bh(&lapb->lock);
 }
 
 static void lapb_t1timer_expiry(struct timer_list *t)
 {
        struct lapb_cb *lapb = from_timer(lapb, t, t1timer);
 
+       spin_lock_bh(&lapb->lock);
+       if (timer_pending(&lapb->t1timer)) /* A new timer has been set up */
+               goto out;
+       if (lapb->t1timer_stop) /* The timer has been stopped */
+               goto out;
+
        switch (lapb->state) {
 
                /*
@@ -108,7 +127,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb->state = LAPB_STATE_0;
                                lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                if (lapb->mode & LAPB_EXTENDED) {
@@ -132,7 +151,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb->state = LAPB_STATE_0;
                                lapb_disconnect_confirmation(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S2 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                lapb_dbg(1, "(%p) S2 TX DISC(1)\n", lapb->dev);
@@ -150,7 +169,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb_stop_t2timer(lapb);
                                lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S3 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                lapb_requeue_frames(lapb);
@@ -167,7 +186,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
                                lapb->state = LAPB_STATE_0;
                                lapb_disconnect_indication(lapb, LAPB_TIMEDOUT);
                                lapb_dbg(0, "(%p) S4 -> S0\n", lapb->dev);
-                               return;
+                               goto out;
                        } else {
                                lapb->n2count++;
                                lapb_transmit_frmr(lapb);
@@ -176,4 +195,7 @@ static void lapb_t1timer_expiry(struct timer_list *t)
        }
 
        lapb_start_t1timer(lapb);
+
+out:
+       spin_unlock_bh(&lapb->lock);
 }
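The lapb_t1timer_expiry()/lapb_t2timer_expiry() changes take lapb->lock and bail out when the timer was re-armed or the matching *_stop flag was set, closing the race between an already-running expiry handler and lapb_stop_t1timer()/lapb_stop_t2timer(). A hedged POSIX-threads sketch of the same "check a stop flag under the lock inside the callback" pattern; every name below is invented for illustration (build with cc -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct demo_timer {
	pthread_mutex_t lock;
	bool stop;		/* set by the canceller, under the lock */
	pthread_t thread;
};

static void *demo_timer_fn(void *arg)
{
	struct demo_timer *t = arg;

	usleep(100 * 1000);	/* stand-in for the timer delay */

	pthread_mutex_lock(&t->lock);
	if (t->stop) {		/* cancelled after we were already scheduled */
		pthread_mutex_unlock(&t->lock);
		return NULL;
	}
	printf("timer expired, doing protected work\n");
	pthread_mutex_unlock(&t->lock);
	return NULL;
}

int main(void)
{
	struct demo_timer t = { .lock = PTHREAD_MUTEX_INITIALIZER, .stop = false };

	pthread_create(&t.thread, NULL, demo_timer_fn, &t);

	/* "Stop" the timer: the callback may already be running, but it will
	 * observe t.stop under the lock and back out instead of racing. */
	pthread_mutex_lock(&t.lock);
	t.stop = true;
	pthread_mutex_unlock(&t.lock);

	pthread_join(t.thread, NULL);
	return 0;
}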
index 48f144f107d536eb96d08f438ad21e5a9e3c8c1e..9e723d943421907abb6664bbdad92c9b416efde0 100644 (file)
@@ -120,18 +120,17 @@ static ssize_t aqm_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[100];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = '\0';
-       len = strlen(buf);
-       if (len > 0 && buf[len-1] == '\n')
-               buf[len-1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1)
                return count;
@@ -177,18 +176,17 @@ static ssize_t airtime_flags_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[16];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = 0;
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (kstrtou16(buf, 0, &local->airtime_flags))
                return -EINVAL;
@@ -237,20 +235,19 @@ static ssize_t aql_txq_limit_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[100];
-       size_t len;
        u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old;
        struct sta_info *sta;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = 0;
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3)
                return -EINVAL;
@@ -306,18 +303,17 @@ static ssize_t force_tx_status_write(struct file *file,
 {
        struct ieee80211_local *local = file->private_data;
        char buf[3];
-       size_t len;
 
-       if (count > sizeof(buf))
+       if (count >= sizeof(buf))
                return -EINVAL;
 
        if (copy_from_user(buf, user_buf, count))
                return -EFAULT;
 
-       buf[sizeof(buf) - 1] = '\0';
-       len = strlen(buf);
-       if (len > 0 && buf[len - 1] == '\n')
-               buf[len - 1] = 0;
+       if (count && buf[count - 1] == '\n')
+               buf[count - 1] = '\0';
+       else
+               buf[count] = '\0';
 
        if (buf[0] == '0' && buf[1] == '\0')
                local->force_tx_status = 0;
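The mac80211 debugfs writers above drop the strlen() on a possibly unterminated buffer: they reject count >= sizeof(buf), terminate at count, and strip at most one trailing newline. A small userspace analogue of that bounded-copy-and-terminate pattern; parse_small_write() is a hypothetical helper, not mac80211 code:

#include <stdio.h>
#include <string.h>

static int parse_small_write(const char *data, size_t count, unsigned int *value)
{
	char buf[16];

	if (count >= sizeof(buf))	/* need one byte for the terminator */
		return -1;

	memcpy(buf, data, count);
	if (count && buf[count - 1] == '\n')
		buf[count - 1] = '\0';
	else
		buf[count] = '\0';

	return sscanf(buf, "%u", value) == 1 ? 0 : -1;
}

int main(void)
{
	unsigned int v;

	if (parse_small_write("42\n", 3, &v) == 0)
		printf("parsed %u\n", v);
	return 0;
}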
index c9a8a2433e8ac51f9c63827887113b357ea23a95..48322e45e7ddb56f82a6c762fed757368cbdeb91 100644 (file)
@@ -125,8 +125,11 @@ int drv_sta_state(struct ieee80211_local *local,
        } else if (old_state == IEEE80211_STA_AUTH &&
                   new_state == IEEE80211_STA_ASSOC) {
                ret = drv_sta_add(local, sdata, &sta->sta);
-               if (ret == 0)
+               if (ret == 0) {
                        sta->uploaded = true;
+                       if (rcu_access_pointer(sta->sta.rates))
+                               drv_sta_rate_tbl_update(local, sdata, &sta->sta);
+               }
        } else if (old_state == IEEE80211_STA_ASSOC &&
                   new_state == IEEE80211_STA_AUTH) {
                drv_sta_remove(local, sdata, &sta->sta);
index 8bf9c0e974d6284390840970f0bdcecbd9476423..8e281c2e644d5ebc75aa4529e3bb996bea0c8fc1 100644 (file)
@@ -1078,6 +1078,7 @@ enum queue_stop_reason {
        IEEE80211_QUEUE_STOP_REASON_FLUSH,
        IEEE80211_QUEUE_STOP_REASON_TDLS_TEARDOWN,
        IEEE80211_QUEUE_STOP_REASON_RESERVE_TID,
+       IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE,
 
        IEEE80211_QUEUE_STOP_REASONS,
 };
index 3b9ec4ef81c3baa6d44261dd339e4fe6964b56d9..b31417f40bd56f4df116c7b91647275883eb86a8 100644 (file)
@@ -1617,6 +1617,10 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
        if (ret)
                return ret;
 
+       ieee80211_stop_vif_queues(local, sdata,
+                                 IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
+       synchronize_net();
+
        ieee80211_do_stop(sdata, false);
 
        ieee80211_teardown_sdata(sdata);
@@ -1639,6 +1643,8 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
        err = ieee80211_do_open(&sdata->wdev, false);
        WARN(err, "type change: do_open returned %d", err);
 
+       ieee80211_wake_vif_queues(local, sdata,
+                                 IEEE80211_QUEUE_STOP_REASON_IFTYPE_CHANGE);
        return ret;
 }
 
index 45927202c71c6e0664331e2347cb40c9ed1170c3..63652c39c8e07df537901b3f19a4e6d979d4c5a3 100644 (file)
@@ -960,7 +960,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
        if (old)
                kfree_rcu(old, rcu_head);
 
-       drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
+       if (sta->uploaded)
+               drv_sta_rate_tbl_update(hw_to_local(hw), sta->sdata, pubsta);
 
        ieee80211_sta_set_expected_throughput(pubsta, sta_get_expected_throughput(sta));
 
index 13b9bcc4865deb38644d15bbe0cdc4ee28f5bdd2..972895e9f22dc5e4b977087bba880f9a40fcdcdb 100644 (file)
@@ -4176,6 +4176,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
 
        rcu_read_lock();
        key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+       if (!key)
+               key = rcu_dereference(sdata->default_unicast_key);
        if (key) {
                switch (key->conf.cipher) {
                case WLAN_CIPHER_SUITE_TKIP:
index ae1cb2c6872245e03d7f9805cb4e46c2c6ad52b2..76747bfdaddd08ec02e207a0fb603976b443dafe 100644 (file)
@@ -133,16 +133,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
        }
 
        if (wide_bw_chansw_ie) {
+               u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
                struct ieee80211_vht_operation vht_oper = {
                        .chan_width =
                                wide_bw_chansw_ie->new_channel_width,
                        .center_freq_seg0_idx =
                                wide_bw_chansw_ie->new_center_freq_seg0,
-                       .center_freq_seg1_idx =
-                               wide_bw_chansw_ie->new_center_freq_seg1,
+                       .center_freq_seg1_idx = new_seg1,
                        /* .basic_mcs_set doesn't matter */
                };
-               struct ieee80211_ht_operation ht_oper = {};
+               struct ieee80211_ht_operation ht_oper = {
+                       .operation_mode =
+                               cpu_to_le16(new_seg1 <<
+                                           IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
+               };
 
                /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
                 * to the previously parsed chandef
index 6422da6690f795890286113c3e6a46fbe271d4d7..ebb3228ce9718fd52bb2e481874508397fea47d0 100644 (file)
@@ -649,7 +649,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
                if (!skip_hw && tx->key &&
                    tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
                        info->control.hw_key = &tx->key->conf;
-       } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
+       } else if (ieee80211_is_data_present(hdr->frame_control) && tx->sta &&
                   test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
                return TX_DROP;
        }
@@ -3809,7 +3809,7 @@ void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
                 * get immediately moved to the back of the list on the next
                 * call to ieee80211_next_txq().
                 */
-               if (txqi->txq.sta &&
+               if (txqi->txq.sta && local->airtime_flags &&
                    wiphy_ext_feature_isset(local->hw.wiphy,
                                            NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
                        list_add(&txqi->schedule_order,
@@ -4251,7 +4251,6 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
        struct ethhdr *ehdr = (struct ethhdr *)skb->data;
        struct ieee80211_key *key;
        struct sta_info *sta;
-       bool offload = true;
 
        if (unlikely(skb->len < ETH_HLEN)) {
                kfree_skb(skb);
@@ -4267,18 +4266,22 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
 
        if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
            !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
-               sdata->control_port_protocol == ehdr->h_proto))
-               offload = false;
-       else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
-                (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
-                 key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
-               offload = false;
-
-       if (offload)
-               ieee80211_8023_xmit(sdata, dev, sta, key, skb);
-       else
-               ieee80211_subif_start_xmit(skb, dev);
+           sdata->control_port_protocol == ehdr->h_proto))
+               goto skip_offload;
+
+       key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+       if (!key)
+               key = rcu_dereference(sdata->default_unicast_key);
+
+       if (key && (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+                   key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+               goto skip_offload;
+
+       ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+       goto out;
 
+skip_offload:
+       ieee80211_subif_start_xmit(skb, dev);
 out:
        rcu_read_unlock();
 
index 15c467f1a9dd9daf84b8d3b21f0f070924d06fb5..8d3aa97b52e71950ed34eacb9dc69d0cee1ab7ef 100644 (file)
@@ -5235,9 +5235,8 @@ static void nf_tables_set_elem_destroy(const struct nft_ctx *ctx,
        kfree(elem);
 }
 
-static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
-                                  struct nft_set *set,
-                                  struct nft_expr *expr_array[])
+int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
+                           struct nft_expr *expr_array[])
 {
        struct nft_expr *expr;
        int err, i, k;
index 0b053f75cd6047c1ba619dc540b7cf544699786d..d164ef9e6843976b8abcfd65ce95f3a37ade2b35 100644 (file)
@@ -295,6 +295,12 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                        err = -EOPNOTSUPP;
                        goto err_expr_free;
                }
+       } else if (set->num_exprs > 0) {
+               err = nft_set_elem_expr_clone(ctx, set, priv->expr_array);
+               if (err < 0)
+                       return err;
+
+               priv->num_exprs = set->num_exprs;
        }
 
        nft_set_ext_prepare(&priv->tmpl);
@@ -306,8 +312,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
                nft_dynset_ext_add_expr(priv);
 
        if (set->flags & NFT_SET_TIMEOUT) {
-               if (timeout || set->timeout)
+               if (timeout || set->timeout) {
+                       nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_TIMEOUT);
                        nft_set_ext_add(&priv->tmpl, NFT_SET_EXT_EXPIRATION);
+               }
        }
 
        priv->timeout = timeout;
@@ -376,22 +384,25 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
                         nf_jiffies64_to_msecs(priv->timeout),
                         NFTA_DYNSET_PAD))
                goto nla_put_failure;
-       if (priv->num_exprs == 1) {
-               if (nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr_array[0]))
-                       goto nla_put_failure;
-       } else if (priv->num_exprs > 1) {
-               struct nlattr *nest;
-
-               nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
-               if (!nest)
-                       goto nla_put_failure;
-
-               for (i = 0; i < priv->num_exprs; i++) {
-                       if (nft_expr_dump(skb, NFTA_LIST_ELEM,
-                                         priv->expr_array[i]))
+       if (priv->set->num_exprs == 0) {
+               if (priv->num_exprs == 1) {
+                       if (nft_expr_dump(skb, NFTA_DYNSET_EXPR,
+                                         priv->expr_array[0]))
                                goto nla_put_failure;
+               } else if (priv->num_exprs > 1) {
+                       struct nlattr *nest;
+
+                       nest = nla_nest_start_noflag(skb, NFTA_DYNSET_EXPRESSIONS);
+                       if (!nest)
+                               goto nla_put_failure;
+
+                       for (i = 0; i < priv->num_exprs; i++) {
+                               if (nft_expr_dump(skb, NFTA_LIST_ELEM,
+                                                 priv->expr_array[i]))
+                                       goto nla_put_failure;
+                       }
+                       nla_nest_end(skb, nest);
                }
-               nla_nest_end(skb, nest);
        }
        if (nla_put_be32(skb, NFTA_DYNSET_FLAGS, htonl(flags)))
                goto nla_put_failure;
index e64727e1a72f9c2ffb830de8492b1e64d2d7963b..02a1f13f07980fd16bef7008a184c891369f3626 100644 (file)
@@ -508,7 +508,7 @@ static int nci_open_device(struct nci_dev *ndev)
                };
                unsigned long opt = 0;
 
-               if (!(ndev->nci_ver & NCI_VER_2_MASK))
+               if (ndev->nci_ver & NCI_VER_2_MASK)
                        opt = (unsigned long)&nci_init_v2_cmd;
 
                rc = __nci_request(ndev, nci_init_req, opt,
index 573b38ad2f8ed91e1f46d12213d7890cf9ca2c6c..e161ef2d4720d523ea5ff62e598bafd99872db97 100644 (file)
@@ -852,6 +852,7 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
 
        if (!dev->polling) {
                device_unlock(&dev->dev);
+               nfc_put_device(dev);
                return -EINVAL;
        }
 
index 955c195ae14bcf5adbd0cc9c94989c890dc8d6da..9c7eb8455ba8eb275133f0d0bfc0b8d5a6fbdd70 100644 (file)
@@ -105,7 +105,7 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
        if (addr->target_idx > dev->target_next_idx - 1 ||
            addr->target_idx < dev->target_next_idx - dev->n_targets) {
                rc = -EINVAL;
-               goto error;
+               goto put_dev;
        }
 
        rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
index 1d0afb1dd77b5cfc78b46a88451b15c4f1a44069..6f1a50d50d06da3924370492bc829e4391fd3d9c 100644 (file)
@@ -565,6 +565,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args,
        if (args->nr_local == 0)
                return -EINVAL;
 
+       if (args->nr_local > UIO_MAXIOV)
+               return -EMSGSIZE;
+
        iov->iov = kcalloc(args->nr_local,
                           sizeof(struct rds_iovec),
                           GFP_KERNEL);
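The rds_rdma_extra_size() hunk bounds the user-supplied args->nr_local against UIO_MAXIOV before it sizes an allocation. A tiny sketch of that guard; DEMO_MAXIOV mirrors the kernel's 1024-entry iovec limit and alloc_iovecs() is a made-up name:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAXIOV 1024	/* mirrors the kernel's UIO_MAXIOV */

static void *alloc_iovecs(unsigned long nr, size_t each)
{
	if (nr == 0 || nr > DEMO_MAXIOV)	/* reject absurd user-supplied counts */
		return NULL;
	return calloc(nr, each);
}

int main(void)
{
	void *ok  = alloc_iovecs(8, 16);
	void *bad = alloc_iovecs(1UL << 20, 16);

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}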
index 0a2f4817ec6cf2b8b2847aa26ccd7f16e4c6f10d..41671af6b33f91e9f77af4dc0c9d8f813247e4f3 100644 (file)
@@ -990,7 +990,7 @@ static int __init af_rxrpc_init(void)
                goto error_security;
        }
 
-       ret = register_pernet_subsys(&rxrpc_net_ops);
+       ret = register_pernet_device(&rxrpc_net_ops);
        if (ret)
                goto error_pernet;
 
@@ -1035,7 +1035,7 @@ error_key_type:
 error_sock:
        proto_unregister(&rxrpc_proto);
 error_proto:
-       unregister_pernet_subsys(&rxrpc_net_ops);
+       unregister_pernet_device(&rxrpc_net_ops);
 error_pernet:
        rxrpc_exit_security();
 error_security:
@@ -1057,7 +1057,7 @@ static void __exit af_rxrpc_exit(void)
        unregister_key_type(&key_type_rxrpc);
        sock_unregister(PF_RXRPC);
        proto_unregister(&rxrpc_proto);
-       unregister_pernet_subsys(&rxrpc_net_ops);
+       unregister_pernet_device(&rxrpc_net_ops);
        ASSERTCMP(atomic_read(&rxrpc_n_tx_skbs), ==, 0);
        ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);
 
index 382add72c66f6df606c552d4109fa3aa9bfcb6aa..1ae90fb979362b8a585b5a5f984de91ba871220e 100644 (file)
@@ -197,6 +197,7 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
        tail = b->peer_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_peer *peer = b->peer_backlog[tail];
+               rxrpc_put_local(peer->local);
                kfree(peer);
                tail = (tail + 1) & (size - 1);
        }
index 1319986693fc8826330e0a7dc49143d1d7518d64..84f932532db7dc39e23d946cd73e97ba042795a4 100644 (file)
@@ -1272,6 +1272,10 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
 
                nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
+               if (!nla_ok(nla_opt_msk, msk_depth)) {
+                       NL_SET_ERR_MSG(extack, "Invalid nested attribute for masks");
+                       return -EINVAL;
+               }
        }
 
        nla_for_each_attr(nla_opt_key, nla_enc_key,
@@ -1307,9 +1311,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_VXLAN:
                        if (key->enc_opts.dst_opt_type) {
@@ -1340,9 +1341,6 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                case TCA_FLOWER_KEY_ENC_OPTS_ERSPAN:
                        if (key->enc_opts.dst_opt_type) {
@@ -1373,14 +1371,20 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                                NL_SET_ERR_MSG(extack, "Key and mask miss aligned");
                                return -EINVAL;
                        }
-
-                       if (msk_depth)
-                               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
                        break;
                default:
                        NL_SET_ERR_MSG(extack, "Unknown tunnel option type");
                        return -EINVAL;
                }
+
+               if (!msk_depth)
+                       continue;
+
+               if (!nla_ok(nla_opt_msk, msk_depth)) {
+                       NL_SET_ERR_MSG(extack, "A mask attribute is invalid");
+                       return -EINVAL;
+               }
+               nla_opt_msk = nla_next(nla_opt_msk, &msk_depth);
        }
 
        return 0;
index 78bec347b8b66f660e620dd715d0eb68f9bcd2d3..c4007b9cd16d6a200d943e3e0536d6b20022ba77 100644 (file)
@@ -366,9 +366,13 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        if (tb[TCA_TCINDEX_MASK])
                cp->mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
 
-       if (tb[TCA_TCINDEX_SHIFT])
+       if (tb[TCA_TCINDEX_SHIFT]) {
                cp->shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-
+               if (cp->shift > 16) {
+                       err = -EINVAL;
+                       goto errout;
+               }
+       }
        if (!cp->hash) {
                /* Hash not specified, use perfect hash if the upper limit
                 * of the hashing index is below the threshold.
index 51cb553e4317a3e2bca1996e0df004aab8111d58..6fe4e5cc807c90b046a16f014df43bfe841cbc43 100644 (file)
@@ -412,7 +412,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
 {
        struct qdisc_rate_table *rtab;
 
-       if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
+       if (tab == NULL || r->rate == 0 ||
+           r->cell_log == 0 || r->cell_log >= 32 ||
            nla_len(tab) != TC_RTAB_SIZE) {
                NL_SET_ERR_MSG(extack, "Invalid rate table parameters for searching");
                return NULL;
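Both the tcindex check (shift > 16) and the qdisc_get_rtab() check (cell_log >= 32) above reject shift counts that would be meaningless or undefined for the operand width. A minimal sketch of the same guard for a 32-bit value; checked_shift() is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Shifting a 32-bit operand by >= 32 is undefined behaviour in C, so the
 * shift count has to be validated before it is ever used.
 */
static int checked_shift(uint32_t val, unsigned int shift, uint32_t *out)
{
	if (shift >= 32)
		return -1;
	*out = val << shift;
	return 0;
}

int main(void)
{
	uint32_t r = 0;

	printf("shift 8:  %d\n", checked_shift(1, 8, &r));
	printf("shift 40: %d\n", checked_shift(1, 40, &r));	/* rejected */
	return 0;
}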
index 4ecc2a9595674a783140cc22e862fe5e6c5dc34e..5f42aa5fc612850b526c160ab5e5c75416862676 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/uaccess.h>
 #include <linux/hashtable.h>
 
+#include "auth_gss_internal.h"
 #include "../netns.h"
 
 #include <trace/events/rpcgss.h>
@@ -125,35 +126,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
        clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
 }
 
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, size_t len)
-{
-       const void *q = (const void *)((const char *)p + len);
-       if (unlikely(q > end || q < p))
-               return ERR_PTR(-EFAULT);
-       memcpy(res, p, len);
-       return q;
-}
-
-static inline const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
-{
-       const void *q;
-       unsigned int len;
-
-       p = simple_get_bytes(p, end, &len, sizeof(len));
-       if (IS_ERR(p))
-               return p;
-       q = (const void *)((const char *)p + len);
-       if (unlikely(q > end || q < p))
-               return ERR_PTR(-EFAULT);
-       dest->data = kmemdup(p, len, GFP_NOFS);
-       if (unlikely(dest->data == NULL))
-               return ERR_PTR(-ENOMEM);
-       dest->len = len;
-       return q;
-}
-
 static struct gss_cl_ctx *
 gss_cred_get_ctx(struct rpc_cred *cred)
 {
diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
new file mode 100644 (file)
index 0000000..f6d9631
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * linux/net/sunrpc/auth_gss/auth_gss_internal.h
+ *
+ * Internal definitions for RPCSEC_GSS client authentication
+ *
+ * Copyright (c) 2000 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ */
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/sunrpc/xdr.h>
+
+static inline const void *
+simple_get_bytes(const void *p, const void *end, void *res, size_t len)
+{
+       const void *q = (const void *)((const char *)p + len);
+       if (unlikely(q > end || q < p))
+               return ERR_PTR(-EFAULT);
+       memcpy(res, p, len);
+       return q;
+}
+
+static inline const void *
+simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
+{
+       const void *q;
+       unsigned int len;
+
+       p = simple_get_bytes(p, end, &len, sizeof(len));
+       if (IS_ERR(p))
+               return p;
+       q = (const void *)((const char *)p + len);
+       if (unlikely(q > end || q < p))
+               return ERR_PTR(-EFAULT);
+       if (len) {
+               dest->data = kmemdup(p, len, GFP_NOFS);
+               if (unlikely(dest->data == NULL))
+                       return ERR_PTR(-ENOMEM);
+       } else
+               dest->data = NULL;
+       dest->len = len;
+       return q;
+}
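simple_get_netobj() above parses a length-prefixed blob with bounds checks and now handles a zero length without calling kmemdup(). A userspace sketch of the same parsing, using malloc()/memcpy() in place of the kernel helpers; struct blob and get_netobj() are illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blob {
	unsigned int len;
	unsigned char *data;
};

/* Returns the new read position, or NULL if the buffer is too short or
 * allocation fails; a zero length yields data == NULL, as in the fix.
 */
static const unsigned char *get_netobj(const unsigned char *p,
				       const unsigned char *end,
				       struct blob *dest)
{
	unsigned int len;

	if ((size_t)(end - p) < sizeof(len))
		return NULL;
	memcpy(&len, p, sizeof(len));
	p += sizeof(len);

	if ((size_t)(end - p) < len)	/* declared length outruns the buffer */
		return NULL;

	if (len) {
		dest->data = malloc(len);
		if (!dest->data)
			return NULL;
		memcpy(dest->data, p, len);
	} else {
		dest->data = NULL;
	}
	dest->len = len;
	return p + len;
}

int main(void)
{
	unsigned char buf[sizeof(unsigned int) + 3];
	unsigned int len = 3;
	struct blob b = { 0, NULL };

	memcpy(buf, &len, sizeof(len));
	memcpy(buf + sizeof(len), "abc", 3);

	if (get_netobj(buf, buf + sizeof(buf), &b))
		printf("parsed %u bytes\n", b.len);
	free(b.data);
	return 0;
}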
index ae9acf3a73898dea708cbb01fd7cc76e8815abc9..1c092b05c2bba01e8181a4140d58eee2f1b057b7 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/gss_krb5_enctypes.h>
 
+#include "auth_gss_internal.h"
+
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY       RPCDBG_AUTH
 #endif
@@ -143,35 +145,6 @@ get_gss_krb5_enctype(int etype)
        return NULL;
 }
 
-static const void *
-simple_get_bytes(const void *p, const void *end, void *res, int len)
-{
-       const void *q = (const void *)((const char *)p + len);
-       if (unlikely(q > end || q < p))
-               return ERR_PTR(-EFAULT);
-       memcpy(res, p, len);
-       return q;
-}
-
-static const void *
-simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
-{
-       const void *q;
-       unsigned int len;
-
-       p = simple_get_bytes(p, end, &len, sizeof(len));
-       if (IS_ERR(p))
-               return p;
-       q = (const void *)((const char *)p + len);
-       if (unlikely(q > end || q < p))
-               return ERR_PTR(-EFAULT);
-       res->data = kmemdup(p, len, GFP_NOFS);
-       if (unlikely(res->data == NULL))
-               return ERR_PTR(-ENOMEM);
-       res->len = len;
-       return q;
-}
-
 static inline const void *
 get_key(const void *p, const void *end,
        struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
index 5fb9164aa69059f77158a9f6ec796381474641ae..dcc50ae545506f39d04908f6168c36ddda350f6f 100644 (file)
@@ -857,6 +857,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
        err = -EAGAIN;
        if (len <= 0)
                goto out_release;
+       trace_svc_xdr_recvfrom(&rqstp->rq_arg);
 
        clear_bit(XPT_OLD, &xprt->xpt_flags);
 
@@ -866,7 +867,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
        if (serv->sv_stats)
                serv->sv_stats->netcnt++;
-       trace_svc_xdr_recvfrom(rqstp, &rqstp->rq_arg);
        return len;
 out_release:
        rqstp->rq_res.len = 0;
@@ -904,7 +904,7 @@ int svc_send(struct svc_rqst *rqstp)
        xb->len = xb->head[0].iov_len +
                xb->page_len +
                xb->tail[0].iov_len;
-       trace_svc_xdr_sendto(rqstp, xb);
+       trace_svc_xdr_sendto(rqstp->rq_xid, xb);
        trace_svc_stats_latency(rqstp);
 
        len = xprt->xpt_ops->xpo_sendto(rqstp);
index c9766d07eb81a1cdee4cad8bfc4a393114543e6f..5a809c64dc7b9c5673969da158f1453e62a03d16 100644 (file)
@@ -1113,14 +1113,15 @@ static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
                unsigned int offset, len, remaining;
                struct bio_vec *bvec;
 
-               bvec = xdr->bvec;
-               offset = xdr->page_base;
+               bvec = xdr->bvec + (xdr->page_base >> PAGE_SHIFT);
+               offset = offset_in_page(xdr->page_base);
                remaining = xdr->page_len;
                flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
                while (remaining > 0) {
                        if (remaining <= PAGE_SIZE && tail->iov_len == 0)
                                flags = 0;
-                       len = min(remaining, bvec->bv_len);
+
+                       len = min(remaining, bvec->bv_len - offset);
                        ret = kernel_sendpage(sock, bvec->bv_page,
                                              bvec->bv_offset + offset,
                                              len, flags);
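The svc_tcp_sendmsg() fix starts from the bio_vec indexed by xdr->page_base >> PAGE_SHIFT and trims the first chunk by the in-page offset. A short sketch of that index/offset split; DEMO_PAGE_SIZE stands in for the real page size:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned int page_base = 5000;	/* byte offset into the page array */
	unsigned int index     = page_base >> DEMO_PAGE_SHIFT;
	unsigned int offset    = page_base & (DEMO_PAGE_SIZE - 1);
	unsigned int first_len = DEMO_PAGE_SIZE - offset;	/* usable bytes in the first page */

	printf("page index=%u in-page offset=%u first chunk=%u bytes\n",
	       index, offset, first_len);
	return 0;
}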
index 23d8685453627c569cc9aa1af5297f6318df4cad..2c1ffc9ba2eb2567da09139aa204e9c6db8f2d20 100644 (file)
@@ -460,10 +460,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
        extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
 
        if (check_cb(dev)) {
-               /* This flag is only checked if the return value is success. */
-               port_obj_info->handled = true;
-               return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
-                             extack);
+               err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+                            extack);
+               if (err != -EOPNOTSUPP)
+                       port_obj_info->handled = true;
+               return err;
        }
 
        /* Switch ports might be stacked under e.g. a LAG. Ignore the
@@ -515,9 +516,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
        int err = -EOPNOTSUPP;
 
        if (check_cb(dev)) {
-               /* This flag is only checked if the return value is success. */
-               port_obj_info->handled = true;
-               return del_cb(dev, port_obj_info->obj);
+               err = del_cb(dev, port_obj_info->obj);
+               if (err != -EOPNOTSUPP)
+                       port_obj_info->handled = true;
+               return err;
        }
 
        /* Switch ports might be stacked under e.g. a LAG. Ignore the
@@ -568,9 +570,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
        int err = -EOPNOTSUPP;
 
        if (check_cb(dev)) {
-               port_attr_info->handled = true;
-               return set_cb(dev, port_attr_info->attr,
-                             port_attr_info->trans);
+               err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
+               if (err != -EOPNOTSUPP)
+                       port_attr_info->handled = true;
+               return err;
        }
 
        /* Switch ports might be stacked under e.g. a LAG. Ignore the
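The switchdev hunks change the "handled" bookkeeping so the flag is set whenever the callback returned anything other than -EOPNOTSUPP, not only on success. A compact sketch of that dispatch pattern with invented names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct event {
	bool handled;
};

static int dispatch(struct event *ev, int (*cb)(struct event *ev))
{
	int err = cb(ev);

	if (err != -EOPNOTSUPP)		/* the callback claimed the event, even on error */
		ev->handled = true;
	return err;
}

static int cb_unsupported(struct event *ev) { (void)ev; return -EOPNOTSUPP; }
static int cb_ok(struct event *ev)          { (void)ev; return 0; }

int main(void)
{
	struct event a = { false }, b = { false };

	dispatch(&a, cb_unsupported);
	dispatch(&b, cb_ok);
	printf("a.handled=%d b.handled=%d\n", a.handled, b.handled);
	return 0;
}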
index b12d3a32224280e838827fc7ee25f931d5c7d549..6894f21dc147557ed1540120b1045d6e9d9956f3 100644 (file)
@@ -1014,9 +1014,12 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
                        mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        } else if (sock->type == SOCK_STREAM) {
-               const struct vsock_transport *transport = vsk->transport;
+               const struct vsock_transport *transport;
+
                lock_sock(sk);
 
+               transport = vsk->transport;
+
                /* Listening sockets that have connections in their accept
                 * queue can be read.
                 */
@@ -1099,10 +1102,11 @@ static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);
-       transport = vsk->transport;
 
        lock_sock(sk);
 
+       transport = vsk->transport;
+
        err = vsock_auto_bind(vsk);
        if (err)
                goto out;
@@ -1561,10 +1565,11 @@ static int vsock_stream_setsockopt(struct socket *sock,
        err = 0;
        sk = sock->sk;
        vsk = vsock_sk(sk);
-       transport = vsk->transport;
 
        lock_sock(sk);
 
+       transport = vsk->transport;
+
        switch (optname) {
        case SO_VM_SOCKETS_BUFFER_SIZE:
                COPY_IN(val);
@@ -1697,7 +1702,6 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 
        sk = sock->sk;
        vsk = vsock_sk(sk);
-       transport = vsk->transport;
        total_written = 0;
        err = 0;
 
@@ -1706,6 +1710,8 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
 
        lock_sock(sk);
 
+       transport = vsk->transport;
+
        /* Callers should not provide a destination with stream sockets. */
        if (msg->msg_namelen) {
                err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
@@ -1840,11 +1846,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 
        sk = sock->sk;
        vsk = vsock_sk(sk);
-       transport = vsk->transport;
        err = 0;
 
        lock_sock(sk);
 
+       transport = vsk->transport;
+
        if (!transport || sk->sk_state != TCP_ESTABLISHED) {
                /* Recvmsg is supposed to return 0 if a peer performs an
                 * orderly shutdown. Differentiate between that case and when a
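
The vsock hunks above all make the same change: vsk->transport is read only after lock_sock(sk), because the transport pointer can be reassigned (e.g. during connect) and an early read could use a stale value. A small pthread-based sketch of the same ordering, with hypothetical types standing in for the socket and transport:

#include <errno.h>
#include <pthread.h>

struct transport;				/* opaque, hypothetical */

struct sock_like {
	pthread_mutex_t lock;			/* stands in for lock_sock() */
	const struct transport *transport;	/* reassigned under the lock */
};

static int do_send(struct sock_like *sk)
{
	const struct transport *transport;
	int err = 0;

	pthread_mutex_lock(&sk->lock);

	/* snapshot the pointer only after taking the lock, mirroring
	 * the reordering in the diff above
	 */
	transport = sk->transport;
	if (!transport)
		err = -ENODEV;
	/* ... use transport while still holding the lock ... */

	pthread_mutex_unlock(&sk->lock);
	return err;
}
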
index bb72447ad960271ab1aaec43867202763f8bc6f6..8114bba8556c72a1ed173cb7c29232ed9dbdf7a0 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright      2017  Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2021 Intel Corporation
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -139,6 +139,11 @@ static const struct ieee80211_regdomain *get_cfg80211_regdom(void)
        return rcu_dereference_rtnl(cfg80211_regdomain);
 }
 
+/*
+ * Returns the regulatory domain associated with the wiphy.
+ *
+ * Requires either RTNL or RCU protection
+ */
 const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy)
 {
        return rcu_dereference_rtnl(wiphy->regd);
@@ -2571,9 +2576,13 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
        if (IS_ERR(new_regd))
                return;
 
+       rtnl_lock();
+
        tmp = get_wiphy_regdom(wiphy);
        rcu_assign_pointer(wiphy->regd, new_regd);
        rcu_free_regdom(tmp);
+
+       rtnl_unlock();
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
 
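A sketch of the writer-side pattern used in wiphy_apply_custom_regulatory() above, with a hypothetical wiphy-like structure: the RCU-managed pointer is swapped under the RTNL, and the old copy is freed only after a grace period (the driver code relies on rcu_free_regdom() for that; kfree_rcu() is used here as a stand-in).

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

struct regd_like {
	struct rcu_head rcu;
	int dfs_region;				/* hypothetical payload */
};

struct wiphy_like {
	struct regd_like __rcu *regd;
};

static void replace_regd(struct wiphy_like *w, struct regd_like *new_regd)
{
	struct regd_like *old;

	rtnl_lock();
	old = rtnl_dereference(w->regd);	/* writer holds RTNL */
	rcu_assign_pointer(w->regd, new_regd);
	rtnl_unlock();

	if (old)
		kfree_rcu(old, rcu);		/* free after a grace period */
}
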
index 69102fda9ebd46343a0444d7206799223e24a0fd..76a80a41615befda05f5e62a8b5f2ba1a2253d04 100644 (file)
@@ -896,8 +896,9 @@ out:
 int call_commit_handler(struct net_device *dev)
 {
 #ifdef CONFIG_WIRELESS_EXT
-       if ((netif_running(dev)) &&
-          (dev->wireless_handlers->standard[0] != NULL))
+       if (netif_running(dev) &&
+           dev->wireless_handlers &&
+           dev->wireless_handlers->standard[0])
                /* Call the commit handler on the driver */
                return dev->wireless_handlers->standard[0](dev, NULL,
                                                           NULL, NULL);
index 8037b04a9edd11cad8845d95a06b2d442c29f8f5..4a83117507f5a867412e350b27577a7832e16c22 100644 (file)
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(xsk_get_pool_from_qid);
 
 void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
 {
-       if (queue_id < dev->real_num_rx_queues)
+       if (queue_id < dev->num_rx_queues)
                dev->_rx[queue_id].pool = NULL;
-       if (queue_id < dev->real_num_tx_queues)
+       if (queue_id < dev->num_tx_queues)
                dev->_tx[queue_id].pool = NULL;
 }
 
index be6351e3f3cdfaa6c15d1b824907042d0365731d..1158cd0311d7d0efef498b9b8b6ffa3c8fd241a1 100644 (file)
@@ -660,7 +660,7 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (async && x->repl->recheck(x, skb, seq)) {
+               if (x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
index d622c2548d2295c0893b1b5fcbd84862f95fb8ea..b74f28cabe24f245266f97f32428edc2cb68673b 100644 (file)
@@ -793,15 +793,22 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a,
                                  const xfrm_address_t *b,
                                  u8 prefixlen, u16 family)
 {
+       u32 ma, mb, mask;
        unsigned int pdw, pbi;
        int delta = 0;
 
        switch (family) {
        case AF_INET:
-               if (sizeof(long) == 4 && prefixlen == 0)
-                       return ntohl(a->a4) - ntohl(b->a4);
-               return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
-                      (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
+               if (prefixlen == 0)
+                       return 0;
+               mask = ~0U << (32 - prefixlen);
+               ma = ntohl(a->a4) & mask;
+               mb = ntohl(b->a4) & mask;
+               if (ma < mb)
+                       delta = -1;
+               else if (ma > mb)
+                       delta = 1;
+               break;
        case AF_INET6:
                pdw = prefixlen >> 5;
                pbi = prefixlen & 0x1f;
@@ -812,10 +819,13 @@ static int xfrm_policy_addr_delta(const xfrm_address_t *a,
                                return delta;
                }
                if (pbi) {
-                       u32 mask = ~0u << (32 - pbi);
-
-                       delta = (ntohl(a->a6[pdw]) & mask) -
-                               (ntohl(b->a6[pdw]) & mask);
+                       mask = ~0U << (32 - pbi);
+                       ma = ntohl(a->a6[pdw]) & mask;
+                       mb = ntohl(b->a6[pdw]) & mask;
+                       if (ma < mb)
+                               delta = -1;
+                       else if (ma > mb)
+                               delta = 1;
                }
                break;
        default:
@@ -3078,8 +3088,8 @@ struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
                xflo.flags = flags;
 
                /* To accelerate a bit...  */
-               if ((dst_orig->flags & DST_NOXFRM) ||
-                   !net->xfrm.policy_count[XFRM_POLICY_OUT])
+               if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
+                              !net->xfrm.policy_count[XFRM_POLICY_OUT]))
                        goto nopol;
 
                xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
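
The xfrm_policy_addr_delta() change above replaces a subtraction of masked addresses, which can overflow the signed int return value and mis-order entries, with an explicit -1/0/1 comparison. A standalone sketch of that comparison for IPv4, using a hypothetical prefix_cmp() helper:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Compare two IPv4 addresses (network byte order) under a prefix mask,
 * returning -1/0/1 instead of a possibly-overflowing subtraction.
 */
static int prefix_cmp(uint32_t a_be, uint32_t b_be, unsigned int prefixlen)
{
	uint32_t mask, ma, mb;

	if (prefixlen == 0)
		return 0;
	mask = ~0U << (32 - prefixlen);
	ma = ntohl(a_be) & mask;
	mb = ntohl(b_be) & mask;
	if (ma < mb)
		return -1;
	return ma > mb ? 1 : 0;
}

int main(void)
{
	uint32_t a = htonl(0xC0A80001);	/* 192.168.0.1 */
	uint32_t b = htonl(0x0A000001);	/* 10.0.0.1 */

	printf("%d\n", prefix_cmp(a, b, 16));	/* 1: 192.168/16 sorts after 10.0/16 */
	return 0;
}
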
index bacc1111d871bc257c5f86bbf206430f64cb8d79..26c1cb725dcbe5c59197cc1e962a60d370a0deb6 100644 (file)
@@ -371,10 +371,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 {
        int size, ret;
        kuid_t kroot;
+       u32 nsmagic, magic;
        uid_t root, mappedroot;
        char *tmpbuf = NULL;
        struct vfs_cap_data *cap;
-       struct vfs_ns_cap_data *nscap;
+       struct vfs_ns_cap_data *nscap = NULL;
        struct dentry *dentry;
        struct user_namespace *fs_ns;
 
@@ -396,46 +397,61 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
        fs_ns = inode->i_sb->s_user_ns;
        cap = (struct vfs_cap_data *) tmpbuf;
        if (is_v2header((size_t) ret, cap)) {
-               /* If this is sizeof(vfs_cap_data) then we're ok with the
-                * on-disk value, so return that.  */
-               if (alloc)
-                       *buffer = tmpbuf;
-               else
-                       kfree(tmpbuf);
-               return ret;
-       } else if (!is_v3header((size_t) ret, cap)) {
-               kfree(tmpbuf);
-               return -EINVAL;
+               root = 0;
+       } else if (is_v3header((size_t) ret, cap)) {
+               nscap = (struct vfs_ns_cap_data *) tmpbuf;
+               root = le32_to_cpu(nscap->rootid);
+       } else {
+               size = -EINVAL;
+               goto out_free;
        }
 
-       nscap = (struct vfs_ns_cap_data *) tmpbuf;
-       root = le32_to_cpu(nscap->rootid);
        kroot = make_kuid(fs_ns, root);
 
        /* If the root kuid maps to a valid uid in current ns, then return
         * this as a nscap. */
        mappedroot = from_kuid(current_user_ns(), kroot);
        if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+               size = sizeof(struct vfs_ns_cap_data);
                if (alloc) {
-                       *buffer = tmpbuf;
+                       if (!nscap) {
+                               /* v2 -> v3 conversion */
+                               nscap = kzalloc(size, GFP_ATOMIC);
+                               if (!nscap) {
+                                       size = -ENOMEM;
+                                       goto out_free;
+                               }
+                               nsmagic = VFS_CAP_REVISION_3;
+                               magic = le32_to_cpu(cap->magic_etc);
+                               if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+                                       nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+                               memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+                               nscap->magic_etc = cpu_to_le32(nsmagic);
+                       } else {
+                               /* use allocated v3 buffer */
+                               tmpbuf = NULL;
+                       }
                        nscap->rootid = cpu_to_le32(mappedroot);
-               } else
-                       kfree(tmpbuf);
-               return size;
+                       *buffer = nscap;
+               }
+               goto out_free;
        }
 
        if (!rootid_owns_currentns(kroot)) {
-               kfree(tmpbuf);
-               return -EOPNOTSUPP;
+               size = -EOVERFLOW;
+               goto out_free;
        }
 
        /* This comes from a parent namespace.  Return as a v2 capability */
        size = sizeof(struct vfs_cap_data);
        if (alloc) {
-               *buffer = kmalloc(size, GFP_ATOMIC);
-               if (*buffer) {
-                       struct vfs_cap_data *cap = *buffer;
-                       __le32 nsmagic, magic;
+               if (nscap) {
+                       /* v3 -> v2 conversion */
+                       cap = kzalloc(size, GFP_ATOMIC);
+                       if (!cap) {
+                               size = -ENOMEM;
+                               goto out_free;
+                       }
                        magic = VFS_CAP_REVISION_2;
                        nsmagic = le32_to_cpu(nscap->magic_etc);
                        if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
@@ -443,9 +459,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
                        memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
                        cap->magic_etc = cpu_to_le32(magic);
                } else {
-                       size = -ENOMEM;
+                       /* use unconverted v2 */
+                       tmpbuf = NULL;
                }
+               *buffer = cap;
        }
+out_free:
        kfree(tmpbuf);
        return size;
 }
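
The cap_inode_getsecurity() rewrite above funnels every exit through a single out_free label; when a buffer is handed back to the caller, the temporary pointer is NULLed first so the shared kfree() becomes a no-op. A tiny userspace sketch of that ownership-transfer idiom (the function and sizes are illustrative only):

#include <errno.h>
#include <stdlib.h>

static int get_blob(void **buffer, int alloc)
{
	char *tmpbuf;
	int size = 64;

	tmpbuf = calloc(1, size);
	if (!tmpbuf)
		return -ENOMEM;

	/* ... fill or convert tmpbuf ... */

	if (alloc) {
		*buffer = tmpbuf;	/* ownership moves to the caller */
		tmpbuf = NULL;		/* ... so the common free is a no-op */
	}

	free(tmpbuf);			/* single cleanup path for all exits */
	return size;
}
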
index 9f3f8e953ff04ef7eb49b0928d1a09b918177c03..c4aac703dc224c0c58995239275fe2c515be445c 100644 (file)
@@ -382,8 +382,8 @@ retry:
                        continue;
 
                /*
-                * The 'deps' array includes maximum three dependencies
-                * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fourth
+                * The 'deps' array includes maximum four dependencies
+                * to SNDRV_PCM_HW_PARAM_XXXs for this rule. The fifth
                 * member of this array is a sentinel and should be
                 * negative value.
                 *
index 11554d0412f06f0d77b6cbe82a77ea051cb36eaf..1b8409ec2c97f69460441bc3fcdd6b700ca820d4 100644 (file)
@@ -611,7 +611,8 @@ snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_in
 
        if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
+               if (snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf))
+                       return -ENXIO;
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 6a0d070c60c9ac489a9c1e9d35e1262e0b4bbe62..c4568617251723c41090a7a188d3d7013007c747 100644 (file)
@@ -307,6 +307,10 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0xa0c8,
        },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x43c8,
+       },
 #endif
 
 /* Elkhart Lake */
index 687216e7452675e9fc9d9d888d7aa8ead2ba961b..eec1775dfffe9fc57278f75669337e5d703c28c5 100644 (file)
@@ -2934,7 +2934,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
        snd_hdac_leave_pm(&codec->core);
 }
 
-static int hda_codec_suspend(struct device *dev)
+static int hda_codec_runtime_suspend(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
        unsigned int state;
@@ -2953,7 +2953,7 @@ static int hda_codec_suspend(struct device *dev)
        return 0;
 }
 
-static int hda_codec_resume(struct device *dev)
+static int hda_codec_runtime_resume(struct device *dev)
 {
        struct hda_codec *codec = dev_to_hda_codec(dev);
 
@@ -2968,16 +2968,6 @@ static int hda_codec_resume(struct device *dev)
        return 0;
 }
 
-static int hda_codec_runtime_suspend(struct device *dev)
-{
-       return hda_codec_suspend(dev);
-}
-
-static int hda_codec_runtime_resume(struct device *dev)
-{
-       return hda_codec_resume(dev);
-}
-
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
@@ -2998,31 +2988,31 @@ static void hda_codec_pm_complete(struct device *dev)
 static int hda_codec_pm_suspend(struct device *dev)
 {
        dev->power.power_state = PMSG_SUSPEND;
-       return hda_codec_suspend(dev);
+       return pm_runtime_force_suspend(dev);
 }
 
 static int hda_codec_pm_resume(struct device *dev)
 {
        dev->power.power_state = PMSG_RESUME;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 
 static int hda_codec_pm_freeze(struct device *dev)
 {
        dev->power.power_state = PMSG_FREEZE;
-       return hda_codec_suspend(dev);
+       return pm_runtime_force_suspend(dev);
 }
 
 static int hda_codec_pm_thaw(struct device *dev)
 {
        dev->power.power_state = PMSG_THAW;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 
 static int hda_codec_pm_restore(struct device *dev)
 {
        dev->power.power_state = PMSG_RESTORE;
-       return hda_codec_resume(dev);
+       return pm_runtime_force_resume(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
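The HDA codec change above drops the duplicate suspend/resume wrappers and lets the system-sleep callbacks reuse the runtime-PM path. A sketch of that kind of wiring for a hypothetical driver, using the standard pm_runtime_force_suspend()/pm_runtime_force_resume() helpers:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* device-specific power-down goes here */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* device-specific power-up goes here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	/* system sleep reuses the runtime-PM path, as in the diff above */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
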
index e4dd2ff5e47378905f3cae5d53a5770c76740dfd..5a50d3a46445931afe6b859eb7f5301a4b6864d5 100644 (file)
@@ -2484,6 +2484,9 @@ static const struct pci_device_id azx_ids[] = {
        /* CometLake-S */
        { PCI_DEVICE(0x8086, 0xa3f0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* CometLake-R */
+       { PCI_DEVICE(0x8086, 0xf0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2507,6 +2510,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Alderlake-S */
        { PCI_DEVICE(0x8086, 0x7ad0),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Alderlake-P */
+       { PCI_DEVICE(0x8086, 0x51c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index 74d246a0dc6de2cedfa8f24ccb9d8378d7e3bf84..97adff0cbcab4bfcec1934a264941ca4d584710a 100644 (file)
@@ -4346,6 +4346,7 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI",       patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI",  patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI",        patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI",  patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
index dd82ff2bd5d65146a5ff3f06fc5cc8cf51b09b42..290645516313c6a034d9b6f4ebf9d6065261eb3e 100644 (file)
@@ -6371,6 +6371,7 @@ enum {
        ALC256_FIXUP_HP_HEADSET_MIC,
        ALC236_FIXUP_DELL_AIO_HEADSET_MIC,
        ALC282_FIXUP_ACER_DISABLE_LINEOUT,
+       ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7808,6 +7809,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc269_fixup_limit_int_mic_boost,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7826,6 +7833,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x1094, "Acer Aspire E5-575T", ALC255_FIXUP_ACER_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
@@ -7998,6 +8006,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
+       SND_PCI_QUIRK(0x1043, 0x1982, "ASUS B1400CEPE", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
index 0ab40a8a68fb50d3033cfc8306e44a18803e7841..a5c1a2c4eae4e6f2d53968d514696e371e759584 100644 (file)
@@ -113,6 +113,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
                spec->codec_type = VT1708S;
        spec->gen.indep_hp = 1;
        spec->gen.keep_eapd_on = 1;
+       spec->gen.dac_min_mute = 1;
        spec->gen.pcm_playback_hook = via_playback_pcm_hook;
        spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
        codec->power_save_node = 1;
@@ -1042,7 +1043,7 @@ static const struct hda_fixup via_fixups[] = {
 static const struct snd_pci_quirk vt2002p_fixups[] = {
        SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
        SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
-       SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
+       SND_PCI_QUIRK_VENDOR(0x1558, "Clevo", VIA_FIXUP_POWER_SAVE),
        {}
 };
 
index deca8c7a0e8784b82458bc9b5d9d0d12adbd58e7..050a61fe9693f99171dee180db6cdd1eb95fccfe 100644 (file)
@@ -165,10 +165,24 @@ static int rn_acp_deinit(void __iomem *acp_base)
 
 static const struct dmi_system_id rn_acp_quirk_table[] = {
        {
-               /* Lenovo IdeaPad Flex 5 14ARE05, IdeaPad 5 15ARE05 */
+               /* Lenovo IdeaPad S340-14API */
                .matches = {
                        DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "LNVNB161216"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81NB"),
+               }
+       },
+       {
+               /* Lenovo IdeaPad Flex 5 14ARE05 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81X2"),
+               }
+       },
+       {
+               /* Lenovo IdeaPad 5 15ARE05 */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81YQ"),
                }
        },
        {
index 1010c9ee2e836272055d689d9736da34689f9629..472caad17012e9f032563984528f084c03ea684d 100644 (file)
@@ -595,18 +595,10 @@ static struct snd_soc_dai_driver ak4497_dai = {
        .ops = &ak4458_dai_ops,
 };
 
-static void ak4458_power_off(struct ak4458_priv *ak4458)
+static void ak4458_reset(struct ak4458_priv *ak4458, bool active)
 {
        if (ak4458->reset_gpiod) {
-               gpiod_set_value_cansleep(ak4458->reset_gpiod, 0);
-               usleep_range(1000, 2000);
-       }
-}
-
-static void ak4458_power_on(struct ak4458_priv *ak4458)
-{
-       if (ak4458->reset_gpiod) {
-               gpiod_set_value_cansleep(ak4458->reset_gpiod, 1);
+               gpiod_set_value_cansleep(ak4458->reset_gpiod, active);
                usleep_range(1000, 2000);
        }
 }
@@ -620,7 +612,7 @@ static int ak4458_init(struct snd_soc_component *component)
        if (ak4458->mute_gpiod)
                gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
 
-       ak4458_power_on(ak4458);
+       ak4458_reset(ak4458, false);
 
        ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
                            0x80, 0x80);   /* ACKS bit = 1; 10000000 */
@@ -650,7 +642,7 @@ static void ak4458_remove(struct snd_soc_component *component)
 {
        struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
 
-       ak4458_power_off(ak4458);
+       ak4458_reset(ak4458, true);
 }
 
 #ifdef CONFIG_PM
@@ -660,7 +652,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev)
 
        regcache_cache_only(ak4458->regmap, true);
 
-       ak4458_power_off(ak4458);
+       ak4458_reset(ak4458, true);
 
        if (ak4458->mute_gpiod)
                gpiod_set_value_cansleep(ak4458->mute_gpiod, 0);
@@ -685,8 +677,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev)
        if (ak4458->mute_gpiod)
                gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
 
-       ak4458_power_off(ak4458);
-       ak4458_power_on(ak4458);
+       ak4458_reset(ak4458, true);
+       ak4458_reset(ak4458, false);
 
        regcache_cache_only(ak4458->regmap, false);
        regcache_mark_dirty(ak4458->regmap);
index d5fcc4db8284afccb24778ba76a323c73068388f..0f3ac22f2cf8ee124281e1b5247be38604f0d30a 100644 (file)
@@ -717,7 +717,7 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
                               void *data)
 {
        struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-       int ret = -EOPNOTSUPP;
+       int ret = -ENOTSUPP;
 
        if (hcp->hcd.ops->hook_plugged_cb) {
                hcp->jack = jack;
index dec8716aa8ef5ebe92410027b9996ddab5bbadd6..985b2dcecf138c18f6cc6f562e2dfadb119f48dc 100644 (file)
@@ -2031,11 +2031,14 @@ static struct wm_coeff_ctl *wm_adsp_get_ctl(struct wm_adsp *dsp,
                                             unsigned int alg)
 {
        struct wm_coeff_ctl *pos, *rslt = NULL;
+       const char *fw_txt = wm_adsp_fw_text[dsp->fw];
 
        list_for_each_entry(pos, &dsp->ctl_list, list) {
                if (!pos->subname)
                        continue;
                if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+                   strncmp(pos->fw_name, fw_txt,
+                           SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == 0 &&
                                pos->alg_region.alg == alg &&
                                pos->alg_region.type == type) {
                        rslt = pos;
index ede4a9ad1054cee66e49b2f1e85de85048103153..dbbb7618351c78b79627db8031a8fa9d7e32376c 100644 (file)
@@ -90,7 +90,7 @@ static int imx_hdmi_init(struct snd_soc_pcm_runtime *rtd)
        }
 
        ret = snd_soc_component_set_jack(component, &data->hdmi_jack, NULL);
-       if (ret && ret != -EOPNOTSUPP) {
+       if (ret && ret != -ENOTSUPP) {
                dev_err(card->dev, "Can't set HDMI Jack %d\n", ret);
                return ret;
        }
index ca968901ac96f39070c7164446162a268bfd5ba8..6d0d6ef711e0f3129019829d1f35e9e7348e69af 100644 (file)
@@ -67,6 +67,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
                                        SOF_RT715_DAI_ID_FIX),
        },
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0A5E")
+               },
+               .driver_data = (void *)(SOF_RT711_JD_SRC_JD2 |
+                                       SOF_RT715_DAI_ID_FIX |
+                                       SOF_SDW_FOUR_SPK),
+       },
        {
                .callback = sof_sdw_quirk_cb,
                .matches = {
index ae466cd592922a4a6cff1ed7117521a0bd186653..b824086203b9e14292e3db5165dc9ffb3987a665 100644 (file)
@@ -3619,19 +3619,20 @@ static void skl_tplg_complete(struct snd_soc_component *component)
 
        list_for_each_entry(dobj, &component->dobj_list, list) {
                struct snd_kcontrol *kcontrol = dobj->control.kcontrol;
-               struct soc_enum *se =
-                       (struct soc_enum *)kcontrol->private_value;
-               char **texts = dobj->control.dtexts;
+               struct soc_enum *se;
+               char **texts;
                char chan_text[4];
 
-               if (dobj->type != SND_SOC_DOBJ_ENUM ||
-                   dobj->control.kcontrol->put !=
-                   skl_tplg_multi_config_set_dmic)
+               if (dobj->type != SND_SOC_DOBJ_ENUM || !kcontrol ||
+                   kcontrol->put != skl_tplg_multi_config_set_dmic)
                        continue;
+
+               se = (struct soc_enum *)kcontrol->private_value;
+               texts = dobj->control.dtexts;
                sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
 
                for (i = 0; i < se->items; i++) {
-                       struct snd_ctl_elem_value val;
+                       struct snd_ctl_elem_value val = {};
 
                        if (strstr(texts[i], chan_text)) {
                                val.value.enumerated.item[0] = i;
index 078e58f1ad0b27b2e49837dc145e674689ce9b08..cfbd0c65c7a389564ebed1fed50f8dd32413245e 100644 (file)
@@ -532,6 +532,7 @@ static struct snd_soc_dai_link mt8183_da7219_dai_links[] = {
                .dpcm_playback = 1,
                .ignore_suspend = 1,
                .be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
+               .ignore = 1,
                .init = mt8183_da7219_max98357_hdmi_init,
                SND_SOC_DAILINK_REG(tdm),
        },
@@ -754,8 +755,10 @@ static int mt8183_da7219_max98357_dev_probe(struct platform_device *pdev)
                        }
                }
 
-               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
                        dai_link->codecs->of_node = hdmi_codec;
+                       dai_link->ignore = 0;
+               }
 
                if (!dai_link->platforms->name)
                        dai_link->platforms->of_node = platform_node;
index 8c8340854859aceadfd0da76b4733fadf6527946..1ce3eddbee13bd1ed9bfc4d4850adacc833d20a6 100644 (file)
@@ -515,6 +515,7 @@ static struct snd_soc_dai_link mt8183_mt6358_ts3a227_dai_links[] = {
                .ignore_suspend = 1,
                .be_hw_params_fixup = mt8183_i2s_hw_params_fixup,
                .ops = &mt8183_mt6358_tdm_ops,
+               .ignore = 1,
                .init = mt8183_mt6358_ts3a227_max98357_hdmi_init,
                SND_SOC_DAILINK_REG(tdm),
        },
@@ -661,8 +662,10 @@ mt8183_mt6358_ts3a227_max98357_dev_probe(struct platform_device *pdev)
                                                    SND_SOC_DAIFMT_CBM_CFM;
                }
 
-               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0)
+               if (hdmi_codec && strcmp(dai_link->name, "TDM") == 0) {
                        dai_link->codecs->of_node = hdmi_codec;
+                       dai_link->ignore = 0;
+               }
 
                if (!dai_link->platforms->name)
                        dai_link->platforms->of_node = platform_node;
index 716fbb4126b5fc2f2b8fbf19bf7a38cf0f5429da..ae2c748eb19c40a490f84880820fc97093f5b895 100644 (file)
@@ -401,6 +401,53 @@ static const struct snd_soc_ops mt8192_mt6359_rt1015_rt5682_capture1_ops = {
        .startup = mt8192_mt6359_rt1015_rt5682_cap1_startup,
 };
 
+static int
+mt8192_mt6359_rt5682_startup(struct snd_pcm_substream *substream)
+{
+       static const unsigned int channels[] = {
+               1, 2
+       };
+       static const struct snd_pcm_hw_constraint_list constraints_channels = {
+               .count = ARRAY_SIZE(channels),
+               .list = channels,
+               .mask = 0,
+       };
+       static const unsigned int rates[] = {
+               48000
+       };
+       static const struct snd_pcm_hw_constraint_list constraints_rates = {
+               .count = ARRAY_SIZE(rates),
+               .list  = rates,
+               .mask = 0,
+       };
+
+       struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       int ret;
+
+       ret = snd_pcm_hw_constraint_list(runtime, 0,
+                                        SNDRV_PCM_HW_PARAM_CHANNELS,
+                                        &constraints_channels);
+       if (ret < 0) {
+               dev_err(rtd->dev, "hw_constraint_list channels failed\n");
+               return ret;
+       }
+
+       ret = snd_pcm_hw_constraint_list(runtime, 0,
+                                        SNDRV_PCM_HW_PARAM_RATE,
+                                        &constraints_rates);
+       if (ret < 0) {
+               dev_err(rtd->dev, "hw_constraint_list rate failed\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct snd_soc_ops mt8192_mt6359_rt5682_ops = {
+       .startup = mt8192_mt6359_rt5682_startup,
+};
+
 /* FE */
 SND_SOC_DAILINK_DEFS(playback1,
                     DAILINK_COMP_ARRAY(COMP_CPU("DL1")),
@@ -648,6 +695,7 @@ static struct snd_soc_dai_link mt8192_mt6359_dai_links[] = {
                            SND_SOC_DPCM_TRIGGER_PRE},
                .dynamic = 1,
                .dpcm_playback = 1,
+               .ops = &mt8192_mt6359_rt5682_ops,
                SND_SOC_DAILINK_REG(playback3),
        },
        {
@@ -721,6 +769,7 @@ static struct snd_soc_dai_link mt8192_mt6359_dai_links[] = {
                            SND_SOC_DPCM_TRIGGER_PRE},
                .dynamic = 1,
                .dpcm_capture = 1,
+               .ops = &mt8192_mt6359_rt5682_ops,
                SND_SOC_DAILINK_REG(capture2),
        },
        {
index c5e99c2d89c7ed65b3ae4eacd69a449df5b37d4b..66b834312f330fdd8424d7caca8e93706ecb094a 100644 (file)
@@ -344,8 +344,30 @@ int asoc_qcom_lpass_cpu_dai_probe(struct snd_soc_dai *dai)
 }
 EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_probe);
 
+static int asoc_qcom_of_xlate_dai_name(struct snd_soc_component *component,
+                                  struct of_phandle_args *args,
+                                  const char **dai_name)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct lpass_variant *variant = drvdata->variant;
+       int id = args->args[0];
+       int ret = -EINVAL;
+       int i;
+
+       for (i = 0; i  < variant->num_dai; i++) {
+               if (variant->dai_driver[i].id == id) {
+                       *dai_name = variant->dai_driver[i].name;
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 static const struct snd_soc_component_driver lpass_cpu_comp_driver = {
        .name = "lpass-cpu",
+       .of_xlate_dai_name = asoc_qcom_of_xlate_dai_name,
 };
 
 static bool lpass_cpu_regmap_writeable(struct device *dev, unsigned int reg)
index 92f98b4df47ffb23b878108342c8c8c3c785c427..ef8a7984f232356c1f879abfb5b5efe04e725d44 100644 (file)
@@ -131,7 +131,7 @@ static struct lpass_variant ipq806x_data = {
        .micmode                = REG_FIELD_ID(0x0010, 4, 7, 5, 0x4),
        .micmono                = REG_FIELD_ID(0x0010, 3, 3, 5, 0x4),
        .wssrc                  = REG_FIELD_ID(0x0010, 2, 2, 5, 0x4),
-       .bitwidth               = REG_FIELD_ID(0x0010, 0, 0, 5, 0x4),
+       .bitwidth               = REG_FIELD_ID(0x0010, 0, 1, 5, 0x4),
 
        .rdma_dyncclk           = REG_FIELD_ID(0x6000, 12, 12, 4, 0x1000),
        .rdma_bursten           = REG_FIELD_ID(0x6000, 11, 11, 4, 0x1000),
index 405542832e99416737b62b2c47ccf980e8a6965b..baf72f124ea9b24603692bad3708821e744dfd87 100644 (file)
 #define        LPAIF_WRDMAPERCNT_REG(v, chan)  LPAIF_WRDMA_REG_ADDR(v, 0x14, (chan))
 
 #define LPAIF_INTFDMA_REG(v, chan, reg, dai_id)  \
-               ((v->dai_driver[dai_id].id ==  LPASS_DP_RX) ? \
+       ((dai_id ==  LPASS_DP_RX) ? \
                LPAIF_HDMI_RDMA##reg##_REG(v, chan) : \
                 LPAIF_RDMA##reg##_REG(v, chan))
 
index d1c248590f3ab6da7622dcf8d1909d74c340c88a..0074b7f2dbc107ba8376d7fe51b056fd54361551 100644 (file)
@@ -257,6 +257,9 @@ static int lpass_platform_pcmops_hw_params(struct snd_soc_component *component,
                break;
        case MI2S_PRIMARY:
        case MI2S_SECONDARY:
+       case MI2S_TERTIARY:
+       case MI2S_QUATERNARY:
+       case MI2S_QUINARY:
                ret = regmap_fields_write(dmactl->intf, id,
                                                LPAIF_DMACTL_AUDINTF(dma_port));
                if (ret) {
@@ -507,6 +510,9 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
+               case MI2S_TERTIARY:
+               case MI2S_QUATERNARY:
+               case MI2S_QUINARY:
                        reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_irqclr = LPAIF_IRQ_ALL(ch);
 
@@ -559,6 +565,9 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
+               case MI2S_TERTIARY:
+               case MI2S_QUATERNARY:
+               case MI2S_QUINARY:
                        reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_mask = LPAIF_IRQ_ALL(ch);
                        val_irqen = 0;
@@ -655,6 +664,9 @@ static irqreturn_t lpass_dma_interrupt_handler(
        break;
        case MI2S_PRIMARY:
        case MI2S_SECONDARY:
+       case MI2S_TERTIARY:
+       case MI2S_QUATERNARY:
+       case MI2S_QUINARY:
                map = drvdata->lpaif_map;
                reg = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                val = 0;
index 85db650c2169d2356cf71b0eb3b8724d1fc72538..735c9dac28f2683abdb11b5e51f0d61483253682 100644 (file)
@@ -20,7 +20,7 @@
 #include "lpass.h"
 
 static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
-       [MI2S_PRIMARY] = {
+       {
                .id = MI2S_PRIMARY,
                .name = "Primary MI2S",
                .playback = {
@@ -44,9 +44,7 @@ static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
                },
                .probe  = &asoc_qcom_lpass_cpu_dai_probe,
                .ops    = &asoc_qcom_lpass_cpu_dai_ops,
-       },
-
-       [MI2S_SECONDARY] = {
+       }, {
                .id = MI2S_SECONDARY,
                .name = "Secondary MI2S",
                .playback = {
@@ -60,8 +58,7 @@ static struct snd_soc_dai_driver sc7180_lpass_cpu_dai_driver[] = {
                },
                .probe  = &asoc_qcom_lpass_cpu_dai_probe,
                .ops    = &asoc_qcom_lpass_cpu_dai_ops,
-       },
-       [LPASS_DP_RX] = {
+       }, {
                .id = LPASS_DP_RX,
                .name = "Hdmi",
                .playback = {
@@ -174,7 +171,7 @@ static struct lpass_variant sc7180_data = {
        .rdma_channels          = 5,
        .hdmi_rdma_reg_base             = 0x64000,
        .hdmi_rdma_reg_stride   = 0x1000,
-       .hdmi_rdma_channels             = 4,
+       .hdmi_rdma_channels             = 3,
        .dmactl_audif_start     = 1,
        .wrdma_reg_base         = 0x18000,
        .wrdma_reg_stride       = 0x1000,
index 0195372905ed3849ba4e0111a797dd5edbf5a0dd..2d68af0da34d877943946da18d8dda616544dbc4 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/compiler.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
-#include <dt-bindings/sound/sc7180-lpass.h>
+#include <dt-bindings/sound/qcom,lpass.h>
 #include "lpass-hdmi.h"
 
 #define LPASS_AHBIX_CLOCK_FREQUENCY            131072000
index 950c45008e245fbf2952bff0f9c986c95d40a8a3..22e7b4c9115b723f194c931f485bb102cfcc563e 100644 (file)
@@ -447,7 +447,7 @@ static void remove_dai(struct snd_soc_component *comp,
 {
        struct snd_soc_dai_driver *dai_drv =
                container_of(dobj, struct snd_soc_dai_driver, dobj);
-       struct snd_soc_dai *dai;
+       struct snd_soc_dai *dai, *_dai;
 
        if (pass != SOC_TPLG_PASS_PCM_DAI)
                return;
@@ -455,9 +455,9 @@ static void remove_dai(struct snd_soc_component *comp,
        if (dobj->ops && dobj->ops->dai_unload)
                dobj->ops->dai_unload(comp, dobj);
 
-       for_each_component_dais(comp, dai)
+       for_each_component_dais_safe(comp, dai, _dai)
                if (dai->driver == dai_drv)
-                       dai->driver = NULL;
+                       snd_soc_unregister_dai(dai);
 
        list_del(&dobj->list);
 }
@@ -902,7 +902,7 @@ static int soc_tplg_denum_create_values(struct soc_tplg *tplg, struct soc_enum *
                return -EINVAL;
 
        se->dobj.control.dvalues = devm_kcalloc(tplg->dev, le32_to_cpu(ec->items),
-                                          sizeof(u32),
+                                          sizeof(*se->dobj.control.dvalues),
                                           GFP_KERNEL);
        if (!se->dobj.control.dvalues)
                return -ENOMEM;
@@ -1742,7 +1742,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
 
        /* register the DAI to the component */
-       dai = devm_snd_soc_register_dai(tplg->dev, tplg->comp, dai_drv, false);
+       dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
        if (!dai)
                return -ENOMEM;
 
@@ -1750,6 +1750,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
        if (ret != 0) {
                dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
+               snd_soc_unregister_dai(dai);
                return ret;
        }
 
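remove_dai() above switches to the _safe iterator because snd_soc_unregister_dai() deletes the DAI from the component's list while that list is being walked. The same idiom in isolation, with a hypothetical item type:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int id;
};

/* Deleting entries while iterating requires the _safe variant, which
 * caches the next pointer before the current entry is freed.
 */
static void remove_matching(struct list_head *head, int id)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, head, node) {
		if (it->id == id) {
			list_del(&it->node);
			kfree(it);
		}
	}
}
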
index d306c370e5d164ac786f0760812927e8b17e2a08..4797a1cf8c805d5d063cba4544845cfdbb748c55 100644 (file)
@@ -355,7 +355,7 @@ config SND_SOC_SOF_HDA
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK
        bool "SOF support for SoundWire"
-       depends on SOUNDWIRE && ACPI
+       depends on ACPI
        help
          This adds support for SoundWire with Sound Open Firmware
          for Intel(R) platforms.
@@ -371,6 +371,7 @@ config SND_SOC_SOF_INTEL_SOUNDWIRE_LINK_BASELINE
 
 config SND_SOC_SOF_INTEL_SOUNDWIRE
        tristate
+       select SOUNDWIRE
        select SOUNDWIRE_INTEL
        help
          This option is not user-selectable but automagically handled by
index 6875fa570c2c55bf86d919624db577e35f9b177b..6744318de612e567ceab15b22df953686222a50e 100644 (file)
@@ -63,16 +63,18 @@ static int hda_codec_load_module(struct hda_codec *codec)
 }
 
 /* enable controller wake up event for all codecs with jack connectors */
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable)
 {
        struct hda_bus *hbus = sof_to_hbus(sdev);
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct hda_codec *codec;
        unsigned int mask = 0;
 
-       list_for_each_codec(codec, hbus)
-               if (codec->jacktbl.used)
-                       mask |= BIT(codec->core.addr);
+       if (enable) {
+               list_for_each_codec(codec, hbus)
+                       if (codec->jacktbl.used)
+                               mask |= BIT(codec->core.addr);
+       }
 
        snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
 }
@@ -81,23 +83,18 @@ void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev)
 void hda_codec_jack_check(struct snd_sof_dev *sdev)
 {
        struct hda_bus *hbus = sof_to_hbus(sdev);
-       struct hdac_bus *bus = sof_to_bus(sdev);
        struct hda_codec *codec;
 
-       /* disable controller Wake Up event*/
-       snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, 0);
-
        list_for_each_codec(codec, hbus)
                /*
                 * Wake up all jack-detecting codecs regardless whether an event
                 * has been recorded in STATESTS
                 */
                if (codec->jacktbl.used)
-                       schedule_delayed_work(&codec->jackpoll_work,
-                                             codec->jackpoll_interval);
+                       pm_request_resume(&codec->core.dev);
 }
 #else
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable) {}
 void hda_codec_jack_check(struct snd_sof_dev *sdev) {}
 #endif /* CONFIG_SND_SOC_SOF_HDA_AUDIO_CODEC */
 EXPORT_SYMBOL_NS(hda_codec_jack_wake_enable, SND_SOC_SOF_HDA_AUDIO_CODEC);
@@ -156,7 +153,8 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
                if (!hdev->bus->audio_component) {
                        dev_dbg(sdev->dev,
                                "iDisp hw present but no driver\n");
-                       goto error;
+                       ret = -ENOENT;
+                       goto out;
                }
                hda_priv->need_display_power = true;
        }
@@ -173,24 +171,23 @@ static int hda_codec_probe(struct snd_sof_dev *sdev, int address,
                 * other return codes without modification
                 */
                if (ret == 0)
-                       goto error;
+                       ret = -ENOENT;
        }
 
-       return ret;
-
-error:
-       snd_hdac_ext_bus_device_exit(hdev);
-       return -ENOENT;
-
+out:
+       if (ret < 0) {
+               snd_hdac_device_unregister(hdev);
+               put_device(&hdev->dev);
+       }
 #else
        hdev = devm_kzalloc(sdev->dev, sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return -ENOMEM;
 
        ret = snd_hdac_ext_bus_device_init(&hbus->core, address, hdev, HDA_DEV_ASOC);
+#endif
 
        return ret;
-#endif
 }
 
 /* Codec initialization */
index 2b001151fe376c83b55066513a487f1524b0d8df..1c5e05b88a90d7a25d21ab2830fc1e80f309254c 100644 (file)
@@ -617,7 +617,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        if (runtime_suspend)
-               hda_codec_jack_wake_enable(sdev);
+               hda_codec_jack_wake_enable(sdev, true);
 
        /* power down all hda link */
        snd_hdac_ext_bus_link_power_down_all(bus);
@@ -683,8 +683,11 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
        /* check jack status */
-       if (runtime_resume)
-               hda_codec_jack_check(sdev);
+       if (runtime_resume) {
+               hda_codec_jack_wake_enable(sdev, false);
+               if (sdev->system_suspend_target == SOF_SUSPEND_NONE)
+                       hda_codec_jack_check(sdev);
+       }
 
        /* turn off the links that were off before suspend */
        list_for_each_entry(hlink, &bus->hlink_list, list) {
index 9ec8ae0fd6495505e59e9e9aef71d576465631e2..a3b6f3e9121c495813a65724763e7a29c5621e8f 100644 (file)
@@ -650,7 +650,7 @@ void sof_hda_bus_init(struct hdac_bus *bus, struct device *dev);
  */
 void hda_codec_probe_bus(struct snd_sof_dev *sdev,
                         bool hda_codec_use_common_hdmi);
-void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev);
+void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev, bool enable);
 void hda_codec_jack_check(struct snd_sof_dev *sdev);
 
 #endif /* CONFIG_SND_SOC_SOF_HDA */
index 2a369c2c65514c7c3b359d73eca2a0e2bce1994f..cc2e257087e4c37583b0eab45a0e8abbc9e9ddf8 100644 (file)
@@ -131,12 +131,13 @@ static int sof_acpi_probe(struct platform_device *pdev)
        if (!id)
                return -ENODEV;
 
-       ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
-       if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
-               dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
-               return -ENODEV;
+       if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+               ret = snd_intel_acpi_dsp_driver_probe(dev, id->id);
+               if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+                       dev_dbg(dev, "SOF ACPI driver not selected, aborting probe\n");
+                       return -ENODEV;
+               }
        }
-
        dev_dbg(dev, "ACPI DSP detected");
 
        sof_pdata = devm_kzalloc(dev, sizeof(*sof_pdata), GFP_KERNEL);
index 63b989e3ec409826a01ccd8a04295e7e323638b5..215711ac7450997138d542b952b50db2e3b028d9 100644 (file)
@@ -344,10 +344,12 @@ static int sof_pci_probe(struct pci_dev *pci,
        const struct snd_sof_dsp_ops *ops;
        int ret;
 
-       ret = snd_intel_dsp_driver_probe(pci);
-       if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
-               dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
-               return -ENODEV;
+       if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
+               ret = snd_intel_dsp_driver_probe(pci);
+               if (ret != SND_INTEL_DSP_DRIVER_ANY && ret != SND_INTEL_DSP_DRIVER_SOF) {
+                       dev_dbg(&pci->dev, "SOF PCI driver not selected, aborting probe\n");
+                       return -ENODEV;
+               }
        }
        dev_dbg(&pci->dev, "PCI DSP detected");
 
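Both SOF probe paths above gate the snd_intel_dsp_driver_probe() call with IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG), so the probe is no longer aborted when the DSP-config helper is not reachable from this code. The check factored into a hypothetical helper for clarity:

#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/pci.h>
#include <sound/intel-dsp-config.h>

static int sof_check_dsp_selected(struct pci_dev *pci)
{
	if (IS_REACHABLE(CONFIG_SND_INTEL_DSP_CONFIG)) {
		int ret = snd_intel_dsp_driver_probe(pci);

		/* another driver (e.g. legacy HDA) was chosen for this DSP */
		if (ret != SND_INTEL_DSP_DRIVER_ANY &&
		    ret != SND_INTEL_DSP_DRIVER_SOF)
			return -ENODEV;
	}
	return 0;
}
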
index 31051f2be46da9434851dd06272f4fbe318c41ae..dc68ed65e47877a31b6fcb1144246337f5a5213d 100644 (file)
@@ -485,18 +485,9 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
                              const struct audioformat *fmt, int rate)
 {
        struct usb_device *dev = chip->dev;
-       struct usb_host_interface *alts;
-       unsigned int ep;
        unsigned char data[3];
        int err, crate;
 
-       alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
-       if (!alts)
-               return -EINVAL;
-       if (get_iface_desc(alts)->bNumEndpoints < 1)
-               return -EINVAL;
-       ep = get_endpoint(alts, 0)->bEndpointAddress;
-
        /* if endpoint doesn't have sampling rate control, bail out */
        if (!(fmt->attributes & UAC_EP_CS_ATTR_SAMPLE_RATE))
                return 0;
@@ -506,11 +497,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
        data[2] = rate >> 16;
        err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
                              USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
-                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
-                             data, sizeof(data));
+                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+                             fmt->endpoint, data, sizeof(data));
        if (err < 0) {
                dev_err(&dev->dev, "%d:%d: cannot set freq %d to ep %#x\n",
-                       fmt->iface, fmt->altsetting, rate, ep);
+                       fmt->iface, fmt->altsetting, rate, fmt->endpoint);
                return err;
        }
 
@@ -524,11 +515,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip,
 
        err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
                              USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
-                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
-                             data, sizeof(data));
+                             UAC_EP_CS_ATTR_SAMPLE_RATE << 8,
+                             fmt->endpoint, data, sizeof(data));
        if (err < 0) {
                dev_err(&dev->dev, "%d:%d: cannot get freq at ep %#x\n",
-                       fmt->iface, fmt->altsetting, ep);
+                       fmt->iface, fmt->altsetting, fmt->endpoint);
                chip->sample_rate_read_error++;
                return 0; /* some devices don't support reading */
        }
index fe73fe3ff2bcadccafa584bb5b756d5f730cf9a4..8e568823c99246ff66d88be10bf0d692e4bad95b 100644 (file)
@@ -1252,6 +1252,15 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
 
        /* If the interface has been already set up, just set EP parameters */
        if (!ep->iface_ref->need_setup) {
+               /* sample rate setup of UAC1 is per endpoint, and we need
+                * to update at each EP configuration
+                */
+               if (ep->cur_audiofmt->protocol == UAC_VERSION_1) {
+                       err = snd_usb_init_sample_rate(chip, ep->cur_audiofmt,
+                                                      ep->cur_rate);
+                       if (err < 0)
+                               goto unlock;
+               }
                err = snd_usb_endpoint_set_params(chip, ep);
                if (err < 0)
                        goto unlock;
index 9ebc5d202c87398fa4e622acd58295d6bc356df3..e6ff317a678520a20ff5f6109886d5018fab9043 100644 (file)
@@ -466,6 +466,17 @@ static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
        unsigned int nr_rates;
        int i, err;
 
+       /* performing the rate verification may lead to unexpected USB bus
+        * behavior afterwards by some unknown reason.  Do this only for the
+        * known devices.
+        */
+       switch (USB_ID_VENDOR(chip->usb_id)) {
+       case 0x07fd: /* MOTU */
+               break;
+       default:
+               return 0; /* don't perform the validation as default */
+       }
+
        table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
        if (!table)
                return -ENOMEM;
index 1ac2cc6c33fb1628a15dce6293c3ce4351088a1c..521cc846d9d9fec8246e38f22fb01fd0030d2fe0 100644 (file)
@@ -175,11 +175,13 @@ static int add_roland_implicit_fb(struct snd_usb_audio *chip,
                                       ifnum, alts);
 }
 
-/* Pioneer devices: playback and capture streams sharing the same iface/altset
+/* Playback and capture EPs on Pioneer devices share the same iface/altset,
+ * but they don't seem working with the implicit fb mode well, hence we
+ * just return as if the sync were already set up.
  */
-static int add_pioneer_implicit_fb(struct snd_usb_audio *chip,
-                                  struct audioformat *fmt,
-                                  struct usb_host_interface *alts)
+static int skip_pioneer_sync_ep(struct snd_usb_audio *chip,
+                               struct audioformat *fmt,
+                               struct usb_host_interface *alts)
 {
        struct usb_endpoint_descriptor *epd;
 
@@ -194,8 +196,7 @@ static int add_pioneer_implicit_fb(struct snd_usb_audio *chip,
             (epd->bmAttributes & USB_ENDPOINT_USAGE_MASK) !=
             USB_ENDPOINT_USAGE_IMPLICIT_FB))
                return 0;
-       return add_implicit_fb_sync_ep(chip, fmt, epd->bEndpointAddress, 1,
-                                      alts->desc.bInterfaceNumber, alts);
+       return 1; /* don't handle with the implicit fb, just skip sync EP */
 }
 
 static int __add_generic_implicit_fb(struct snd_usb_audio *chip,
@@ -298,11 +299,11 @@ static int audioformat_implicit_fb_quirk(struct snd_usb_audio *chip,
                        return 1;
        }
 
-       /* Pioneer devices implicit feedback with vendor spec class */
+       /* Pioneer devices with vendor spec class */
        if (attr == USB_ENDPOINT_SYNC_ASYNC &&
            alts->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
            USB_ID_VENDOR(chip->usb_id) == 0x2b73 /* Pioneer */) {
-               if (add_pioneer_implicit_fb(chip, fmt, alts))
+               if (skip_pioneer_sync_ep(chip, fmt, alts))
                        return 1;
        }
 
index 56079901769fd93413df9c24f49dbd9ff76d4cf0..078bb4c94033498e7ddcc938aa31eb1a5a471e2e 100644 (file)
@@ -663,7 +663,7 @@ static int hw_check_valid_format(struct snd_usb_substream *subs,
        check_fmts.bits[1] = (u32)(fp->formats >> 32);
        snd_mask_intersect(&check_fmts, fmts);
        if (snd_mask_empty(&check_fmts)) {
-               hwc_debug("   > check: no supported format %d\n", fp->format);
+               hwc_debug("   > check: no supported format 0x%llx\n", fp->formats);
                return 0;
        }
        /* check the channels */
@@ -775,24 +775,11 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, rmin, rmax);
 }
 
-static int hw_rule_format(struct snd_pcm_hw_params *params,
-                         struct snd_pcm_hw_rule *rule)
+static int apply_hw_params_format_bits(struct snd_mask *fmt, u64 fbits)
 {
-       struct snd_usb_substream *subs = rule->private;
-       const struct audioformat *fp;
-       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-       u64 fbits;
        u32 oldbits[2];
        int changed;
 
-       hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
-       fbits = 0;
-       list_for_each_entry(fp, &subs->fmt_list, list) {
-               if (!hw_check_valid_format(subs, params, fp))
-                       continue;
-               fbits |= fp->formats;
-       }
-
        oldbits[0] = fmt->bits[0];
        oldbits[1] = fmt->bits[1];
        fmt->bits[0] &= (u32)fbits;
@@ -806,6 +793,24 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
        return changed;
 }
 
+static int hw_rule_format(struct snd_pcm_hw_params *params,
+                         struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct audioformat *fp;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+       u64 fbits;
+
+       hwc_debug("hw_rule_format: %x:%x\n", fmt->bits[0], fmt->bits[1]);
+       fbits = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               fbits |= fp->formats;
+       }
+       return apply_hw_params_format_bits(fmt, fbits);
+}
+
 static int hw_rule_period_time(struct snd_pcm_hw_params *params,
                               struct snd_pcm_hw_rule *rule)
 {
@@ -833,64 +838,92 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, pmin, UINT_MAX);
 }
 
-/* apply PCM hw constraints from the concurrent sync EP */
-static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
-                                        struct snd_usb_substream *subs)
+/* get the EP or the sync EP for implicit fb when it's already set up */
+static const struct snd_usb_endpoint *
+get_sync_ep_from_substream(struct snd_usb_substream *subs)
 {
        struct snd_usb_audio *chip = subs->stream->chip;
-       struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
-       int err;
+       const struct snd_usb_endpoint *ep;
 
        list_for_each_entry(fp, &subs->fmt_list, list) {
                ep = snd_usb_get_endpoint(chip, fp->endpoint);
                if (ep && ep->cur_rate)
-                       goto found;
+                       return ep;
                if (!fp->implicit_fb)
                        continue;
                /* for the implicit fb, check the sync ep as well */
                ep = snd_usb_get_endpoint(chip, fp->sync_ep);
                if (ep && ep->cur_rate)
-                       goto found;
+                       return ep;
        }
-       return 0;
+       return NULL;
+}
 
- found:
-       if (!find_format(&subs->fmt_list, ep->cur_format, ep->cur_rate,
-                        ep->cur_channels, false, NULL)) {
-               usb_audio_dbg(chip, "EP 0x%x being used, but not applicable\n",
-                             ep->ep_num);
+/* additional hw constraints for implicit feedback mode */
+static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
+                                     struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
                return 0;
-       }
 
-       usb_audio_dbg(chip, "EP 0x%x being used, using fixed params:\n",
-                     ep->ep_num);
-       usb_audio_dbg(chip, "rate=%d, period_size=%d, periods=%d\n",
-                     ep->cur_rate, ep->cur_period_frames,
-                     ep->cur_buffer_periods);
+       hwc_debug("applying %s\n", __func__);
+       return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
+}
 
-       runtime->hw.formats = subs->formats;
-       runtime->hw.rate_min = runtime->hw.rate_max = ep->cur_rate;
-       runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
-       runtime->hw.periods_min = runtime->hw.periods_max =
-               ep->cur_buffer_periods;
+static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
+                                   struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
 
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-                                 hw_rule_channels, subs,
-                                 SNDRV_PCM_HW_PARAM_FORMAT,
-                                 SNDRV_PCM_HW_PARAM_RATE,
-                                 -1);
-       if (err < 0)
-               return err;
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
 
-       err = snd_pcm_hw_constraint_minmax(runtime,
-                                          SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
-                                          ep->cur_period_frames,
-                                          ep->cur_period_frames);
-       if (err < 0)
-               return err;
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+       return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
+}
 
-       return 1; /* notify the finding */
+static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
+                                          struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
+
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
+       return apply_hw_params_minmax(it, ep->cur_period_frames,
+                                     ep->cur_period_frames);
+}
+
+static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
+                                      struct snd_pcm_hw_rule *rule)
+{
+       struct snd_usb_substream *subs = rule->private;
+       const struct snd_usb_endpoint *ep;
+       struct snd_interval *it;
+
+       ep = get_sync_ep_from_substream(subs);
+       if (!ep)
+               return 0;
+
+       hwc_debug("applying %s\n", __func__);
+       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
+       return apply_hw_params_minmax(it, ep->cur_buffer_periods,
+                                     ep->cur_buffer_periods);
 }
 
 /*
@@ -899,20 +932,11 @@ static int apply_hw_constraint_from_sync(struct snd_pcm_runtime *runtime,
 
 static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substream *subs)
 {
-       struct snd_usb_audio *chip = subs->stream->chip;
        const struct audioformat *fp;
        unsigned int pt, ptmin;
        int param_period_time_if_needed = -1;
        int err;
 
-       mutex_lock(&chip->mutex);
-       err = apply_hw_constraint_from_sync(runtime, subs);
-       mutex_unlock(&chip->mutex);
-       if (err < 0)
-               return err;
-       if (err > 0) /* found the matching? */
-               goto add_extra_rules;
-
        runtime->hw.formats = subs->formats;
 
        runtime->hw.rate_min = 0x7fffffff;
@@ -957,6 +981,7 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
 
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
                                  hw_rule_rate, subs,
+                                 SNDRV_PCM_HW_PARAM_RATE,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
                                  param_period_time_if_needed,
@@ -964,9 +989,9 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
        if (err < 0)
                return err;
 
-add_extra_rules:
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
                                  hw_rule_channels, subs,
+                                 SNDRV_PCM_HW_PARAM_CHANNELS,
                                  SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_RATE,
                                  param_period_time_if_needed,
@@ -975,6 +1000,7 @@ add_extra_rules:
                return err;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                                  hw_rule_format, subs,
+                                 SNDRV_PCM_HW_PARAM_FORMAT,
                                  SNDRV_PCM_HW_PARAM_RATE,
                                  SNDRV_PCM_HW_PARAM_CHANNELS,
                                  param_period_time_if_needed,
@@ -993,6 +1019,28 @@ add_extra_rules:
                        return err;
        }
 
+       /* additional hw constraints for implicit fb */
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
+                                 hw_rule_format_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_FORMAT, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+                                 hw_rule_rate_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_RATE, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+                                 hw_rule_period_size_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS,
+                                 hw_rule_periods_implicit_fb, subs,
+                                 SNDRV_PCM_HW_PARAM_PERIODS, -1);
+       if (err < 0)
+               return err;
+
        return 0;
 }
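
The refactor above factors the format-mask intersection out into apply_hw_params_format_bits(), so the same helper can serve both the generic format rule and the new implicit-feedback format rule. For readers unfamiliar with how the 64-bit format bitmap maps onto the two 32-bit words of an ALSA mask, here is a minimal userspace sketch of that intersection; the struct and function names are illustrative stand-ins rather than the ALSA API, and the real helper additionally fails with -EINVAL when the resulting mask goes empty.

#include <stdint.h>
#include <stdio.h>

struct fmt_mask {                       /* stand-in for the two snd_mask words */
        uint32_t bits[2];
};

/* AND the 64-bit format bitmap into the mask; report whether it changed. */
static int apply_format_bits(struct fmt_mask *fmt, uint64_t fbits)
{
        uint32_t old0 = fmt->bits[0], old1 = fmt->bits[1];

        fmt->bits[0] &= (uint32_t)fbits;
        fmt->bits[1] &= (uint32_t)(fbits >> 32);
        return old0 != fmt->bits[0] || old1 != fmt->bits[1];
}

int main(void)
{
        struct fmt_mask m = { { 0xffffffff, 0xffffffff } };
        uint64_t supported = (1ULL << 2) | (1ULL << 35);   /* two arbitrary formats */

        printf("changed=%d mask=0x%08x%08x\n",
               apply_format_bits(&m, supported), m.bits[1], m.bits[0]);
        return 0;
}
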
 
index 89e172642d98b6ddb4d697a3f324b66d6cc3814f..e196e364cef19401c5557bc5bf6a05d5843e3206 100644 (file)
@@ -1470,30 +1470,6 @@ static void set_format_emu_quirk(struct snd_usb_substream *subs,
        subs->pkt_offset_adj = (emu_samplerate_id >= EMU_QUIRK_SR_176400HZ) ? 4 : 0;
 }
 
-
-/*
- * Pioneer DJ DJM-900NXS2
- * Device needs to know the sample rate each time substream is started
- */
-static int pioneer_djm_set_format_quirk(struct snd_usb_substream *subs)
-{
-       unsigned int cur_rate = subs->data_endpoint->cur_rate;
-       /* Convert sample rate value to little endian */
-       u8 sr[3];
-
-       sr[0] = cur_rate & 0xff;
-       sr[1] = (cur_rate >> 8) & 0xff;
-       sr[2] = (cur_rate >> 16) & 0xff;
-
-       /* Configure device */
-       usb_set_interface(subs->dev, 0, 1);
-       snd_usb_ctl_msg(subs->stream->chip->dev,
-               usb_rcvctrlpipe(subs->stream->chip->dev, 0),
-               0x01, 0x22, 0x0100, 0x0082, &sr, 0x0003);
-
-       return 0;
-}
-
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              const struct audioformat *fmt)
 {
@@ -1504,10 +1480,6 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        case USB_ID(0x041e, 0x3f19): /* E-Mu 0204 USB */
                set_format_emu_quirk(subs, fmt);
                break;
-       case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
-       case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
-               pioneer_djm_set_format_quirk(subs);
-               break;
        case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
                subs->stream_offset_adj = 2;
                break;
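
The hunk above drops the Pioneer DJ DJM rate quirk from this file. For reference, the removed helper only packed the current sample rate into a three-byte little-endian payload before issuing a vendor control request; the sketch below reproduces just that packing, with the USB transfer itself omitted.

#include <stdint.h>
#include <stdio.h>

/* Pack a sample rate into the 3-byte little-endian payload the removed
 * quirk sent to the device. */
static void pack_rate_le24(unsigned int rate, uint8_t sr[3])
{
        sr[0] = rate & 0xff;            /* least significant byte first */
        sr[1] = (rate >> 8) & 0xff;
        sr[2] = (rate >> 16) & 0xff;
}

int main(void)
{
        uint8_t sr[3];

        pack_rate_le24(96000, sr);      /* 96000 == 0x017700 */
        printf("%02x %02x %02x\n", sr[0], sr[1], sr[2]);   /* prints: 00 77 01 */
        return 0;
}
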
index cacd66ad792610e673969cc2056391218496fc28..a2b233fdb572eb687674315054e19bfe7e070d3b 100644 (file)
@@ -107,8 +107,8 @@ int monitor_device(const char *device_name,
                        ret = -EIO;
                        break;
                }
-               fprintf(stdout, "GPIO EVENT at %llu on line %d (%d|%d) ",
-                       event.timestamp_ns, event.offset, event.line_seqno,
+               fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
+                       (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
                        event.seqno);
                switch (event.id) {
                case GPIO_V2_LINE_EVENT_RISING_EDGE:
index f229ec62301b7e17c827a1532f34e0b8c3341b72..41e76d244192244d6326f764378576fbbbfa7dc9 100644 (file)
@@ -10,6 +10,7 @@
 #include <ctype.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <inttypes.h>
 #include <linux/gpio.h>
 #include <poll.h>
 #include <stdbool.h>
@@ -86,8 +87,8 @@ int main(int argc, char **argv)
                                return EXIT_FAILURE;
                        }
 
-                       printf("line %u: %s at %llu\n",
-                              chg.info.offset, event, chg.timestamp_ns);
+                       printf("line %u: %s at %" PRIu64 "\n",
+                              chg.info.offset, event, (uint64_t)chg.timestamp_ns);
                }
        }
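
Both GPIO tool hunks above replace %llu with a cast to uint64_t plus PRIu64, because the kernel UAPI __u64 is not guaranteed to be unsigned long long on every architecture/libc combination. A minimal illustration of the portable form:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t timestamp_ns = 1612710000123456789ULL;    /* example value */

        /* PRIu64 expands to the right conversion specifier for uint64_t,
         * so this compiles cleanly whether the platform's 64-bit type is
         * "long" or "long long". */
        printf("GPIO EVENT at %" PRIu64 " ns\n", timestamp_ns);
        return 0;
}
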
 
index 3c3f2bc6c6528e08fdaa913f1da43e2c02799b48..9970a288dda536768b5878f5f0a2c71dc0af8a97 100644 (file)
@@ -240,11 +240,6 @@ static int btf_parse_hdr(struct btf *btf)
        }
 
        meta_left = btf->raw_size - sizeof(*hdr);
-       if (!meta_left) {
-               pr_debug("BTF has no data\n");
-               return -EINVAL;
-       }
-
        if (meta_left < hdr->str_off + hdr->str_len) {
                pr_debug("Invalid BTF total size:%u\n", btf->raw_size);
                return -EINVAL;
index cfcdbd7be066eaaac7677c5e723d5eef9f999fbb..17465d454a0e31d912f7d0782d3f0bc19b6bfcdd 100644 (file)
@@ -367,21 +367,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
        return map;
 }
 
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
-                                    struct perf_evsel *evsel, int idx, int cpu,
-                                    int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
 {
        struct perf_sample_id *sid = SID(evsel, cpu, thread);
 
        sid->idx = idx;
-       if (evlist->cpus && cpu >= 0)
-               sid->cpu = evlist->cpus->map[cpu];
-       else
-               sid->cpu = -1;
-       if (!evsel->system_wide && evlist->threads && thread >= 0)
-               sid->tid = perf_thread_map__pid(evlist->threads, thread);
-       else
-               sid->tid = -1;
+       sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+       sid->tid = perf_thread_map__pid(evsel->threads, thread);
 }
 
 static struct perf_mmap*
@@ -500,8 +492,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
                        if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
                                                   fd) < 0)
                                return -1;
-                       perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
-                                                thread);
+                       perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
                }
        }
 
index 5f8d3eed78a1869e3f294e27b7522508a82bf17c..4bd30315eb62b792c85e52559de94cd38380a42d 100644 (file)
@@ -2928,14 +2928,10 @@ int check(struct objtool_file *file)
        warnings += ret;
 
 out:
-       if (ret < 0) {
-               /*
-                *  Fatal error.  The binary is corrupt or otherwise broken in
-                *  some way, or objtool itself is broken.  Fail the kernel
-                *  build.
-                */
-               return ret;
-       }
-
+       /*
+        *  For now, don't fail the kernel build on fatal warnings.  These
+        *  errors are still fairly common due to the growing matrix of
+        *  supported toolchains and their recent pace of change.
+        */
        return 0;
 }
index be89c741ba9a09441e80566f370b7f44102d0c33..d8421e1d06bed33f79e8e46d14f7253aa0256102 100644 (file)
@@ -380,8 +380,11 @@ static int read_symbols(struct elf *elf)
 
        symtab = find_section_by_name(elf, ".symtab");
        if (!symtab) {
-               WARN("missing symbol table");
-               return -1;
+               /*
+                * A missing symbol table is actually possible if it's an empty
+                * .o file.  This can happen for thunk_64.o.
+                */
+               return 0;
        }
 
        symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
@@ -448,6 +451,13 @@ static int read_symbols(struct elf *elf)
                list_add(&sym->list, entry);
                elf_hash_add(elf->symbol_hash, &sym->hash, sym->idx);
                elf_hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+
+               /*
+                * Don't store empty STT_NOTYPE symbols in the rbtree.  They
+                * can exist within a function, confusing the sorting.
+                */
+               if (!sym->len)
+                       rb_erase(&sym->node, &sym->sec->symbol_tree);
        }
 
        if (stats)
index edacfa98d073b38bdbc70754a4b795ec82cb53c2..42dad4a0f8cf49eecd17fe129134cb5f9aef2db3 100644 (file)
@@ -186,6 +186,7 @@ struct output_option {
 
 enum {
        OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+       OUTPUT_TYPE_OTHER,
        OUTPUT_TYPE_MAX
 };
 
@@ -283,6 +284,18 @@ static struct {
 
                .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
        },
+
+       [OUTPUT_TYPE_OTHER] = {
+               .user_set = false,
+
+               .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+                             PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+                             PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+                             PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+                             PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+               .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+       },
 };
 
 struct evsel_script {
@@ -343,8 +356,11 @@ static inline int output_type(unsigned int type)
        case PERF_TYPE_SYNTH:
                return OUTPUT_TYPE_SYNTH;
        default:
-               return type;
+               if (type < PERF_TYPE_MAX)
+                       return type;
        }
+
+       return OUTPUT_TYPE_OTHER;
 }
 
 static inline unsigned int attr_type(unsigned int type)
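
The change above routes any attr.type at or above PERF_TYPE_MAX (for example a dynamically assigned PMU type) to the new catch-all OUTPUT_TYPE_OTHER entry instead of indexing past the table. A small sketch of that clamping, using illustrative constants rather than perf's real values:

#include <stdio.h>

enum { TYPE_MAX = 6, TYPE_OTHER = TYPE_MAX, TYPE_TABLE_SIZE };

/* Map an open-ended type number onto a fixed-size table index. */
static int output_type(unsigned int type)
{
        if (type < TYPE_MAX)
                return type;            /* core types index directly */
        return TYPE_OTHER;              /* everything else shares one slot */
}

int main(void)
{
        printf("%d %d\n", output_type(1), output_type(42));   /* prints: 1 6 */
        return 0;
}
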
index ee94d3e8dd654e389eeb811af211e0d4c0a92149..e6d3452031e52efac75566a0ffd2a926fd7aeac9 100644 (file)
@@ -162,6 +162,14 @@ static bool contains_event(struct evsel **metric_events, int num_events,
        return false;
 }
 
+static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
+{
+       if (!ev1->pmu_name || !ev2->pmu_name)
+               return false;
+
+       return !strcmp(ev1->pmu_name, ev2->pmu_name);
+}
+
 /**
  * Find a group of events in perf_evlist that correspond to those from a parsed
  * metric expression. Note, as find_evsel_group is called in the same order as
@@ -280,8 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
                         */
                        if (!has_constraint &&
                            ev->leader != metric_events[i]->leader &&
-                           !strcmp(ev->leader->pmu_name,
-                                   metric_events[i]->leader->pmu_name))
+                           evsel_same_pmu(ev->leader, metric_events[i]->leader))
                                break;
                        if (!strcmp(metric_events[i]->name, ev->name)) {
                                set_bit(ev->idx, evlist_used);
@@ -766,7 +773,6 @@ int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
 struct metricgroup_add_iter_data {
        struct list_head *metric_list;
        const char *metric;
-       struct metric **m;
        struct expr_ids *ids;
        int *ret;
        bool *has_match;
@@ -1058,12 +1064,13 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
                                                  void *data)
 {
        struct metricgroup_add_iter_data *d = data;
+       struct metric *m = NULL;
        int ret;
 
        if (!match_pe_metric(pe, d->metric))
                return 0;
 
-       ret = add_metric(d->metric_list, pe, d->metric_no_group, d->m, NULL, d->ids);
+       ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
        if (ret)
                return ret;
 
@@ -1114,7 +1121,6 @@ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
                                .metric_list = &list,
                                .metric = metric,
                                .metric_no_group = metric_no_group,
-                               .m = &m,
                                .ids = &ids,
                                .has_match = &has_match,
                                .ret = &ret,
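
The evsel_same_pmu() helper added above exists because pmu_name can be NULL for some events, so the previous bare strcmp() on the leaders' PMU names could dereference a NULL pointer. The same null-safe comparison in isolation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Treat a missing PMU name as "not the same PMU" instead of crashing. */
static bool same_pmu(const char *a, const char *b)
{
        if (!a || !b)
                return false;
        return strcmp(a, b) == 0;
}

int main(void)
{
        printf("%d %d %d\n",
               same_pmu("cpu", "cpu"),          /* 1 */
               same_pmu("cpu", "uncore_imc"),   /* 0 */
               same_pmu(NULL, "cpu"));          /* 0, no NULL dereference */
        return 0;
}
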
index 5390158cdb401ebbfa7940118827c58382559a2c..09cb3a6672f3e45061c6332301f9178435b1ad43 100644 (file)
@@ -1249,6 +1249,8 @@ static void dump_isst_config(int arg)
        isst_ctdp_display_information_end(outf);
 }
 
+static void adjust_scaling_max_from_base_freq(int cpu);
+
 static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                                  void *arg4)
 {
@@ -1267,6 +1269,9 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                        int pkg_id = get_physical_package_id(cpu);
                        int die_id = get_physical_die_id(cpu);
 
+                       /* Wait for updated base frequencies */
+                       usleep(2000);
+
                        fprintf(stderr, "Option is set to online/offline\n");
                        ctdp_level.core_cpumask_size =
                                alloc_cpu_set(&ctdp_level.core_cpumask);
@@ -1283,6 +1288,7 @@ static void set_tdp_level_for_cpu(int cpu, void *arg1, void *arg2, void *arg3,
                                        if (CPU_ISSET_S(i, ctdp_level.core_cpumask_size, ctdp_level.core_cpumask)) {
                                                fprintf(stderr, "online cpu %d\n", i);
                                                set_cpu_online_offline(i, 1);
+                                               adjust_scaling_max_from_base_freq(i);
                                        } else {
                                                fprintf(stderr, "offline cpu %d\n", i);
                                                set_cpu_online_offline(i, 0);
@@ -1440,6 +1446,31 @@ static int set_cpufreq_scaling_min_max(int cpu, int max, int freq)
        return 0;
 }
 
+static int no_turbo(void)
+{
+       return parse_int_file(0, "/sys/devices/system/cpu/intel_pstate/no_turbo");
+}
+
+static void adjust_scaling_max_from_base_freq(int cpu)
+{
+       int base_freq, scaling_max_freq;
+
+       scaling_max_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
+       base_freq = get_cpufreq_base_freq(cpu);
+       if (scaling_max_freq < base_freq || no_turbo())
+               set_cpufreq_scaling_min_max(cpu, 1, base_freq);
+}
+
+static void adjust_scaling_min_from_base_freq(int cpu)
+{
+       int base_freq, scaling_min_freq;
+
+       scaling_min_freq = parse_int_file(0, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_min_freq", cpu);
+       base_freq = get_cpufreq_base_freq(cpu);
+       if (scaling_min_freq < base_freq)
+               set_cpufreq_scaling_min_max(cpu, 0, base_freq);
+}
+
 static int set_clx_pbf_cpufreq_scaling_min_max(int cpu)
 {
        struct isst_pkg_ctdp_level_info *ctdp_level;
@@ -1537,6 +1568,7 @@ static void set_scaling_min_to_cpuinfo_max(int cpu)
                        continue;
 
                set_cpufreq_scaling_min_max_from_cpuinfo(i, 1, 0);
+               adjust_scaling_min_from_base_freq(i);
        }
 }
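
The new adjust_scaling_{max,min}_from_base_freq() helpers above read the cpufreq sysfs attributes through the tool's parse_int_file() and get_cpufreq_base_freq() helpers and raise the scaling limit to the base frequency when it has fallen below it. A rough stand-alone sketch of the same idea, assuming the intel_pstate base_frequency attribute is present; read_sysfs_int() is a hypothetical replacement for the tool's own helpers and error handling is reduced to returning -1.

#include <stdio.h>

static long read_sysfs_int(const char *fmt, int cpu)
{
        char path[256];
        FILE *f;
        long val = -1;

        snprintf(path, sizeof(path), fmt, cpu);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%ld", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        int cpu = 0;
        long max = read_sysfs_int("/sys/devices/system/cpu/cpu%d/cpufreq/scaling_max_freq", cpu);
        long base = read_sysfs_int("/sys/devices/system/cpu/cpu%d/cpufreq/base_frequency", cpu);

        if (max >= 0 && base >= 0 && max < base)
                printf("would raise cpu%d scaling_max_freq %ld -> %ld\n", cpu, max, base);
        return 0;
}
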
 
index 21516e293d1711462af704366f7e1b8512325e44..e808a47c839bf6131f22debd2936f42bc9fdae27 100755 (executable)
@@ -43,9 +43,9 @@ class KunitStatus(Enum):
        BUILD_FAILURE = auto()
        TEST_FAILURE = auto()
 
-def get_kernel_root_path():
-       parts = sys.argv[0] if not __file__ else __file__
-       parts = os.path.realpath(parts).split('tools/testing/kunit')
+def get_kernel_root_path() -> str:
+       path = sys.argv[0] if not __file__ else __file__
+       parts = os.path.realpath(path).split('tools/testing/kunit')
        if len(parts) != 2:
                sys.exit(1)
        return parts[0]
@@ -171,7 +171,7 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree,
                                exec_result.elapsed_time))
        return parse_result
 
-def add_common_opts(parser):
+def add_common_opts(parser) -> None:
        parser.add_argument('--build_dir',
                            help='As in the make command, it specifies the build '
                            'directory.',
@@ -183,13 +183,13 @@ def add_common_opts(parser):
                            help='Run all KUnit tests through allyesconfig',
                            action='store_true')
 
-def add_build_opts(parser):
+def add_build_opts(parser) -> None:
        parser.add_argument('--jobs',
                            help='As in the make command, "Specifies  the number of '
                            'jobs (commands) to run simultaneously."',
                            type=int, default=8, metavar='jobs')
 
-def add_exec_opts(parser):
+def add_exec_opts(parser) -> None:
        parser.add_argument('--timeout',
                            help='maximum number of seconds to allow for all tests '
                            'to run. This does not include time taken to build the '
@@ -198,7 +198,7 @@ def add_exec_opts(parser):
                            default=300,
                            metavar='timeout')
 
-def add_parse_opts(parser):
+def add_parse_opts(parser) -> None:
        parser.add_argument('--raw_output', help='don\'t format output from kernel',
                            action='store_true')
        parser.add_argument('--json',
@@ -256,10 +256,7 @@ def main(argv, linux=None):
                        os.mkdir(cli_args.build_dir)
 
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitRequest(cli_args.raw_output,
                                       cli_args.timeout,
@@ -277,10 +274,7 @@ def main(argv, linux=None):
                        os.mkdir(cli_args.build_dir)
 
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitConfigRequest(cli_args.build_dir,
                                             cli_args.make_options)
@@ -292,10 +286,7 @@ def main(argv, linux=None):
                        sys.exit(1)
        elif cli_args.subcommand == 'build':
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                request = KunitBuildRequest(cli_args.jobs,
                                            cli_args.build_dir,
@@ -309,10 +300,7 @@ def main(argv, linux=None):
                        sys.exit(1)
        elif cli_args.subcommand == 'exec':
                if not linux:
-                       linux = kunit_kernel.LinuxSourceTree()
-
-               linux.create_kunitconfig(cli_args.build_dir)
-               linux.read_kunitconfig(cli_args.build_dir)
+                       linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir)
 
                exec_request = KunitExecRequest(cli_args.timeout,
                                                cli_args.build_dir,
index 02ffc3a3e5dc7f4b85156de813460ee89847fa61..bdd60230764b02fe120a01942e49ebf0983af592 100644 (file)
@@ -8,6 +8,7 @@
 
 import collections
 import re
+from typing import List, Set
 
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
@@ -30,10 +31,10 @@ class KconfigParseError(Exception):
 class Kconfig(object):
        """Represents defconfig or .config specified using the Kconfig language."""
 
-       def __init__(self):
-               self._entries = []
+       def __init__(self) -> None:
+               self._entries = []  # type: List[KconfigEntry]
 
-       def entries(self):
+       def entries(self) -> Set[KconfigEntry]:
                return set(self._entries)
 
        def add_entry(self, entry: KconfigEntry) -> None:
index 624b31b2dbd620cf1fb8bf6e301d517d55c2ee5a..f5cca5c38cacfc6617de2bf931b6ca2bfb7c185d 100644 (file)
@@ -13,7 +13,7 @@ import kunit_parser
 
 from kunit_parser import TestStatus
 
-def get_json_result(test_result, def_config, build_dir, json_path):
+def get_json_result(test_result, def_config, build_dir, json_path) -> str:
        sub_groups = []
 
        # Each test suite is mapped to a KernelCI sub_group
index 698358c9c0d60951eade09cc50e012a00a8eb446..2076a5a2d060a680793b24fb15c09ec2ed80cb1b 100644 (file)
@@ -11,6 +11,7 @@ import subprocess
 import os
 import shutil
 import signal
+from typing import Iterator
 
 from contextlib import ExitStack
 
@@ -39,7 +40,7 @@ class BuildError(Exception):
 class LinuxSourceTreeOperations(object):
        """An abstraction over command line operations performed on a source tree."""
 
-       def make_mrproper(self):
+       def make_mrproper(self) -> None:
                try:
                        subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT)
                except OSError as e:
@@ -47,7 +48,7 @@ class LinuxSourceTreeOperations(object):
                except subprocess.CalledProcessError as e:
                        raise ConfigError(e.output.decode())
 
-       def make_olddefconfig(self, build_dir, make_options):
+       def make_olddefconfig(self, build_dir, make_options) -> None:
                command = ['make', 'ARCH=um', 'olddefconfig']
                if make_options:
                        command.extend(make_options)
@@ -60,7 +61,7 @@ class LinuxSourceTreeOperations(object):
                except subprocess.CalledProcessError as e:
                        raise ConfigError(e.output.decode())
 
-       def make_allyesconfig(self, build_dir, make_options):
+       def make_allyesconfig(self, build_dir, make_options) -> None:
                kunit_parser.print_with_timestamp(
                        'Enabling all CONFIGs for UML...')
                command = ['make', 'ARCH=um', 'allyesconfig']
@@ -82,7 +83,7 @@ class LinuxSourceTreeOperations(object):
                kunit_parser.print_with_timestamp(
                        'Starting Kernel with all configs takes a few minutes...')
 
-       def make(self, jobs, build_dir, make_options):
+       def make(self, jobs, build_dir, make_options) -> None:
                command = ['make', 'ARCH=um', '--jobs=' + str(jobs)]
                if make_options:
                        command.extend(make_options)
@@ -100,7 +101,7 @@ class LinuxSourceTreeOperations(object):
                if stderr:  # likely only due to build warnings
                        print(stderr.decode())
 
-       def linux_bin(self, params, timeout, build_dir):
+       def linux_bin(self, params, timeout, build_dir) -> None:
                """Runs the Linux UML binary. Must be named 'linux'."""
                linux_bin = get_file_path(build_dir, 'linux')
                outfile = get_outfile_path(build_dir)
@@ -110,41 +111,42 @@ class LinuxSourceTreeOperations(object):
                                                   stderr=subprocess.STDOUT)
                        process.wait(timeout)
 
-def get_kconfig_path(build_dir):
+def get_kconfig_path(build_dir) -> str:
        return get_file_path(build_dir, KCONFIG_PATH)
 
-def get_kunitconfig_path(build_dir):
+def get_kunitconfig_path(build_dir) -> str:
        return get_file_path(build_dir, KUNITCONFIG_PATH)
 
-def get_outfile_path(build_dir):
+def get_outfile_path(build_dir) -> str:
        return get_file_path(build_dir, OUTFILE_PATH)
 
 class LinuxSourceTree(object):
        """Represents a Linux kernel source tree with KUnit tests."""
 
-       def __init__(self):
-               self._ops = LinuxSourceTreeOperations()
+       def __init__(self, build_dir: str, load_config=True, defconfig=DEFAULT_KUNITCONFIG_PATH) -> None:
                signal.signal(signal.SIGINT, self.signal_handler)
 
-       def clean(self):
-               try:
-                       self._ops.make_mrproper()
-               except ConfigError as e:
-                       logging.error(e)
-                       return False
-               return True
+               self._ops = LinuxSourceTreeOperations()
+
+               if not load_config:
+                       return
 
-       def create_kunitconfig(self, build_dir, defconfig=DEFAULT_KUNITCONFIG_PATH):
                kunitconfig_path = get_kunitconfig_path(build_dir)
                if not os.path.exists(kunitconfig_path):
                        shutil.copyfile(defconfig, kunitconfig_path)
 
-       def read_kunitconfig(self, build_dir):
-               kunitconfig_path = get_kunitconfig_path(build_dir)
                self._kconfig = kunit_config.Kconfig()
                self._kconfig.read_from_file(kunitconfig_path)
 
-       def validate_config(self, build_dir):
+       def clean(self) -> bool:
+               try:
+                       self._ops.make_mrproper()
+               except ConfigError as e:
+                       logging.error(e)
+                       return False
+               return True
+
+       def validate_config(self, build_dir) -> bool:
                kconfig_path = get_kconfig_path(build_dir)
                validated_kconfig = kunit_config.Kconfig()
                validated_kconfig.read_from_file(kconfig_path)
@@ -158,7 +160,7 @@ class LinuxSourceTree(object):
                        return False
                return True
 
-       def build_config(self, build_dir, make_options):
+       def build_config(self, build_dir, make_options) -> bool:
                kconfig_path = get_kconfig_path(build_dir)
                if build_dir and not os.path.exists(build_dir):
                        os.mkdir(build_dir)
@@ -170,7 +172,7 @@ class LinuxSourceTree(object):
                        return False
                return self.validate_config(build_dir)
 
-       def build_reconfig(self, build_dir, make_options):
+       def build_reconfig(self, build_dir, make_options) -> bool:
                """Creates a new .config if it is not a subset of the .kunitconfig."""
                kconfig_path = get_kconfig_path(build_dir)
                if os.path.exists(kconfig_path):
@@ -186,7 +188,7 @@ class LinuxSourceTree(object):
                        print('Generating .config ...')
                        return self.build_config(build_dir, make_options)
 
-       def build_um_kernel(self, alltests, jobs, build_dir, make_options):
+       def build_um_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
                try:
                        if alltests:
                                self._ops.make_allyesconfig(build_dir, make_options)
@@ -197,7 +199,7 @@ class LinuxSourceTree(object):
                        return False
                return self.validate_config(build_dir)
 
-       def run_kernel(self, args=[], build_dir='', timeout=None):
+       def run_kernel(self, args=[], build_dir='', timeout=None) -> Iterator[str]:
                args.extend(['mem=1G', 'console=tty'])
                self._ops.linux_bin(args, timeout, build_dir)
                outfile = get_outfile_path(build_dir)
@@ -206,6 +208,6 @@ class LinuxSourceTree(object):
                        for line in file:
                                yield line
 
-       def signal_handler(self, sig, frame):
+       def signal_handler(self, sig, frame) -> None:
                logging.error('Build interruption occurred. Cleaning console.')
                subprocess.call(['stty', 'sane'])
index 6614ec4d08989696b82cc67c1b8c3a92707da740..e8bcc139702e29da713a5275624c9b1693fa91eb 100644 (file)
@@ -12,32 +12,32 @@ from collections import namedtuple
 from datetime import datetime
 from enum import Enum, auto
 from functools import reduce
-from typing import List, Optional, Tuple
+from typing import Iterable, Iterator, List, Optional, Tuple
 
 TestResult = namedtuple('TestResult', ['status','suites','log'])
 
 class TestSuite(object):
-       def __init__(self):
-               self.status = None
-               self.name = None
-               self.cases = []
+       def __init__(self) -> None:
+               self.status = TestStatus.SUCCESS
+               self.name = ''
+               self.cases = []  # type: List[TestCase]
 
-       def __str__(self):
-               return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')'
+       def __str__(self) -> str:
+               return 'TestSuite(' + str(self.status) + ',' + self.name + ',' + str(self.cases) + ')'
 
-       def __repr__(self):
+       def __repr__(self) -> str:
                return str(self)
 
 class TestCase(object):
-       def __init__(self):
-               self.status = None
+       def __init__(self) -> None:
+               self.status = TestStatus.SUCCESS
                self.name = ''
-               self.log = []
+               self.log = []  # type: List[str]
 
-       def __str__(self):
-               return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')'
+       def __str__(self) -> str:
+               return 'TestCase(' + str(self.status) + ',' + self.name + ',' + str(self.log) + ')'
 
-       def __repr__(self):
+       def __repr__(self) -> str:
                return str(self)
 
 class TestStatus(Enum):
@@ -51,7 +51,7 @@ kunit_start_re = re.compile(r'TAP version [0-9]+$')
 kunit_end_re = re.compile('(List of all partitions:|'
                          'Kernel panic - not syncing: VFS:)')
 
-def isolate_kunit_output(kernel_output):
+def isolate_kunit_output(kernel_output) -> Iterator[str]:
        started = False
        for line in kernel_output:
                line = line.rstrip()  # line always has a trailing \n
@@ -64,7 +64,7 @@ def isolate_kunit_output(kernel_output):
                elif started:
                        yield line[prefix_len:] if prefix_len > 0 else line
 
-def raw_output(kernel_output):
+def raw_output(kernel_output) -> None:
        for line in kernel_output:
                print(line.rstrip())
 
@@ -72,36 +72,36 @@ DIVIDER = '=' * 60
 
 RESET = '\033[0;0m'
 
-def red(text):
+def red(text) -> str:
        return '\033[1;31m' + text + RESET
 
-def yellow(text):
+def yellow(text) -> str:
        return '\033[1;33m' + text + RESET
 
-def green(text):
+def green(text) -> str:
        return '\033[1;32m' + text + RESET
 
-def print_with_timestamp(message):
+def print_with_timestamp(message) -> None:
        print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
 
-def format_suite_divider(message):
+def format_suite_divider(message) -> str:
        return '======== ' + message + ' ========'
 
-def print_suite_divider(message):
+def print_suite_divider(message) -> None:
        print_with_timestamp(DIVIDER)
        print_with_timestamp(format_suite_divider(message))
 
-def print_log(log):
+def print_log(log) -> None:
        for m in log:
                print_with_timestamp(m)
 
 TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$')
 
-def consume_non_diagnositic(lines: List[str]) -> None:
+def consume_non_diagnostic(lines: List[str]) -> None:
        while lines and not TAP_ENTRIES.match(lines[0]):
                lines.pop(0)
 
-def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None:
+def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
        while lines and not TAP_ENTRIES.match(lines[0]):
                test_case.log.append(lines[0])
                lines.pop(0)
@@ -113,7 +113,7 @@ OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
 OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
 
 def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        if not lines:
                test_case.status = TestStatus.TEST_CRASHED
                return True
@@ -139,7 +139,7 @@ SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# (.*)$')
 DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^[\s]+# .*?: kunit test case crashed!$')
 
 def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        if not lines:
                return False
        line = lines[0]
@@ -155,7 +155,7 @@ def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool:
 
 def parse_test_case(lines: List[str]) -> Optional[TestCase]:
        test_case = TestCase()
-       save_non_diagnositic(lines, test_case)
+       save_non_diagnostic(lines, test_case)
        while parse_diagnostic(lines, test_case):
                pass
        if parse_ok_not_ok_test_case(lines, test_case):
@@ -166,7 +166,7 @@ def parse_test_case(lines: List[str]) -> Optional[TestCase]:
 SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$')
 
 def parse_subtest_header(lines: List[str]) -> Optional[str]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines:
                return None
        match = SUBTEST_HEADER.match(lines[0])
@@ -179,7 +179,7 @@ def parse_subtest_header(lines: List[str]) -> Optional[str]:
 SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)')
 
 def parse_subtest_plan(lines: List[str]) -> Optional[int]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        match = SUBTEST_PLAN.match(lines[0])
        if match:
                lines.pop(0)
@@ -202,7 +202,7 @@ def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
 def parse_ok_not_ok_test_suite(lines: List[str],
                               test_suite: TestSuite,
                               expected_suite_index: int) -> bool:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines:
                test_suite.status = TestStatus.TEST_CRASHED
                return False
@@ -224,18 +224,17 @@ def parse_ok_not_ok_test_suite(lines: List[str],
        else:
                return False
 
-def bubble_up_errors(to_status, status_container_list) -> TestStatus:
-       status_list = map(to_status, status_container_list)
-       return reduce(max_status, status_list, TestStatus.SUCCESS)
+def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
+       return reduce(max_status, statuses, TestStatus.SUCCESS)
 
 def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
-       max_test_case_status = bubble_up_errors(lambda x: x.status, test_suite.cases)
+       max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
        return max_status(max_test_case_status, test_suite.status)
 
 def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[TestSuite]:
        if not lines:
                return None
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        test_suite = TestSuite()
        test_suite.status = TestStatus.SUCCESS
        name = parse_subtest_header(lines)
@@ -264,7 +263,7 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> Optional[Te
 TAP_HEADER = re.compile(r'^TAP version 14$')
 
 def parse_tap_header(lines: List[str]) -> bool:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if TAP_HEADER.match(lines[0]):
                lines.pop(0)
                return True
@@ -274,7 +273,7 @@ def parse_tap_header(lines: List[str]) -> bool:
 TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)')
 
 def parse_test_plan(lines: List[str]) -> Optional[int]:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        match = TEST_PLAN.match(lines[0])
        if match:
                lines.pop(0)
@@ -282,11 +281,11 @@ def parse_test_plan(lines: List[str]) -> Optional[int]:
        else:
                return None
 
-def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
-       return bubble_up_errors(lambda x: x.status, test_suite_list)
+def bubble_up_suite_errors(test_suites: Iterable[TestSuite]) -> TestStatus:
+       return bubble_up_errors(x.status for x in test_suites)
 
 def parse_test_result(lines: List[str]) -> TestResult:
-       consume_non_diagnositic(lines)
+       consume_non_diagnostic(lines)
        if not lines or not parse_tap_header(lines):
                return TestResult(TestStatus.NO_TESTS, [], lines)
        expected_test_suite_num = parse_test_plan(lines)
index c0fe73a17ed1234ae7517b452364b32e58760771..3bfcf00c0a673771498f586410b062a115bd8220 100644 (file)
@@ -34,61 +34,6 @@ struct storage {
        struct bpf_spin_lock lock;
 };
 
-/* Copies an rm binary to a temp file. dest is a mkstemp template */
-static int copy_rm(char *dest)
-{
-       int fd_in, fd_out = -1, ret = 0;
-       struct stat stat;
-       char *buf = NULL;
-
-       fd_in = open("/bin/rm", O_RDONLY);
-       if (fd_in < 0)
-               return -errno;
-
-       fd_out = mkstemp(dest);
-       if (fd_out < 0) {
-               ret = -errno;
-               goto out;
-       }
-
-       ret = fstat(fd_in, &stat);
-       if (ret == -1) {
-               ret = -errno;
-               goto out;
-       }
-
-       buf = malloc(stat.st_blksize);
-       if (!buf) {
-               ret = -errno;
-               goto out;
-       }
-
-       while (ret = read(fd_in, buf, stat.st_blksize), ret > 0) {
-               ret = write(fd_out, buf, ret);
-               if (ret < 0) {
-                       ret = -errno;
-                       goto out;
-
-               }
-       }
-       if (ret < 0) {
-               ret = -errno;
-               goto out;
-
-       }
-
-       /* Set executable permission on the copied file */
-       ret = chmod(dest, 0100);
-       if (ret == -1)
-               ret = -errno;
-
-out:
-       free(buf);
-       close(fd_in);
-       close(fd_out);
-       return ret;
-}
-
 /* Fork and exec the provided rm binary and return the exit code of the
  * forked process and its pid.
  */
@@ -168,9 +113,11 @@ static bool check_syscall_operations(int map_fd, int obj_fd)
 
 void test_test_local_storage(void)
 {
-       char tmp_exec_path[PATH_MAX] = "/tmp/copy_of_rmXXXXXX";
+       char tmp_dir_path[64] = "/tmp/local_storageXXXXXX";
        int err, serv_sk = -1, task_fd = -1, rm_fd = -1;
        struct local_storage *skel = NULL;
+       char tmp_exec_path[64];
+       char cmd[256];
 
        skel = local_storage__open_and_load();
        if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -189,18 +136,24 @@ void test_test_local_storage(void)
                                      task_fd))
                goto close_prog;
 
-       err = copy_rm(tmp_exec_path);
-       if (CHECK(err < 0, "copy_rm", "err %d errno %d\n", err, errno))
+       if (CHECK(!mkdtemp(tmp_dir_path), "mkdtemp",
+                 "unable to create tmpdir: %d\n", errno))
                goto close_prog;
 
+       snprintf(tmp_exec_path, sizeof(tmp_exec_path), "%s/copy_of_rm",
+                tmp_dir_path);
+       snprintf(cmd, sizeof(cmd), "cp /bin/rm %s", tmp_exec_path);
+       if (CHECK_FAIL(system(cmd)))
+               goto close_prog_rmdir;
+
        rm_fd = open(tmp_exec_path, O_RDONLY);
        if (CHECK(rm_fd < 0, "open", "failed to open %s err:%d, errno:%d",
                  tmp_exec_path, rm_fd, errno))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        if (!check_syscall_operations(bpf_map__fd(skel->maps.inode_storage_map),
                                      rm_fd))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        /* Sets skel->bss->monitored_pid to the pid of the forked child
         * forks a child process that executes tmp_exec_path and tries to
@@ -209,33 +162,36 @@ void test_test_local_storage(void)
         */
        err = run_self_unlink(&skel->bss->monitored_pid, tmp_exec_path);
        if (CHECK(err != EPERM, "run_self_unlink", "err %d want EPERM\n", err))
-               goto close_prog_unlink;
+               goto close_prog_rmdir;
 
        /* Set the process being monitored to be the current process */
        skel->bss->monitored_pid = getpid();
 
-       /* Remove the temporary created executable */
-       err = unlink(tmp_exec_path);
-       if (CHECK(err != 0, "unlink", "unable to unlink %s: %d", tmp_exec_path,
-                 errno))
-               goto close_prog_unlink;
+       /* Move copy_of_rm to a new location so that it triggers the
+        * inode_rename LSM hook with a new_dentry that has a NULL inode ptr.
+        */
+       snprintf(cmd, sizeof(cmd), "mv %s/copy_of_rm %s/check_null_ptr",
+                tmp_dir_path, tmp_dir_path);
+       if (CHECK_FAIL(system(cmd)))
+               goto close_prog_rmdir;
 
        CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
              "inode_local_storage not set\n");
 
        serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
        if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
-               goto close_prog;
+               goto close_prog_rmdir;
 
        CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
              "sk_local_storage not set\n");
 
        if (!check_syscall_operations(bpf_map__fd(skel->maps.sk_storage_map),
                                      serv_sk))
-               goto close_prog;
+               goto close_prog_rmdir;
 
-close_prog_unlink:
-       unlink(tmp_exec_path);
+close_prog_rmdir:
+       snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir_path);
+       system(cmd);
 close_prog:
        close(serv_sk);
        close(rm_fd);
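
The rework above switches the test from hand-rolled file copying to a private directory created with mkdtemp(), shell commands built with snprintf(), and a recursive rm for cleanup, which also lets it rename the copy to exercise the inode_rename hook. The directory-handling pattern in isolation, with error handling trimmed:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char tmp_dir[] = "/tmp/local_storageXXXXXX";   /* mkdtemp template */
        char cmd[256];

        if (!mkdtemp(tmp_dir)) {        /* fills in the XXXXXX in place */
                perror("mkdtemp");
                return 1;
        }
        snprintf(cmd, sizeof(cmd), "cp /bin/rm %s/copy_of_rm", tmp_dir);
        if (system(cmd))
                fprintf(stderr, "copy failed\n");

        /* ... a real test would exec and rename the copy here ... */

        snprintf(cmd, sizeof(cmd), "rm -rf %s", tmp_dir);
        return system(cmd) ? 1 : 0;
}
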
index 3e3de130f28f303e30a45bff6501e502f905fa93..95868bc7ada9873efcbf9edb16b6bed5f8ec1b4c 100644 (file)
@@ -50,7 +50,6 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
        __u32 pid = bpf_get_current_pid_tgid() >> 32;
        struct local_storage *storage;
        bool is_self_unlink;
-       int err;
 
        if (pid != monitored_pid)
                return 0;
@@ -66,8 +65,27 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
                        return -EPERM;
        }
 
-       storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
-                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
+       return 0;
+}
+
+SEC("lsm/inode_rename")
+int BPF_PROG(inode_rename, struct inode *old_dir, struct dentry *old_dentry,
+            struct inode *new_dir, struct dentry *new_dentry,
+            unsigned int flags)
+{
+       __u32 pid = bpf_get_current_pid_tgid() >> 32;
+       struct local_storage *storage;
+       int err;
+
+       /* new_dentry->d_inode can be NULL when the inode is renamed to a file
+        * that did not exist before. The helper should be able to handle this
+        * NULL pointer.
+        */
+       bpf_inode_storage_get(&inode_storage_map, new_dentry->d_inode, 0,
+                             BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+       storage = bpf_inode_storage_get(&inode_storage_map, old_dentry->d_inode,
+                                       0, 0);
        if (!storage)
                return 0;
 
@@ -76,7 +94,7 @@ int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
                inode_storage_result = -1;
        bpf_spin_unlock(&storage->lock);
 
-       err = bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+       err = bpf_inode_storage_delete(&inode_storage_map, old_dentry->d_inode);
        if (!err)
                inode_storage_result = err;
 
@@ -133,37 +151,18 @@ int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
        return 0;
 }
 
-SEC("lsm/file_open")
-int BPF_PROG(file_open, struct file *file)
-{
-       __u32 pid = bpf_get_current_pid_tgid() >> 32;
-       struct local_storage *storage;
-
-       if (pid != monitored_pid)
-               return 0;
-
-       if (!file->f_inode)
-               return 0;
-
-       storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
-                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
-       if (!storage)
-               return 0;
-
-       bpf_spin_lock(&storage->lock);
-       storage->value = DUMMY_STORAGE_VALUE;
-       bpf_spin_unlock(&storage->lock);
-       return 0;
-}
-
 /* This uses the local storage to remember the inode of the binary that a
  * process was originally executing.
  */
 SEC("lsm/bprm_committed_creds")
 void BPF_PROG(exec, struct linux_binprm *bprm)
 {
+       __u32 pid = bpf_get_current_pid_tgid() >> 32;
        struct local_storage *storage;
 
+       if (pid != monitored_pid)
+               return;
+
        storage = bpf_task_storage_get(&task_storage_map,
                                       bpf_get_current_task_btf(), 0,
                                       BPF_LOCAL_STORAGE_GET_F_CREATE);
@@ -172,4 +171,13 @@ void BPF_PROG(exec, struct linux_binprm *bprm)
                storage->exec_inode = bprm->file->f_inode;
                bpf_spin_unlock(&storage->lock);
        }
+
+       storage = bpf_inode_storage_get(&inode_storage_map, bprm->file->f_inode,
+                                       0, BPF_LOCAL_STORAGE_GET_F_CREATE);
+       if (!storage)
+               return;
+
+       bpf_spin_lock(&storage->lock);
+       storage->value = DUMMY_STORAGE_VALUE;
+       bpf_spin_unlock(&storage->lock);
 }
index 777a81404fdbd15a9643151c108a51db7d4478f3..f8569f04064b7e9684e3a93e1e03ab1e2f13fa7c 100644 (file)
@@ -50,7 +50,7 @@
 #define MAX_INSNS      BPF_MAXINSNS
 #define MAX_TEST_INSNS 1000000
 #define MAX_FIXUPS     8
-#define MAX_NR_MAPS    20
+#define MAX_NR_MAPS    21
 #define MAX_TEST_RUNS  8
 #define POINTER_VALUE  0xcafe4all
 #define TEST_DATA_LEN  64
@@ -87,6 +87,7 @@ struct bpf_test {
        int fixup_sk_storage_map[MAX_FIXUPS];
        int fixup_map_event_output[MAX_FIXUPS];
        int fixup_map_reuseport_array[MAX_FIXUPS];
+       int fixup_map_ringbuf[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
        uint32_t insn_processed;
@@ -640,6 +641,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
        int *fixup_sk_storage_map = test->fixup_sk_storage_map;
        int *fixup_map_event_output = test->fixup_map_event_output;
        int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
+       int *fixup_map_ringbuf = test->fixup_map_ringbuf;
 
        if (test->fill_helper) {
                test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
@@ -817,6 +819,14 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
                        fixup_map_reuseport_array++;
                } while (*fixup_map_reuseport_array);
        }
+       if (*fixup_map_ringbuf) {
+               map_fds[20] = create_map(BPF_MAP_TYPE_RINGBUF, 0,
+                                          0, 4096);
+               do {
+                       prog[*fixup_map_ringbuf].imm = map_fds[20];
+                       fixup_map_ringbuf++;
+               } while (*fixup_map_ringbuf);
+       }
 }
 
 struct libcap {
index 45d43bf82f269190bc51ab29f7032e909103dca0..0b943897aaf6c136a8fea40a3804042b28c44e37 100644 (file)
        .result = ACCEPT,
        .result_unpriv = ACCEPT,
 },
+{
+       "check valid spill/fill, ptr to mem",
+       .insns = {
+       /* reserve 8 byte ringbuf memory */
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_2, 8),
+       BPF_MOV64_IMM(BPF_REG_3, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+       /* store a pointer to the reserved memory in R6 */
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       /* check whether the reservation was successful */
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+       /* spill R6(mem) into the stack */
+       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
+       /* fill it back in R7 */
+       BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
+       /* should be able to access *(R7) = 0 */
+       BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
+       /* submit the reserved ringbuf memory */
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+       BPF_MOV64_IMM(BPF_REG_2, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_ringbuf = { 1 },
+       .result = ACCEPT,
+       .result_unpriv = ACCEPT,
+},
 {
        "check corrupted spill/fill",
        .insns = {
index 84205c3a55ebed11ddbbf307d771338e0f4d1ac5..2b5707738609ef313e533c78492e25bbc6a02313 100755 (executable)
@@ -1055,7 +1055,6 @@ ipv6_addr_metric_test()
 
        check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
        log_test $? 0 "Set metric with peer route on local side"
-       log_test $? 0 "User specified metric on local address"
        check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
        log_test $? 0 "Set metric with peer route on peer side"
 
index 388e4492b81be44c5a0444a0ea5afeb4c80e7842..76efb1f8375e3c2ac9f7f5cc810a91d1f98d698a 100755 (executable)
@@ -203,7 +203,7 @@ multipath4_test()
        t0_rp12=$(link_stats_tx_packets_get $rp12)
        t0_rp13=$(link_stats_tx_packets_get $rp13)
 
-       ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+       ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
                -d 1msec -t udp "sp=1024,dp=0-32768"
 
        t1_rp12=$(link_stats_tx_packets_get $rp12)
index 79a2099279621a7810b2718f76f2964012423fa0..464821c587a5e8cd073db1b64ab8f39267a669b2 100755 (executable)
@@ -178,7 +178,7 @@ multipath4_test()
        t0_rp12=$(link_stats_tx_packets_get $rp12)
        t0_rp13=$(link_stats_tx_packets_get $rp13)
 
-       ip vrf exec vrf-h1 $MZ -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
+       ip vrf exec vrf-h1 $MZ $h1 -q -p 64 -A 192.0.2.2 -B 198.51.100.2 \
               -d 1msec -t udp "sp=1024,dp=0-32768"
 
        t1_rp12=$(link_stats_tx_packets_get $rp12)
index 7a1bf94c5bd38bc9837b87911a082a1ef287eee8..bdf450eaf60cff25d7d36308e0ddc94838423bc7 100755 (executable)
@@ -202,7 +202,7 @@ check_xfrm() {
        # 1: iptables -m policy rule count != 0
        rval=$1
        ip=$2
-       lret=0
+       local lret=0
 
        ip netns exec ns1 ping -q -c 1 10.0.2.$ip > /dev/null
 
@@ -287,6 +287,47 @@ check_hthresh_repeat()
        return 0
 }
 
+# insert non-overlapping policies in a random order and check that
+# all of them can be fetched using the traffic selectors.
+check_random_order()
+{
+       local ns=$1
+       local log=$2
+
+       for i in $(seq 100); do
+               ip -net $ns xfrm policy flush
+               for j in $(seq 0 16 255 | sort -R); do
+                       ip -net $ns xfrm policy add dst $j.0.0.0/24 dir out priority 10 action allow
+               done
+               for j in $(seq 0 16 255); do
+                       if ! ip -net $ns xfrm policy get dst $j.0.0.0/24 dir out > /dev/null; then
+                               echo "FAIL: $log" 1>&2
+                               return 1
+                       fi
+               done
+       done
+
+       for i in $(seq 100); do
+               ip -net $ns xfrm policy flush
+               for j in $(seq 0 16 255 | sort -R); do
+                       local addr=$(printf "e000:0000:%02x00::/56" $j)
+                       ip -net $ns xfrm policy add dst $addr dir out priority 10 action allow
+               done
+               for j in $(seq 0 16 255); do
+                       local addr=$(printf "e000:0000:%02x00::/56" $j)
+                       if ! ip -net $ns xfrm policy get dst $addr dir out > /dev/null; then
+                               echo "FAIL: $log" 1>&2
+                               return 1
+                       fi
+               done
+       done
+
+       ip -net $ns xfrm policy flush
+
+       echo "PASS: $log"
+       return 0
+}
+
 #check for needed privileges
 if [ "$(id -u)" -ne 0 ];then
        echo "SKIP: Need root privileges"
@@ -438,6 +479,8 @@ check_exceptions "exceptions and block policies after htresh change to normal"
 
 check_hthresh_repeat "policies with repeated htresh change"
 
+check_random_order ns3 "policies inserted in random order"
+
 for i in 1 2 3 4;do ip netns del ns$i;done
 
 exit $ret
index cb53a8b777e68a7fc49c5c7ed1d249cb13c0e1d1..c25cf7cd45e9fdb8ec328c5e5a8dff16bc993e05 100644 (file)
@@ -443,7 +443,6 @@ int test_alignment_handler_integer(void)
        LOAD_DFORM_TEST(ldu);
        LOAD_XFORM_TEST(ldx);
        LOAD_XFORM_TEST(ldux);
-       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stb);
        STORE_XFORM_TEST(stbx);
        STORE_DFORM_TEST(stbu);
@@ -462,7 +461,11 @@ int test_alignment_handler_integer(void)
        STORE_XFORM_TEST(stdx);
        STORE_DFORM_TEST(stdu);
        STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+       LOAD_DFORM_TEST(lmw);
        STORE_DFORM_TEST(stmw);
+#endif
 
        return rc;
 }
index 9e5c7f3f498a7937e13614413019588666671c87..0af4f02669a115f547d33b8ed8261201876dbc57 100644 (file)
@@ -290,5 +290,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_exec_prot");
+       return test_harness(test, "pkey_exec_prot");
 }
index 4f815d7c12145a61ba37ba83488b04c1f90e8ee2..2db76e56d4cb99ee9897817fb3557a2f3e8800fc 100644 (file)
@@ -329,5 +329,5 @@ static int test(void)
 
 int main(void)
 {
-       test_harness(test, "pkey_siginfo");
+       return test_harness(test, "pkey_siginfo");
 }
index fa9e3614d30edb150f6b8f7a071f0e351657dd08..8367d88ce39bf2b831d317c91ebb087cc034d096 100644 (file)
@@ -1292,6 +1292,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
                return -EINVAL;
        /* We can read the guest memory with __xxx_user() later on. */
        if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
+           (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
             !access_ok((void __user *)(unsigned long)mem->userspace_addr,
                        mem->memory_size))
                return -EINVAL;
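For context, a hedged illustration (not taken from the patch): untagged_addr() is defined per architecture and is a no-op on most of them, while arm64 strips the top-byte tag, so the added condition rejects any userspace address that changes when its tag is removed. A simplified, arm64-style model of that comparison, for illustration only; the real definition lives in the arch headers:

/* sketch only: reject memslot addresses that carry a top-byte tag */
#include <stdint.h>
#include <stdbool.h>

static inline uint64_t untagged(uint64_t addr)
{
	/* sign-extend from bit 55, clearing or setting bits 63..56,
	 * which mirrors how arm64 strips a top-byte tag */
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

static inline bool addr_is_tagged(uint64_t addr)
{
	return addr != untagged(addr);
}

With such a helper, the new check is equivalent to addr_is_tagged(mem->userspace_addr): memory regions must be registered with untagged userspace addresses.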