Merge tag 'samsung-soc-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux
authorArnd Bergmann <arnd@arndb.de>
Fri, 6 May 2022 20:07:12 +0000 (22:07 +0200)
committerArnd Bergmann <arnd@arndb.de>
Fri, 6 May 2022 20:07:13 +0000 (22:07 +0200)
Samsung mach/soc changes for v5.19

Cleanup: drop unneeded CONFIG_S3C24XX_PWM and fix some typos.

* tag 'samsung-soc-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux:
  ARM: s3c: fix typos in comments
  ARM: s3c: Drop config symbol S3C24XX_PWM

Link: https://lore.kernel.org/r/20220506081438.149192-6-krzysztof.kozlowski@linaro.org
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
922 files changed:
.mailmap
Documentation/ABI/testing/sysfs-class-firmware-attributes
Documentation/ABI/testing/sysfs-driver-intel_sdsi
Documentation/dev-tools/kunit/start.rst
Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-pmc.yaml
Documentation/devicetree/bindings/bus/ti-sysc.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-audss-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos-ext-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos4412-isp-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
Documentation/devicetree/bindings/clock/samsung,s2mps11.yaml
Documentation/devicetree/bindings/clock/samsung,s5pv210-audss-clock.yaml
Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
Documentation/devicetree/bindings/devfreq/event/samsung,exynos-nocp.yaml
Documentation/devicetree/bindings/devfreq/event/samsung,exynos-ppmu.yaml
Documentation/devicetree/bindings/display/bridge/chipone,icn6211.yaml
Documentation/devicetree/bindings/display/bridge/toshiba,tc358762.yaml
Documentation/devicetree/bindings/display/msm/dpu-qcm2290.yaml
Documentation/devicetree/bindings/display/panel/panel-mipi-dbi-spi.yaml
Documentation/devicetree/bindings/display/panel/panel-timing.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi-ddc.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-hdmi.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-decon.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos5433-mic.yaml
Documentation/devicetree/bindings/display/samsung/samsung,exynos7-decon.yaml
Documentation/devicetree/bindings/display/samsung/samsung,fimd.yaml
Documentation/devicetree/bindings/extcon/maxim,max77843.yaml
Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
Documentation/devicetree/bindings/hwmon/lltc,ltc4151.yaml
Documentation/devicetree/bindings/hwmon/microchip,mcp3021.yaml
Documentation/devicetree/bindings/hwmon/sensirion,sht15.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp102.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp108.yaml
Documentation/devicetree/bindings/hwmon/ti,tmp464.yaml
Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
Documentation/devicetree/bindings/i2c/samsung,s3c2410-i2c.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7476.yaml
Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
Documentation/devicetree/bindings/iio/dac/adi,ad5360.yaml
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
Documentation/devicetree/bindings/interrupt-controller/mrvl,intc.yaml
Documentation/devicetree/bindings/interrupt-controller/samsung,exynos4210-combiner.yaml
Documentation/devicetree/bindings/iommu/apple,sart.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/leds/maxim,max77693.yaml
Documentation/devicetree/bindings/media/coda.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-decoder.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-encoder.yaml
Documentation/devicetree/bindings/media/mediatek,vcodec-subdev-decoder.yaml
Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2-timings.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr2.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3-timings.yaml
Documentation/devicetree/bindings/memory-controllers/ddr/jedec,lpddr3.yaml
Documentation/devicetree/bindings/memory-controllers/marvell,mvebu-sdram-controller.yaml
Documentation/devicetree/bindings/memory-controllers/qca,ath79-ddr-controller.yaml
Documentation/devicetree/bindings/memory-controllers/renesas,h8300-bsc.yaml
Documentation/devicetree/bindings/memory-controllers/renesas,rpc-if.yaml
Documentation/devicetree/bindings/memory-controllers/samsung,exynos5422-dmc.yaml
Documentation/devicetree/bindings/memory-controllers/synopsys,ddrc-ecc.yaml
Documentation/devicetree/bindings/memory-controllers/ti,da8xx-ddrctl.yaml
Documentation/devicetree/bindings/mfd/maxim,max14577.yaml
Documentation/devicetree/bindings/mfd/maxim,max77686.yaml
Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
Documentation/devicetree/bindings/mfd/maxim,max77802.yaml
Documentation/devicetree/bindings/mfd/maxim,max77843.yaml
Documentation/devicetree/bindings/mfd/samsung,exynos5433-lpass.yaml
Documentation/devicetree/bindings/mfd/samsung,s2mpa01.yaml
Documentation/devicetree/bindings/mfd/samsung,s2mps11.yaml
Documentation/devicetree/bindings/mfd/samsung,s5m8767.yaml
Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.yaml
Documentation/devicetree/bindings/net/ethernet-controller.yaml
Documentation/devicetree/bindings/net/micrel.txt
Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
Documentation/devicetree/bindings/net/nfc/nxp,nci.yaml
Documentation/devicetree/bindings/net/nfc/nxp,pn532.yaml
Documentation/devicetree/bindings/net/nfc/nxp,pn544.yaml
Documentation/devicetree/bindings/net/nfc/st,st-nci.yaml
Documentation/devicetree/bindings/net/nfc/st,st21nfca.yaml
Documentation/devicetree/bindings/net/nfc/st,st95hf.yaml
Documentation/devicetree/bindings/net/nfc/ti,trf7970a.yaml
Documentation/devicetree/bindings/net/snps,dwmac.yaml
Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
Documentation/devicetree/bindings/net/xilinx_axienet.txt
Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/nvidia,tegra20-usb-phy.yaml
Documentation/devicetree/bindings/phy/qcom,usb-hs-phy.yaml
Documentation/devicetree/bindings/phy/samsung,dp-video-phy.yaml
Documentation/devicetree/bindings/phy/samsung,exynos-hdmi-phy.yaml
Documentation/devicetree/bindings/phy/samsung,exynos5250-sata-phy.yaml
Documentation/devicetree/bindings/phy/samsung,mipi-video-phy.yaml
Documentation/devicetree/bindings/phy/samsung,usb2-phy.yaml
Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-gpio-bank.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-pins-cfg.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl-wakeup-interrupt.yaml
Documentation/devicetree/bindings/pinctrl/samsung,pinctrl.yaml
Documentation/devicetree/bindings/power/renesas,apmu.yaml
Documentation/devicetree/bindings/power/supply/bq2415x.yaml
Documentation/devicetree/bindings/power/supply/maxim,max14577.yaml
Documentation/devicetree/bindings/power/supply/maxim,max77693.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/regulator/maxim,max14577.yaml
Documentation/devicetree/bindings/regulator/maxim,max77686.yaml
Documentation/devicetree/bindings/regulator/maxim,max77693.yaml
Documentation/devicetree/bindings/regulator/maxim,max77802.yaml
Documentation/devicetree/bindings/regulator/maxim,max77843.yaml
Documentation/devicetree/bindings/regulator/maxim,max8952.yaml
Documentation/devicetree/bindings/regulator/maxim,max8973.yaml
Documentation/devicetree/bindings/regulator/maxim,max8997.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mpa01.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps11.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps13.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps14.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mps15.yaml
Documentation/devicetree/bindings/regulator/samsung,s2mpu02.yaml
Documentation/devicetree/bindings/regulator/samsung,s5m8767.yaml
Documentation/devicetree/bindings/remoteproc/qcom,sc7280-wpss-pil.yaml
Documentation/devicetree/bindings/reset/altr,rst-mgr.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.txt [deleted file]
Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
Documentation/devicetree/bindings/reset/ath79-reset.txt [deleted file]
Documentation/devicetree/bindings/reset/berlin,reset.txt [deleted file]
Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt [deleted file]
Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
Documentation/devicetree/bindings/reset/lantiq,reset.txt [deleted file]
Documentation/devicetree/bindings/reset/lantiq,reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/marvell,berlin2-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/nuvoton,npcm-reset.txt [deleted file]
Documentation/devicetree/bindings/reset/nuvoton,npcm750-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/qca,ar7100-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt [deleted file]
Documentation/devicetree/bindings/reset/snps,axs10x-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/socfpga-reset.txt [deleted file]
Documentation/devicetree/bindings/reset/socionext,uniphier-glue-reset.yaml
Documentation/devicetree/bindings/reset/socionext,uniphier-reset.yaml
Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt [deleted file]
Documentation/devicetree/bindings/reset/st,sti-powerdown.txt [deleted file]
Documentation/devicetree/bindings/reset/st,stih407-picophyreset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rng/samsung,exynos5250-trng.yaml
Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml
Documentation/devicetree/bindings/sound/samsung,arndale.yaml
Documentation/devicetree/bindings/sound/samsung,smdk5250.yaml
Documentation/devicetree/bindings/sound/samsung,snow.yaml
Documentation/devicetree/bindings/sound/samsung,tm2.yaml
Documentation/devicetree/bindings/sound/st,stm32-sai.yaml
Documentation/devicetree/bindings/spi/renesas,sh-msiof.yaml
Documentation/devicetree/bindings/spi/samsung,spi-peripheral-props.yaml
Documentation/devicetree/bindings/spi/samsung,spi.yaml
Documentation/devicetree/bindings/sram/sram.yaml
Documentation/devicetree/bindings/thermal/samsung,exynos-thermal.yaml
Documentation/devicetree/bindings/timer/samsung,exynos4210-mct.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-dwc3.yaml
Documentation/devicetree/bindings/usb/samsung,exynos-usb2.yaml
Documentation/driver-api/dma-buf.rst
Documentation/filesystems/caching/backend-api.rst
Documentation/filesystems/caching/netfs-api.rst
Documentation/networking/bonding.rst
Documentation/networking/dsa/dsa.rst
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/vcpu-requests.rst
Documentation/virt/kvm/x86/amd-memory-encryption.rst
Documentation/virt/kvm/x86/errata.rst
Documentation/virt/kvm/x86/running-nested-guests.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/at91-kizbox3-hs.dts
arch/arm/boot/dts/at91-kizbox3_common.dtsi
arch/arm/boot/dts/at91-sam9_l9260.dts
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dm8168-evm.dts
arch/arm/boot/dts/imx28-ts4600.dts
arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
arch/arm/boot/dts/imx6ul-phytec-segin-peb-av-02.dtsi
arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
arch/arm/boot/dts/qcom-apq8064-pins.dtsi
arch/arm/boot/dts/qcom-ipq8064.dtsi
arch/arm/boot/dts/spear1310-evb.dts
arch/arm/boot/dts/spear1340-evb.dts
arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
arch/arm/boot/dts/stm32mp157c-ev1.dts
arch/arm/configs/gemini_defconfig
arch/arm/configs/imote2_defconfig [deleted file]
arch/arm/configs/u8500_defconfig
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-ep93xx/clock.c
arch/arm/mach-iop32x/cp6.c
arch/arm/mach-vexpress/spc.c
arch/arm64/boot/dts/freescale/imx8mm-var-som.dtsi
arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
arch/arm64/boot/dts/nvidia/tegra186-p3509-0000+p3636-0001.dts
arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p2894.dtsi
arch/arm64/boot/dts/nvidia/tegra210-p3450-0000.dts
arch/arm64/boot/dts/nvidia/tegra210-smaug.dts
arch/arm64/boot/dts/qcom/msm8996.dtsi
arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
arch/arm64/boot/dts/qcom/sdm845-oneplus-common.dtsi
arch/arm64/boot/dts/qcom/sdm845-shift-axolotl.dts
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/el2_setup.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/elfcore.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/module-plts.c
arch/arm64/kernel/patching.c
arch/arm64/kernel/proton-pack.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kvm/mmu.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-debug.c
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/mm/init.c
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/kvm_ppc.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/setup.h
arch/powerpc/include/asm/static_call.h
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/setup_64.c
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/book3s_64_entry.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/book3s_pr_papr.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/vas-sysfs.c
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_fp.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/entry-common.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/stacktrace.h
arch/s390/kernel/machine_kexec.c
arch/s390/kernel/processor.c
arch/s390/lib/test_unwind.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore_snb.c
arch/x86/events/msr.c
arch/x86/include/asm/asm.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/io.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/msi.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/perf_event.h
arch/x86/include/asm/static_call.h
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/tsx.c
arch/x86/kernel/crash_dump_64.c
arch/x86/kernel/static_call.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/x86.c
arch/x86/mm/tlb.c
arch/x86/net/bpf_jit_comp.c
arch/x86/power/cpu.c
block/bio.c
block/blk-mq.c
block/ioctl.c
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/sata_dwc_460ex.c
drivers/base/dd.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_state.c
drivers/block/drbd/drbd_state_change.h
drivers/block/null_blk/main.c
drivers/bus/brcmstb_gisb.c
drivers/bus/ti-sysc.c
drivers/cdrom/cdrom.c
drivers/char/random.c
drivers/cxl/pci.c
drivers/dma-buf/Makefile
drivers/dma-buf/dma-fence-array.c
drivers/dma-buf/selftests.h
drivers/dma-buf/st-dma-fence-unwrap.c [new file with mode: 0644]
drivers/dma-buf/sync_file.c
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/Kconfig
drivers/firmware/arm_scmi/base.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/common.h
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/optee.c
drivers/firmware/arm_scmi/perf.c
drivers/firmware/arm_scmi/power.c
drivers/firmware/arm_scmi/protocols.h [new file with mode: 0644]
drivers/firmware/arm_scmi/reset.c
drivers/firmware/arm_scmi/sensors.c
drivers/firmware/arm_scmi/system.c
drivers/firmware/arm_scmi/voltage.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/ObjectID.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/imx/dw_hdmi-imx.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_panel.c
drivers/gpu/drm/msm/dp/dp_panel.h
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/priv.h
drivers/gpu/drm/panel/panel-ilitek-ili9341.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/hv/channel_mgmt.c
drivers/hv/hv_balloon.c
drivers/hv/hv_common.c
drivers/hv/ring_buffer.c
drivers/hv/vmbus_drv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ismt.c
drivers/i2c/busses/i2c-pasemi-core.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/i2c-dev.c
drivers/infiniband/core/cm.c
drivers/infiniband/hw/hfi1/mmu_rb.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/iommu/omap-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-qcom-mpm.c
drivers/md/dm-integrity.c
drivers/md/dm-ps-historical-service-time.c
drivers/md/dm-zone.c
drivers/md/dm.c
drivers/media/platform/nxp/Kconfig
drivers/media/platform/rockchip/rga/rga.c
drivers/media/tuners/si2157.c
drivers/memory/Kconfig
drivers/memory/atmel-ebi.c
drivers/memory/brcmstb_dpfe.c
drivers/memory/da8xx-ddrctl.c
drivers/memory/emif.c
drivers/memory/fsl-corenet-cf.c
drivers/memory/fsl_ifc.c
drivers/memory/omap-gpmc.c
drivers/memory/renesas-rpc-if.c
drivers/memory/samsung/exynos5422-dmc.c
drivers/memory/tegra/mc.c
drivers/memory/ti-aemif.c
drivers/memory/ti-emif-pm.c
drivers/message/fusion/mptbase.c
drivers/misc/habanalabs/common/memory.c
drivers/mmc/core/block.c
drivers/mmc/core/core.c
drivers/mmc/core/mmc_test.c
drivers/mmc/host/mmci_stm32_sdmmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-xenon.c
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/realtek/Kconfig
drivers/net/dsa/realtek/realtek-smi.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
drivers/net/ethernet/fungible/funcore/fun_dev.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_arfs.c
drivers/net/ethernet/intel/ice/ice_fltr.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/mellanox/mlxsw/i2c.c
drivers/net/ethernet/micrel/Kconfig
drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
drivers/net/ethernet/microchip/lan966x/lan966x_switchdev.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sfc/tx_common.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/macvlan.c
drivers/net/mctp/mctp-i2c.c
drivers/net/mdio/fwnode_mdio.c
drivers/net/mdio/mdio-mscc-miim.c
drivers/net/phy/micrel.c
drivers/net/phy/microchip_t1.c
drivers/net/slip/slip.c
drivers/net/tun.c
drivers/net/usb/aqc111.c
drivers/net/veth.c
drivers/net/vrf.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ath10k/sdio.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/nvme/host/Kconfig
drivers/nvme/host/Makefile
drivers/nvme/host/apple.c [new file with mode: 0644]
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/pci/controller/pci-hyperv.c
drivers/perf/Kconfig
drivers/perf/fsl_imx8_ddr_perf.c
drivers/perf/qcom_l2_pmu.c
drivers/platform/x86/acerhdf.c
drivers/platform/x86/amd-pmc.c
drivers/platform/x86/barco-p50-gpio.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/think-lmi.c
drivers/platform/x86/think-lmi.h
drivers/power/supply/power_supply_core.c
drivers/power/supply/samsung-sdi-battery.c
drivers/regulator/atc260x-regulator.c
drivers/regulator/rtq2134-regulator.c
drivers/regulator/wm8994-regulator.c
drivers/reset/Kconfig
drivers/reset/core.c
drivers/reset/reset-meson.c
drivers/reset/reset-rzg2l-usbphy-ctrl.c
drivers/reset/reset-simple.c
drivers/reset/reset-uniphier-glue.c
drivers/reset/tegra/reset-bpmp.c
drivers/scsi/aha152x.c
drivers/scsi/aic7xxx/aic79xx_osm.h
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/aic7xxx/aic7xxx_osm.h
drivers/scsi/aic7xxx/aic7xxx_pci.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/bnx2i/bnx2i_iscsi.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/isci/host.c
drivers/scsi/libiscsi.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvme.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_config.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/pcmcia/sym53c500_cs.c
drivers/scsi/pm8001/pm80xx_hwi.c
drivers/scsi/pmcraid.c
drivers/scsi/pmcraid.h
drivers/scsi/qedi/qedi_iscsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_logging.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/sr.c
drivers/scsi/ufs/ufs-qcom.c
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshpb.c
drivers/scsi/virtio_scsi.c
drivers/scsi/zorro7xx.c
drivers/soc/Makefile
drivers/soc/apple/Kconfig
drivers/soc/apple/Makefile
drivers/soc/apple/rtkit-crashlog.c [new file with mode: 0644]
drivers/soc/apple/rtkit-internal.h [new file with mode: 0644]
drivers/soc/apple/rtkit.c [new file with mode: 0644]
drivers/soc/apple/sart.c [new file with mode: 0644]
drivers/soc/bcm/bcm63xx/bcm-pmb.c
drivers/soc/renesas/Kconfig
drivers/soc/renesas/renesas-soc.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-cadence-quadspi.c
drivers/spi/spi-mxic.c
drivers/spi/spi-rpc-if.c
drivers/spi/spi.c
drivers/staging/r8188eu/core/rtw_br_ext.c
drivers/target/target_core_user.c
drivers/tee/Kconfig
drivers/tee/optee/call.c
drivers/tee/optee/core.c
drivers/tee/optee/ffa_abi.c
drivers/tee/optee/optee_ffa.h
drivers/tee/optee/optee_private.h
drivers/tee/optee/optee_smc.h
drivers/tee/optee/smc_abi.c
drivers/tee/tee_core.c
drivers/tee/tee_shm.c
drivers/tty/serial/mpc52xx_uart.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vfio/pci/vfio_pci_core.c
drivers/video/fbdev/core/fbmem.c
drivers/virtio/virtio.c
drivers/xen/balloon.c
drivers/xen/unpopulated-alloc.c
fs/afs/write.c
fs/binfmt_elf.c
fs/btrfs/block-group.c
fs/btrfs/block-group.h
fs/btrfs/compression.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/cachefiles/namei.c
fs/cachefiles/xattr.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/link.c
fs/cifs/netmisc.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/file_table.c
fs/fscache/Kconfig
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/io.c
fs/io_uring.c
fs/namei.c
fs/nfs/Kconfig
fs/nfs/dir.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs42xattr.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/unlink.c
fs/nfsd/filecache.c
fs/nfsd/nfs2acl.c
fs/stat.c
fs/sysfs/file.c
include/acpi/acpi_bus.h
include/asm-generic/mshyperv.h
include/asm-generic/tlb.h
include/asm-generic/unaligned.h
include/dt-bindings/reset/amlogic,meson-s4-reset.h [new file with mode: 0644]
include/linux/arm_ffa.h
include/linux/bpf_verifier.h
include/linux/dma-fence-array.h
include/linux/dma-fence-chain.h
include/linux/dma-fence-unwrap.h [new file with mode: 0644]
include/linux/fscache.h
include/linux/gfp.h
include/linux/gpio/consumer.h
include/linux/gpio/driver.h
include/linux/kernel.h
include/linux/kfence.h
include/linux/kobject.h
include/linux/local_lock_internal.h
include/linux/mmc/core.h
include/linux/mmzone.h
include/linux/nfs_xdr.h
include/linux/scmi_protocol.h
include/linux/soc/apple/rtkit.h [new file with mode: 0644]
include/linux/soc/apple/sart.h [new file with mode: 0644]
include/linux/static_call.h
include/linux/sunrpc/svc.h
include/linux/sunrpc/xprt.h
include/linux/t10-pi.h
include/linux/tee_drv.h
include/linux/timex.h
include/linux/vfio_pci_core.h
include/linux/virtio_config.h
include/net/flow_dissector.h
include/net/mctp.h
include/scsi/libiscsi.h
include/scsi/scsi_transport_iscsi.h
include/sound/core.h
include/sound/memalloc.h
include/trace/events/sunrpc.h
include/uapi/linux/io_uring.h
include/uapi/linux/stddef.h
include/uapi/linux/tee.h
kernel/Makefile
kernel/cpu.c
kernel/dma/direct.h
kernel/entry/common.c
kernel/events/core.c
kernel/irq/affinity.c
kernel/irq_work.c
kernel/sched/core.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/smp.c
kernel/static_call.c
kernel/static_call_inline.c [new file with mode: 0644]
kernel/time/tick-sched.c
kernel/time/timer.c
kernel/trace/bpf_trace.c
kernel/trace/rethook.c
lib/kobject.c
lib/lz4/lz4_decompress.c
mm/compaction.c
mm/filemap.c
mm/highmem.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/hw_tags.c
mm/kasan/kasan.h
mm/kfence/core.c
mm/kfence/kfence.h
mm/kfence/report.c
mm/kmemleak.c
mm/list_lru.c
mm/mempolicy.c
mm/migrate.c
mm/mremap.c
mm/page_alloc.c
mm/page_io.c
mm/page_vma_mapped.c
mm/secretmem.c
mm/shmem.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/vmalloc.c
net/core/filter.c
net/core/flow_dissector.c
net/core/rtnetlink.c
net/core/skbuff.c
net/dsa/dsa2.c
net/dsa/master.c
net/ipv4/fib_semantics.c
net/ipv6/ip6_output.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/mac80211/debugfs_sta.c
net/mctp/af_mctp.c
net/mctp/route.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_bitwise.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_counter.c
net/netfilter/nft_last.c
net/netfilter/nft_limit.c
net/netfilter/nft_quota.c
net/netfilter/nft_socket.c
net/nfc/nci/core.c
net/openvswitch/actions.c
net/openvswitch/flow_netlink.c
net/rxrpc/net_ns.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_taprio.c
net/sctp/outqueue.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_clc.c
net/smc/smc_pnet.c
net/sunrpc/clnt.c
net/sunrpc/sched.c
net/sunrpc/socklib.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtsock.c
net/tls/tls_sw.c
net/wireless/nl80211.c
net/wireless/scan.c
scripts/gcc-plugins/latent_entropy_plugin.c
sound/core/init.c
sound/core/memalloc.c
sound/core/pcm_misc.c
sound/drivers/mtpav.c
sound/hda/hdac_i915.c
sound/hda/intel-dsp-config.c
sound/isa/galaxy/galaxy.c
sound/isa/sc6000.c
sound/oss/dmasound/dmasound.h
sound/oss/dmasound/dmasound_core.c
sound/pci/ad1889.c
sound/pci/ali5451/ali5451.c
sound/pci/als300.c
sound/pci/als4000.c
sound/pci/atiixp.c
sound/pci/atiixp_modem.c
sound/pci/au88x0/au88x0.c
sound/pci/aw2/aw2-alsa.c
sound/pci/azt3328.c
sound/pci/bt87x.c
sound/pci/ca0106/ca0106_main.c
sound/pci/cmipci.c
sound/pci/cs4281.c
sound/pci/cs5535audio/cs5535audio.c
sound/pci/echoaudio/echoaudio.c
sound/pci/emu10k1/emu10k1x.c
sound/pci/ens1370.c
sound/pci/es1938.c
sound/pci/es1968.c
sound/pci/fm801.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1724.c
sound/pci/intel8x0.c
sound/pci/intel8x0m.c
sound/pci/korg1212/korg1212.c
sound/pci/lola/lola.c
sound/pci/lx6464es/lx6464es.c
sound/pci/maestro3.c
sound/pci/nm256/nm256.c
sound/pci/oxygen/oxygen_lib.c
sound/pci/riptide/riptide.c
sound/pci/rme32.c
sound/pci/rme96.c
sound/pci/rme9652/hdsp.c
sound/pci/rme9652/hdspm.c
sound/pci/rme9652/rme9652.c
sound/pci/sis7019.c
sound/pci/sonicvibes.c
sound/pci/via82xx.c
sound/pci/via82xx_modem.c
sound/usb/pcm.c
sound/usb/usbaudio.h
sound/x86/intel_hdmi_audio.c
tools/arch/arm64/include/asm/cputype.h
tools/arch/x86/include/asm/msr-index.h
tools/bpf/bpftool/gen.c
tools/build/feature/Makefile
tools/include/uapi/linux/vhost.h
tools/lib/perf/evlist.c
tools/objtool/check.c
tools/perf/Documentation/perf.txt
tools/perf/Makefile.config
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/bench/numa.c
tools/perf/builtin-record.c
tools/perf/perf.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/util/annotate.c
tools/perf/util/arm64-frame-pointer-unwind-support.c
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/machine.c
tools/perf/util/parse-events.c
tools/perf/util/session.c
tools/perf/util/setup.py
tools/perf/util/stat.c
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libdw.h
tools/perf/util/unwind-libunwind-local.c
tools/perf/util/unwind-libunwind.c
tools/perf/util/unwind.h
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c
tools/testing/selftests/bpf/progs/map_ptr_kern.c
tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
tools/testing/selftests/kselftest_harness.h
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/arch_timer.c
tools/testing/selftests/kvm/aarch64/get-reg-list.c
tools/testing/selftests/kvm/aarch64/vcpu_width_config.c [new file with mode: 0644]
tools/testing/selftests/kvm/dirty_log_perf_test.c
tools/testing/selftests/kvm/include/riscv/processor.h
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/mqueue/mq_perf_tests.c
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/pid_namespace/Makefile
tools/testing/selftests/pidfd/pidfd_wait.c
tools/testing/selftests/proc/proc-pid-vm.c
tools/testing/selftests/vDSO/vdso_test_correctness.c
tools/testing/selftests/x86/Makefile
virt/kvm/kvm_main.c

index b9d3582..9345815 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -391,6 +391,10 @@ Uwe Kleine-König <ukleinek@strlen.de>
 Uwe Kleine-König <ukl@pengutronix.de>
 Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
 Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
+Vasily Averin <vasily.averin@linux.dev> <vvs@virtuozzo.com>
+Vasily Averin <vasily.averin@linux.dev> <vvs@openvz.org>
+Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com>
+Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
index 0582036..4cdba34 100644 (file)
@@ -116,7 +116,7 @@ Description:
                                            <value>[ForceIf:<attribute>=<value>]
                                            <value>[ForceIfNot:<attribute>=<value>]
 
-                                       For example:
+                                       For example::
 
                                            LegacyOrom/dell_value_modifier has value:
                                                    Disabled[ForceIf:SecureBoot=Enabled]
@@ -212,7 +212,7 @@ Description:
                the next boot.
 
                Lenovo specific class extensions
-               ------------------------------
+               --------------------------------
 
                On Lenovo systems the following additional settings are available:
 
@@ -246,9 +246,7 @@ Description:
                                        that is being referenced (e.g hdd0, hdd1 etc)
                                        This attribute defaults to device 0.
 
-               certificate:
-               signature:
-               save_signature:
+               certificate, signature, save_signature:
                                        These attributes are used for certificate based authentication. This is
                                        used in conjunction with a signing server as an alternative to password
                                        based authentication.
@@ -257,22 +255,27 @@ Description:
                                        The attributes can be displayed to check the stored value.
 
                                        Some usage examples:
-                                       Installing a certificate to enable feature:
-                                               echo <supervisor password > authentication/Admin/current_password
-                                               echo <signed certificate> > authentication/Admin/certificate
 
-                                       Updating the installed certificate:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <signed certificate> > authentication/Admin/certificate
+                                               Installing a certificate to enable feature::
+
+                                                       echo "supervisor password" > authentication/Admin/current_password
+                                                       echo "signed certificate" > authentication/Admin/certificate
+
+                                               Updating the installed certificate::
+
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "signed certificate" > authentication/Admin/certificate
 
-                                       Removing the installed certificate:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo '' > authentication/Admin/certificate
+                                               Removing the installed certificate::
 
-                                       Changing a BIOS setting:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <save signature> > authentication/Admin/save_signature
-                                               echo Enable > attribute/PasswordBeep/current_value
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "" > authentication/Admin/certificate
+
+                                               Changing a BIOS setting::
+
+                                                       echo "signature" > authentication/Admin/signature
+                                                       echo "save signature" > authentication/Admin/save_signature
+                                                       echo Enable > attribute/PasswordBeep/current_value
 
                                        You cannot enable certificate authentication if a supervisor password
                                        has not been set.
@@ -288,9 +291,10 @@ Description:
                certificate_to_password:
                                        Write only attribute used to switch from certificate based authentication
                                        back to password based.
-                                       Usage:
-                                               echo <signature> > authentication/Admin/signature
-                                               echo <password> > authentication/Admin/certificate_to_password
+                                       Usage::
+
+                                               echo "signature" > authentication/Admin/signature
+                                               echo "password" > authentication/Admin/certificate_to_password
 
 
 What:          /sys/class/firmware-attributes/*/attributes/pending_reboot
@@ -345,7 +349,7 @@ Description:
 
                    # echo "factory" > /sys/class/firmware-attributes/*/device/attributes/reset_bios
                    # cat /sys/class/firmware-attributes/*/device/attributes/reset_bios
-                   builtinsafe lastknowngood [factory] custom
+                   builtinsafe lastknowngood [factory] custom
 
                Note that any changes to this attribute requires a reboot
                for changes to take effect.
index ab12212..96b92c1 100644 (file)
@@ -13,17 +13,19 @@ Description:
                Should the operation fail, one of the following error codes
                may be returned:
 
+               ==========      =====
                Error Code      Cause
-               ----------      -----
-               EIO             General mailbox failure. Log may indicate cause.
-               EBUSY           Mailbox is owned by another agent.
-               EPERM           SDSI capability is not enabled in hardware.
-               EPROTO          Failure in mailbox protocol detected by driver.
+               ==========      =====
+               EIO             General mailbox failure. Log may indicate cause.
+               EBUSY           Mailbox is owned by another agent.
+               EPERM           SDSI capability is not enabled in hardware.
+               EPROTO          Failure in mailbox protocol detected by driver.
                                See log for details.
-               EOVERFLOW       For provision commands, the size of the data
+               EOVERFLOW       For provision commands, the size of the data
                                exceeds what may be written.
-               ESPIPE          Seeking is not allowed.
-               ETIMEDOUT       Failure to complete mailbox transaction in time.
+               ESPIPE          Seeking is not allowed.
+               ETIMEDOUT       Failure to complete mailbox transaction in time.
+               ==========      =====
 
 What:          /sys/bus/auxiliary/devices/intel_vsec.sdsi.X/guid
 Date:          Feb 2022
index ad168d1..867a4bb 100644 (file)
@@ -41,13 +41,18 @@ or ``VFAT_FS``. To run ``FAT_KUNIT_TEST``, the ``.kunitconfig`` has:
        CONFIG_MSDOS_FS=y
        CONFIG_FAT_KUNIT_TEST=y
 
-1. A good starting point for the ``.kunitconfig``, is the KUnit default
-   config. Run the command:
+1. A good starting point for the ``.kunitconfig`` is the KUnit default config.
+   You can generate it by running:
 
 .. code-block:: bash
 
        cd $PATH_TO_LINUX_REPO
-       cp tools/testing/kunit/configs/default.config .kunitconfig
+       tools/testing/kunit/kunit.py config
+       cat .kunit/.kunitconfig
+
+.. note ::
+   ``.kunitconfig`` lives in the ``--build_dir`` used by kunit.py, which is
+   ``.kunit`` by default.
 
 .. note ::
    You may want to remove CONFIG_KUNIT_ALL_TESTS from the ``.kunitconfig`` as
index 0afec83..564ae6a 100644 (file)
@@ -14,7 +14,6 @@ properties:
   compatible:
     enum:
       - nvidia,tegra20-pmc
-      - nvidia,tegra20-pmc
       - nvidia,tegra30-pmc
       - nvidia,tegra114-pmc
       - nvidia,tegra124-pmc
index bd40213..fced408 100644 (file)
@@ -35,7 +35,6 @@ properties:
       - items:
           - enum:
               - ti,sysc-omap2
-              - ti,sysc-omap2
               - ti,sysc-omap4
               - ti,sysc-omap4-simple
               - ti,sysc-omap2-timer
index f14f1d3..d819dfa 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC Audio SubSystem clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 4e80628..0589a63 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 64d027d..c98eff6 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung SoC external/osc/XXTI/XusbXTI clock
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 1ed64ad..b644bbd 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos4412 SoC ISP clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index a3fac5c..b05f835 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5260 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 032862e..b737c9d 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5410 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index edd1b4a..3f9326e 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos5433 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 599baf0..c137c67 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos7 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 7e5a9ca..5073e56 100644 (file)
@@ -9,7 +9,7 @@ title: Samsung Exynos7885 SoC clock controller
 maintainers:
   - Dávid Virág <virag.david003@gmail.com>
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 80ba608..aa11815 100644 (file)
@@ -9,7 +9,7 @@ title: Samsung Exynos850 SoC clock controller
 maintainers:
   - Sam Protsenko <semen.protsenko@linaro.org>
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 1410c51..9248bfc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2M and S5M family clock generator block
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index ae8f8fc..2659854 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung S5Pv210 SoC Audio SubSystem clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index dcb29a2..67a3366 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung S5P6442/S5PC110/S5PV210 SoC clock controller
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index d318fcc..2bdd05a 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos NoC (Network on Chip) Probe
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung Exynos542x SoC has a NoC (Network on Chip) Probe for NoC bus.
index c9a8cb5..e300df4 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung Exynos SoC PPMU (Platform Performance Monitoring Unit)
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung Exynos SoC has PPMU (Platform Performance Monitoring Unit) for
index 62c3bd4..7257fd0 100644 (file)
@@ -51,7 +51,6 @@ properties:
           Video port for MIPI DPI output (panel or connector).
 
     required:
-      - port@0
       - port@1
 
 required:
index 5216c27..a412a1d 100644 (file)
@@ -39,7 +39,6 @@ properties:
           Video port for MIPI DPI output (panel or connector).
 
     required:
-      - port@0
       - port@1
 
 required:
index d31483a..6fb7e32 100644 (file)
@@ -160,7 +160,7 @@ examples:
     mdss: mdss@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
-        compatible = "qcom,qcm2290-mdss", "qcom,mdss";
+        compatible = "qcom,qcm2290-mdss";
         reg = <0x05e00000 0x1000>;
         reg-names = "mdss";
         power-domains = <&dispcc MDSS_GDSC>;
@@ -180,7 +180,7 @@ examples:
                  <&apps_smmu 0x421 0x0>;
         ranges;
 
-        mdss_mdp: mdp@5e01000 {
+        mdss_mdp: display-controller@5e01000 {
                 compatible = "qcom,qcm2290-dpu";
                 reg = <0x05e01000 0x8f000>,
                       <0x05eb0000 0x2008>;
index f297899..c2df8d2 100644 (file)
@@ -83,6 +83,8 @@ properties:
 required:
   - compatible
   - reg
+  - width-mm
+  - height-mm
   - panel-timing
 
 unevaluatedProperties: false
index 9bf592d..7749de9 100644 (file)
@@ -71,78 +71,72 @@ properties:
 
   hfront-porch:
     description: Horizontal front porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   hback-porch:
     description: Horizontal back porch timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   hsync-len:
     description: Horizontal sync length panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of pixels
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of pixels
 
   vfront-porch:
     description: Vertical front porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
 
   vback-porch:
     description: Vertical back porch panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
 
   vsync-len:
     description: Vertical sync length panel timing
+    $ref: /schemas/types.yaml#/definitions/uint32-array
     oneOf:
-      - $ref: /schemas/types.yaml#/definitions/uint32
-        maxItems: 1
+      - maxItems: 1
         items:
           description: typical number of lines
-      - $ref: /schemas/types.yaml#/definitions/uint32-array
-        minItems: 3
+      - minItems: 3
         maxItems: 3
         items:
           description: min, typ, max number of lines
index f998a3a..919734c 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index cb8e735..63379fa 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index ba40284..00e325a 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   Samsung Exynos SoC Mixer is responsible for mixing and blending multiple data
index 6f79683..7c37470 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   DECON (Display and Enhancement Controller) is the Display Controller for the
index 01fccb1..c5c6239 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   MIC (Mobile Image Compressor) resides between DECON and MIPI DSI. MIPI DSI is
index afa137d..320eedc 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   DECON (Display and Enhancement Controller) is the Display Controller for the
index 9cf5f12..c62ea9d 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index f9ffe3d..1289605 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77843 MicroUSB and Companion Power Management IC Extcon
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB
@@ -25,7 +25,7 @@ properties:
     $ref: /schemas/connector/usb-connector.yaml#
 
   ports:
-    $ref: /schemas/graph.yaml#/properties/port
+    $ref: /schemas/graph.yaml#/properties/ports
     description:
       Any connector to the data bus of this controller should be modelled using
       the OF graph bindings specified
index 4d6bfae..85f8d47 100644 (file)
@@ -20,6 +20,7 @@ properties:
           - mediatek,mt8183-mali
           - realtek,rtd1619-mali
           - renesas,r9a07g044-mali
+          - renesas,r9a07g054-mali
           - rockchip,px30-mali
           - rockchip,rk3568-mali
       - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
@@ -109,7 +110,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: renesas,r9a07g044-mali
+            enum:
+              - renesas,r9a07g044-mali
+              - renesas,r9a07g054-mali
     then:
       properties:
         interrupts:
index 4b5851c..b1a4c23 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LTC4151 High Voltage I2C Current and Voltage Monitor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index c42051f..028d6e5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Microchip MCP3021 A/D converter
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 4669217..80df718 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Sensirion SHT15 humidity and temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d3eff4f..c5a889e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TMP102 temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index eda55bb..dcbc6fb 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TMP108 temperature sensor
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 801ca9b..e7493e2 100644 (file)
@@ -58,9 +58,8 @@ patternProperties:
           The value (two's complement) to be programmed in the channel specific N correction register.
           For remote channels only.
         $ref: /schemas/types.yaml#/definitions/int32
-        items:
-          minimum: -128
-          maximum: 127
+        minimum: -128
+        maximum: 127
 
     required:
       - reg
index 19874e8..3e52a0d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung's High Speed I2C controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Samsung's High Speed I2C controller is used to interface with I2C devices
index 84051b0..c262305 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC I2C Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index cf71108..666414a 100644 (file)
@@ -98,6 +98,7 @@ allOf:
               - ti,adc121s
               - ti,ads7866
               - ti,ads7868
+    then:
       required:
         - vcc-supply
   # Devices with a vref
index 7c260f2..92f9472 100644 (file)
@@ -108,9 +108,7 @@ patternProperties:
           - [1-5]: order 1 to 5.
           For audio purpose it is recommended to use order 3 to 5.
         $ref: /schemas/types.yaml#/definitions/uint32
-        items:
-          minimum: 0
-          maximum: 5
+        maximum: 5
 
       "#io-channel-cells":
         const: 1
@@ -174,7 +172,7 @@ patternProperties:
               contains:
                 const: st,stm32-dfsdm-adc
 
-      - then:
+        then:
           properties:
             st,adc-channels:
               minItems: 1
@@ -206,7 +204,7 @@ patternProperties:
               contains:
                 const: st,stm32-dfsdm-dmic
 
-      - then:
+        then:
           properties:
             st,adc-channels:
               maxItems: 1
@@ -254,7 +252,7 @@ allOf:
           contains:
             const: st,stm32h7-dfsdm
 
-  - then:
+    then:
       patternProperties:
         "^filter@[0-9]+$":
           properties:
@@ -269,7 +267,7 @@ allOf:
           contains:
             const: st,stm32mp1-dfsdm
 
-  - then:
+    then:
       patternProperties:
         "^filter@[0-9]+$":
           properties:
index 0d8fb56..65f86f2 100644 (file)
@@ -59,9 +59,9 @@ allOf:
           contains:
             enum:
               - adi,ad5371
-      then:
-        required:
-          - vref2-supply
+    then:
+      required:
+        - vref2-supply
 
 examples:
   - |
index 89853b4..8a676fe 100644 (file)
@@ -93,48 +93,48 @@ allOf:
               - qcom,sdm660-gnoc
               - qcom,sdm660-snoc
 
-      then:
-        properties:
-          clock-names:
-            items:
-              - const: bus
-              - const: bus_a
-
-          clocks:
-            items:
-              - description: Bus Clock
-              - description: Bus A Clock
-
-        # Child node's properties
-        patternProperties:
-          '^interconnect-[a-z0-9]+$':
-            type: object
-            description:
-              snoc-mm is a child of snoc, sharing snoc's register address space.
-
-            properties:
-              compatible:
-                enum:
-                  - qcom,msm8939-snoc-mm
-
-              '#interconnect-cells':
-                const: 1
-
-              clock-names:
-                items:
-                  - const: bus
-                  - const: bus_a
-
-              clocks:
-                items:
-                  - description: Bus Clock
-                  - description: Bus A Clock
-
-            required:
-              - compatible
-              - '#interconnect-cells'
-              - clock-names
-              - clocks
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+
+        clocks:
+          items:
+            - description: Bus Clock
+            - description: Bus A Clock
+
+      # Child node's properties
+      patternProperties:
+        '^interconnect-[a-z0-9]+$':
+          type: object
+          description:
+            snoc-mm is a child of snoc, sharing snoc's register address space.
+
+          properties:
+            compatible:
+              enum:
+                - qcom,msm8939-snoc-mm
+
+            '#interconnect-cells':
+              const: 1
+
+            clock-names:
+              items:
+                - const: bus
+                - const: bus_a
+
+            clocks:
+              items:
+                - description: Bus Clock
+                - description: Bus A Clock
+
+          required:
+            - compatible
+            - '#interconnect-cells'
+            - clock-names
+            - clocks
 
   - if:
       properties:
index 372ccbf..5a583bf 100644 (file)
@@ -7,10 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Marvell MMP/Orion Interrupt controller bindings
 
 maintainers:
-  - Thomas Gleixner <tglx@linutronix.de>
-  - Jason Cooper <jason@lakedaemon.net>
-  - Marc Zyngier <maz@kernel.org>
-  - Rob Herring <robh+dt@kernel.org>
+  - Andrew Lunn <andrew@lunn.ch>
+  - Gregory Clement <gregory.clement@bootlin.com>
 
 allOf:
   - if:
index d631b75..72456a0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Interrupt Combiner Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   Samsung's Exynos4 architecture includes a interrupt combiner controller which
diff --git a/Documentation/devicetree/bindings/iommu/apple,sart.yaml b/Documentation/devicetree/bindings/iommu/apple,sart.yaml
new file mode 100644 (file)
index 0000000..1524fa3
--- /dev/null
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iommu/apple,sart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple SART DMA address filter
+
+maintainers:
+  - Sven Peter <sven@svenpeter.dev>
+
+description:
+  Apple SART is a simple address filter for DMA transactions. Regions of
+  physical memory must be added to the SART's allow list before any
+  DMA can target these. Unlike a proper IOMMU no remapping can be done and
+  special support in the consumer driver is required since not all DMA
+  transactions of a single device are subject to SART filtering.
+
+  SART1 has first been used since at least the A11 (iPhone 8 and iPhone X)
+  and allows 36 bit of physical address space and filter entries with sizes
+  up to 24 bit.
+
+  SART2, first seen in A14 and M1, allows 36 bit of physical address space
+  and filter entry size up to 36 bit.
+
+  SART3, first seen in M1 Pro/Max, extends both the address space and filter
+  entry size to 42 bit.
+
+properties:
+  compatible:
+    enum:
+      - apple,t6000-sart
+      - apple,t8103-sart
+
+  reg:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    iommu@7bc50000 {
+      compatible = "apple,t8103-sart";
+      reg = <0x7bc50000 0x4000>;
+    };
index 86a0005..e27f57b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77693 MicroUSB and Companion Power Management IC LEDs
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index 36781ee..c9d5adb 100644 (file)
@@ -65,7 +65,6 @@ properties:
   iram:
     $ref: /schemas/types.yaml#/definitions/phandle
     description: phandle pointing to the SRAM device node
-    maxItems: 1
 
 required:
   - compatible
index 9b179bb..aa55ca6 100644 (file)
@@ -63,13 +63,11 @@ properties:
 
   mediatek,vpu:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to vpu.
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to scp.
 
index e7b65a9..deb5b65 100644 (file)
@@ -55,13 +55,11 @@ properties:
 
   mediatek,vpu:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to vpu.
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description:
       Describes point to scp.
 
@@ -106,7 +104,6 @@ allOf:
           enum:
             - mediatek,mt8173-vcodec-enc
             - mediatek,mt8192-vcodec-enc
-            - mediatek,mt8173-vcodec-enc
 
     then:
       properties:
index 7687be0..c73bf23 100644 (file)
@@ -61,7 +61,6 @@ properties:
 
   mediatek,scp:
     $ref: /schemas/types.yaml#/definitions/phandle
-    maxItems: 1
     description: |
       The node of system control processor (SCP), using
       the remoteproc & rpmsg framework.
index 769f132..08cbdcd 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: DDR PHY Front End (DPFE) for Broadcom STB
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Markus Mayer <mmayer@broadcom.com>
 
 properties:
index f3e62ee..1daa665 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR2 SDRAM AC timing parameters for a given speed-bin
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index dd2141c..9d78f14 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR2 SDRAM compliant to JEDEC JESD209-2
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 97c3e98..5c6512c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR3 SDRAM AC timing parameters for a given speed-bin
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index c542f32..48908a1 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: LPDDR3 SDRAM compliant to JEDEC JESD209-3
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 14a6bc8..9249624 100644 (file)
@@ -8,7 +8,7 @@ title: Marvell MVEBU SDRAM controller
 
 maintainers:
   - Jan Luebbe <jlu@pengutronix.de>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 9566b34..0c511ab 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Qualcomm Atheros AR7xxx/AR9xxx DDR controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The DDR controller of the AR7xxx and AR9xxx families provides an interface to
index 2b18cef..514b2c5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: H8/300 bus controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Yoshinori Sato <ysato@users.sourceforge.jp>
 
 properties:
index 9a6dbf5..ae31dc9 100644 (file)
@@ -31,8 +31,13 @@ properties:
               - renesas,r8a774b1-rpc-if       # RZ/G2N
               - renesas,r8a774c0-rpc-if       # RZ/G2E
               - renesas,r8a774e1-rpc-if       # RZ/G2H
+              - renesas,r8a7795-rpc-if        # R-Car H3
+              - renesas,r8a7796-rpc-if        # R-Car M3-W
+              - renesas,r8a77961-rpc-if       # R-Car M3-W+
+              - renesas,r8a77965-rpc-if       # R-Car M3-N
               - renesas,r8a77970-rpc-if       # R-Car V3M
               - renesas,r8a77980-rpc-if       # R-Car V3H
+              - renesas,r8a77990-rpc-if       # R-Car E3
               - renesas,r8a77995-rpc-if       # R-Car D3
               - renesas,r8a779a0-rpc-if       # R-Car V3U
           - const: renesas,rcar-gen3-rpc-if   # a generic R-Car gen3 or RZ/G2{E,H,M,N} device
index f152243..098348b 100644 (file)
@@ -9,7 +9,7 @@ title: |
   Controller device
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Lukasz Luba <lukasz.luba@arm.com>
 
 description: |
index fb7ae38..f46e957 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Synopsys IntelliDDR Multi Protocol memory controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Manish Narani <manish.narani@xilinx.com>
   - Michal Simek <michal.simek@xilinx.com>
 
@@ -24,9 +24,9 @@ description: |
 properties:
   compatible:
     enum:
+      - snps,ddrc-3.80a
       - xlnx,zynq-ddrc-a05
       - xlnx,zynqmp-ddrc-2.40a
-      - snps,ddrc-3.80a
 
   interrupts:
     maxItems: 1
@@ -43,7 +43,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: xlnx,zynqmp-ddrc-2.40a
+            enum:
+              - snps,ddrc-3.80a
+              - xlnx,zynqmp-ddrc-2.40a
     then:
       required:
         - interrupts
index 9ed5118..382ddab 100644 (file)
@@ -8,7 +8,7 @@ title: Texas Instruments da8xx DDR2/mDDR memory controller
 
 maintainers:
   - Bartosz Golaszewski <bgolaszewski@baylibre.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   Documentation:
index 27870b8..52edd1b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index 859655a..d027aab 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77686 Power Management
index 9061011..1b06a77 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB
index baa1346..ad20139 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC
 
 maintainers:
   - Javier Martinez Canillas <javier@dowhile0.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77802 Power Management
index 61a0f9d..f30f96b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77843 MicroUSB and Companion Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB
index bae55c9..f7bb67d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Low Power Audio Subsystem (LPASS)
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 017befd..055dfc3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPA01 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 771b3f1..5ff6546 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS11/13/14/15 and S2MPU02 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 5531718..10c7b40 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5M8767 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index ce64b34..f3f4d5b 100644 (file)
@@ -197,6 +197,8 @@ allOf:
               - nvidia,tegra30-sdhci
               - nvidia,tegra114-sdhci
               - nvidia,tegra124-sdhci
+    then:
+      properties:
         clocks:
           items:
             - description: module clock
index 817794e..4f15463 100644 (file)
@@ -106,6 +106,12 @@ properties:
   phy-mode:
     $ref: "#/properties/phy-connection-type"
 
+  pcs-handle:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Specifies a reference to a node representing a PCS PHY device on a MDIO
+      bus to link with an external PHY (phy-handle) if exists.
+
   phy-handle:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
index c5ab62c..8d157f0 100644 (file)
@@ -45,20 +45,3 @@ Optional properties:
 
        In fiber mode, auto-negotiation is disabled and the PHY can only work in
        100base-fx (full and half duplex) modes.
-
- - lan8814,ignore-ts: If present the PHY will not support timestamping.
-
-       This option acts as check whether Timestamping is supported by
-       hardware or not. LAN8814 phy support hardware tmestamping.
-
- - lan8814,latency_rx_10: Configures Latency value of phy in ingress at 10 Mbps.
-
- - lan8814,latency_tx_10: Configures Latency value of phy in egress at 10 Mbps.
-
- - lan8814,latency_rx_100: Configures Latency value of phy in ingress at 100 Mbps.
-
- - lan8814,latency_tx_100: Configures Latency value of phy in egress at 100 Mbps.
-
- - lan8814,latency_rx_1000: Configures Latency value of phy in ingress at 1000 Mbps.
-
- - lan8814,latency_tx_1000: Configures Latency value of phy in egress at 1000 Mbps.
index 15a45db..1bcaf6b 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Marvell International Ltd. NCI NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 7465aea..e381a3c 100644 (file)
@@ -8,7 +8,7 @@ title: NXP Semiconductors NCI NFC controller
 
 maintainers:
   - Charles Gorand <charles.gorand@effinnov.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d8ba5a1..0509e01 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP Semiconductors PN532 NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d520414..18b3a7d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: NXP Semiconductors PN544 NFC Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index a6a1bc7..ef11550 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics ST NCI NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 4356eac..8a72743 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics SAS ST21NFCA NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index d3bca37..963d953 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: STMicroelectronics ST95HF NFC controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 40da2ac..404c8df 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments TRF7970A RFID/NFC/15693 Transceiver
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Mark Greer <mgreer@animalcreek.com>
 
 properties:
index 2d5248f..36c85eb 100644 (file)
@@ -53,20 +53,18 @@ properties:
         - allwinner,sun8i-r40-gmac
         - allwinner,sun8i-v3s-emac
         - allwinner,sun50i-a64-emac
-        - loongson,ls2k-dwmac
-        - loongson,ls7a-dwmac
         - amlogic,meson6-dwmac
         - amlogic,meson8b-dwmac
         - amlogic,meson8m2-dwmac
         - amlogic,meson-gxbb-dwmac
         - amlogic,meson-axg-dwmac
-        - loongson,ls2k-dwmac
-        - loongson,ls7a-dwmac
         - ingenic,jz4775-mac
         - ingenic,x1000-mac
         - ingenic,x1600-mac
         - ingenic,x1830-mac
         - ingenic,x2000-mac
+        - loongson,ls2k-dwmac
+        - loongson,ls7a-dwmac
         - rockchip,px30-gmac
         - rockchip,rk3128-gmac
         - rockchip,rk3228-gmac
index e602761..b0ebcef 100644 (file)
@@ -13,9 +13,6 @@ description: |
   This describes the devicetree bindings for AVE ethernet controller
   implemented on Socionext UniPhier SoCs.
 
-allOf:
-  - $ref: ethernet-controller.yaml#
-
 properties:
   compatible:
     enum:
@@ -44,25 +41,13 @@ properties:
     minItems: 1
     maxItems: 4
 
-  clock-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: gio
-          - const: ether
-          - const: ether-gb
-          - const: ether-phy
-      - const: ether    # for others
+  clock-names: true
 
   resets:
     minItems: 1
     maxItems: 2
 
-  reset-names:
-    oneOf:
-      - items:          # for Pro4
-          - const: gio
-          - const: ether
-      - const: ether    # for others
+  reset-names: true
 
   socionext,syscon-phy-mode:
     $ref: /schemas/types.yaml#/definitions/phandle-array
@@ -78,6 +63,42 @@ properties:
     $ref: mdio.yaml#
     unevaluatedProperties: false
 
+allOf:
+  - $ref: ethernet-controller.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: socionext,uniphier-pro4-ave4
+    then:
+      properties:
+        clocks:
+          minItems: 4
+          maxItems: 4
+        clock-names:
+          items:
+            - const: gio
+            - const: ether
+            - const: ether-gb
+            - const: ether-phy
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: ether
+    else:
+      properties:
+        clocks:
+          maxItems: 1
+        clock-names:
+          const: ether
+        resets:
+          maxItems: 1
+        reset-names:
+          const: ether
+
 required:
   - compatible
   - reg
@@ -90,7 +111,7 @@ required:
   - reset-names
   - mdio
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index dbfca5e..6f44f95 100644 (file)
@@ -56,6 +56,7 @@ if:
     compatible:
       contains:
         const: ti,davinci_mdio
+then:
   required:
     - bus_freq
 
index b8e4894..1aa4c60 100644 (file)
@@ -26,7 +26,8 @@ Required properties:
                  specified, the TX/RX DMA interrupts should be on that node
                  instead, and only the Ethernet core interrupt is optionally
                  specified here.
-- phy-handle   : Should point to the external phy device.
+- phy-handle   : Should point to the external phy device if exists. Pointing
+                 this to the PCS/PMA PHY is deprecated and should be avoided.
                  See ethernet.txt file in the same directory.
 - xlnx,rxmem   : Set to allocated memory buffer for Rx/Tx in the hardware
 
@@ -68,6 +69,11 @@ Optional properties:
                  required through the core's MDIO interface (i.e. always,
                  unless the PHY is accessed through a different bus).
 
+ - pcs-handle:           Phandle to the internal PCS/PMA PHY in SGMII or 1000Base-X
+                 modes, where "pcs-handle" should be used to point
+                 to the PCS/PMA PHY, and "phy-handle" should point to an
+                 external PHY if exists.
+
 Example:
        axi_ethernet_eth: ethernet@40c00000 {
                compatible = "xlnx,axi-ethernet-1.00.a";
diff --git a/Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml b/Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
new file mode 100644 (file)
index 0000000..ddff923
--- /dev/null
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvme/apple,nvme-ans.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Apple ANS NVM Express host controller
+
+maintainers:
+  - Sven Peter <sven@svenpeter.dev>
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - apple,t8103-nvme-ans2
+          - apple,t6000-nvme-ans2
+      - const: apple,nvme-ans2
+
+  reg:
+    items:
+      - description: NVMe and NVMMU registers
+      - description: ANS2 co-processor control registers
+
+  reg-names:
+    items:
+      - const: nvme
+      - const: ans
+
+  resets:
+    maxItems: 1
+
+  power-domains:
+    # two domains for t8103, three for t6000
+    minItems: 2
+    items:
+      - description: power domain for the NVMe controller.
+      - description: power domain for the first PCIe bus connecting the NVMe
+          controller to the storage modules.
+      - description: optional power domain for the second PCIe bus
+          connecting the NVMe controller to the storage modules.
+
+  power-domain-names:
+    minItems: 2
+    items:
+      - const: ans
+      - const: apcie0
+      - const: apcie1
+
+  mboxes:
+    maxItems: 1
+    description: Mailbox of the ANS2 co-processor
+
+  interrupts:
+    maxItems: 1
+
+  apple,sart:
+    maxItems: 1
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: |
+      Reference to the SART address filter.
+
+      The SART address filter is documented in iommu/apple,sart.yaml.
+
+if:
+  properties:
+    compatible:
+      contains:
+        const: apple,t8103-nvme-ans2
+then:
+  properties:
+    power-domains:
+      maxItems: 2
+    power-domain-names:
+      maxItems: 2
+else:
+  properties:
+    power-domains:
+      minItems: 3
+    power-domain-names:
+      minItems: 3
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - resets
+  - power-domains
+  - power-domain-names
+  - mboxes
+  - interrupts
+  - apple,sart
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/apple-aic.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    nvme@7bcc0000 {
+      compatible = "apple,t8103-nvme-ans2", "apple,nvme-ans2";
+      reg = <0x7bcc0000 0x40000>, <0x77400000 0x4000>;
+      reg-names = "nvme", "ans";
+      interrupts = <AIC_IRQ 590 IRQ_TYPE_LEVEL_HIGH>;
+      mboxes = <&ans>;
+      apple,sart = <&sart>;
+      power-domains = <&ps_ans2>, <&ps_apcie_st>;
+      power-domain-names = "ans", "apcie0";
+      resets = <&ps_ans2>;
+    };
index dfde0ea..d61585c 100644 (file)
@@ -275,17 +275,17 @@ allOf:
           - nvidia,hssquelch-level
           - nvidia,hsdiscon-level
 
-        else:
-          properties:
-            clocks:
-              maxItems: 4
+      else:
+        properties:
+          clocks:
+            maxItems: 4
 
-            clock-names:
-              items:
-                - const: reg
-                - const: pll_u
-                - const: timer
-                - const: utmi-pads
+          clock-names:
+            items:
+              - const: reg
+              - const: pll_u
+              - const: timer
+              - const: utmi-pads
 
   - if:
       properties:
index e23e559..0655e48 100644 (file)
@@ -14,24 +14,24 @@ if:
     compatible:
       contains:
         const: qcom,usb-hs-phy-apq8064
-  then:
-    properties:
-      resets:
-        maxItems: 1
+then:
+  properties:
+    resets:
+      maxItems: 1
 
-      reset-names:
-        const: por
+    reset-names:
+      const: por
 
-  else:
-    properties:
-      resets:
-        minItems: 2
-        maxItems: 2
+else:
+  properties:
+    resets:
+      minItems: 2
+      maxItems: 2
 
-      reset-names:
-        items:
-          - const: phy
-          - const: por
+    reset-names:
+      items:
+        - const: phy
+        - const: por
 
 properties:
   compatible:
@@ -92,6 +92,8 @@ additionalProperties: false
 examples:
   - |
     otg: usb-controller {
+      #reset-cells = <1>;
+
       ulpi {
         phy {
           compatible = "qcom,usb-hs-phy-msm8974", "qcom,usb-hs-phy";
index 838c6d4..b03b2f0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC DisplayPort PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index c61574e..3e5f035 100644 (file)
@@ -11,7 +11,7 @@ maintainers:
   - Joonyoung Shim <jy0922.shim@samsung.com>
   - Seung-Woo Kim <sw0312.kim@samsung.com>
   - Kyungmin Park <kyungmin.park@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 62b39bb..8751e55 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos5250 SoC SATA PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 54aa056..415440a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5P/Exynos SoC MIPI CSIS/DSIM DPHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 056e270..d9f22a8 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5P/Exynos SoC USB 2.0 PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index f83f0f8..5ba55f9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 3.0 DRD PHY USB 2.0 PHY
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Marek Szyprowski <m.szyprowski@samsung.com>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
index 8a90d82..6bd42e4 100644 (file)
@@ -48,13 +48,12 @@ properties:
               Name of one pin group to configure.
             enum: [ aif1, aif2, aif3, aif4, mif1, mif2, mif3, pdmspk1,
                     pdmspk2, dmic4, dmic5, dmic6, gpio1, gpio2, gpio3,
-                    gpio4, gpio5, gpio6, gpio7, gpio7, gpio8, gpio9,
+                    gpio4, gpio5, gpio6, gpio7, gpio8, gpio9,
                     gpio10, gpio11, gpio12, gpio13, gpio14, gpio15,
-                    gpio16, gpio17, gpio17, gpio18, gpio19, gpio20,
-                    gpio21, gpio22, gpio23, gpio24, gpio25, gpio26,
-                    gpio27, gpio27, gpio28, gpio29, gpio30, gpio31,
-                    gpio32, gpio33, gpio34, gpio35, gpio36, gpio37,
-                    gpio37, gpio38, gpio39 ]
+                    gpio16, gpio17, gpio18, gpio19, gpio20, gpio21,
+                    gpio22, gpio23, gpio24, gpio25, gpio26, gpio27,
+                    gpio28, gpio29, gpio30, gpio31, gpio32, gpio33,
+                    gpio34, gpio35, gpio36, gpio37, gpio38, gpio39 ]
 
           function:
             description:
index f73348c..8cf3c47 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - gpio bank
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index c71939a..9869d4d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - pins configuration
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index a822f70..1de91a5 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller - wake-up interrupt controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 989e48c..3a65c66 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC pin controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
   - Tomasz Figa <tomasz.figa@gmail.com>
 
index 4d293b2..d77fc88 100644 (file)
@@ -36,7 +36,8 @@ properties:
   cpus:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     items:
-      maxItems: 1
+      minItems: 1
+      maxItems: 4
     description: |
       Array of phandles pointing to CPU cores, which should match the order of
       CPU cores used by the WUPCR and PSTR registers in the Advanced Power
index f8461f0..118cf48 100644 (file)
@@ -17,7 +17,6 @@ properties:
   compatible:
     enum:
       - ti,bq24150
-      - ti,bq24150
       - ti,bq24150a
       - ti,bq24151
       - ti,bq24151a
index 3978b48..4d3a1d0 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC Charger
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index a21dc1a..f5fd53d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77693 MicroUSB and Companion Power Management IC Charger
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index 9b131c6..84eeaef 100644 (file)
@@ -18,23 +18,23 @@ description:
 
 allOf:
   - $ref: "regulator.yaml#"
-
-if:
-  properties:
-    compatible:
-      contains:
-        const: regulator-fixed-clock
-  required:
-    - clocks
-else:
-  if:
-    properties:
-      compatible:
-        contains:
-          const: regulator-fixed-domain
-    required:
-      - power-domains
-      - required-opps
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: regulator-fixed-clock
+    then:
+      required:
+        - clocks
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: regulator-fixed-domain
+    then:
+      required:
+        - power-domains
+        - required-opps
 
 properties:
   compatible:
index 16f0188..285dc71 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX14577/MAX77836 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX14577/MAX77836 MicroUSB
index bb64b67..0e7cd4b 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77686 Power Management IC regulators
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77686 Power Management
index 20d8559..945a539 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77693 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
   - Chanwoo Choi <cw00.choi@samsung.com>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77693 MicroUSB Integrated
index f2b4dd1..236348c 100644 (file)
@@ -8,7 +8,7 @@ title: Maxim MAX77802 Power Management IC regulators
 
 maintainers:
   - Javier Martinez Canillas <javier@dowhile0.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77802 Power Management
index a963025..9695e72 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX77843 MicroUSB and Companion Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for Maxim MAX77843 MicroUSB Integrated
index e4e8c58..3ff0d7d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8952 voltage regulator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 allOf:
   - $ref: regulator.yaml#
index 5898dcf..b92eef6 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8973/MAX77621 voltage regulator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 allOf:
   - $ref: regulator.yaml#
index d5a44ca..4321f06 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Maxim MAX8997 Power Management IC
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   The Maxim MAX8997 is a Power Management IC which includes voltage and current
index 0627dec..0f9eb31 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPA01 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index e3b7807..f1c50dc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS11 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 579d77a..53b105a 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS13 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index fdea290..01f9d4e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS14 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index b3a883c..9576c2d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPS15 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 0ded695..39b652c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S2MPU02 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 3c1617b..172631c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S5M8767 Power Management IC regulators
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   This is a part of device tree bindings for S2M and S5M family of Power
index 2424de7..d99a729 100644 (file)
@@ -104,8 +104,7 @@ properties:
   qcom,smem-state-names:
     $ref: /schemas/types.yaml#/definitions/string
     description: The names of the state bits used for SMP2P output
-    items:
-      - const: stop
+    const: stop
 
   glink-edge:
     type: object
@@ -130,7 +129,6 @@ properties:
       qcom,remote-pid:
         $ref: /schemas/types.yaml#/definitions/uint32
         description: ID of the shared memory used by GLINK for communication with WPSS
-        maxItems: 1
 
     required:
       - interrupts
diff --git a/Documentation/devicetree/bindings/reset/altr,rst-mgr.yaml b/Documentation/devicetree/bindings/reset/altr,rst-mgr.yaml
new file mode 100644 (file)
index 0000000..4379cec
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/altr,rst-mgr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera SOCFPGA Reset Manager
+
+maintainers:
+  - Dinh Nguyen <dinguyen@altera.com>
+
+properties:
+  compatible:
+    oneOf:
+      - description: Cyclone5/Arria5/Arria10
+        const: altr,rst-mgr
+      - description: Stratix10 ARM64 SoC
+        items:
+          - const: altr,stratix10-rst-mgr
+          - const: altr,rst-mgr
+
+  reg:
+    maxItems: 1
+
+  altr,modrst-offset:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Offset of the first modrst register
+
+  '#reset-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - altr,modrst-offset
+  - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    rstmgr@ffd05000 {
+        compatible = "altr,rst-mgr";
+        reg = <0xffd05000 0x1000>;
+        altr,modrst-offset = <0x10>;
+        #reset-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.txt b/Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.txt
deleted file mode 100644 (file)
index 43e580e..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-* Amlogic audio memory arbiter controller
-
-The Amlogic Audio ARB is a simple device which enables or
-disables the access of Audio FIFOs to DDR on AXG based SoC.
-
-Required properties:
-- compatible: 'amlogic,meson-axg-audio-arb' or
-             'amlogic,meson-sm1-audio-arb'
-- reg: physical base address of the controller and length of memory
-       mapped region.
-- clocks: phandle to the fifo peripheral clock provided by the audio
-         clock controller.
-- #reset-cells: must be 1.
-
-Example on the A113 SoC:
-
-arb: reset-controller@280 {
-       compatible = "amlogic,meson-axg-audio-arb";
-       reg = <0x0 0x280 0x0 0x4>;
-       #reset-cells = <1>;
-       clocks = <&clkc_audio AUD_CLKID_DDR_ARB>;
-};
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.yaml b/Documentation/devicetree/bindings/reset/amlogic,meson-axg-audio-arb.yaml
new file mode 100644 (file)
index 0000000..704a502
--- /dev/null
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/amlogic,meson-axg-audio-arb.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic audio memory arbiter controller
+
+maintainers:
+  - Jerome Brunet <jbrunet@baylibre.com>
+
+description: The Amlogic Audio ARB is a simple device which enables or disables
+  the access of Audio FIFOs to DDR on AXG based SoC.
+
+properties:
+  compatible:
+    enum:
+      - amlogic,meson-axg-audio-arb
+      - amlogic,meson-sm1-audio-arb
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+    description: |
+      phandle to the fifo peripheral clock provided by the audio clock
+      controller.
+
+  "#reset-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    // on the A113 SoC:
+    #include <dt-bindings/clock/axg-audio-clkc.h>
+    bus {
+        #address-cells = <2>;
+        #size-cells = <2>;
+
+        arb: reset-controller@280 {
+            compatible = "amlogic,meson-axg-audio-arb";
+            reg = <0x0 0x280 0x0 0x4>;
+            #reset-cells = <1>;
+            clocks = <&clkc_audio AUD_CLKID_DDR_ARB>;
+        };
+    };
index 92922d3..494a454 100644 (file)
@@ -17,6 +17,7 @@ properties:
       - amlogic,meson-gxbb-reset # Reset Controller on GXBB and compatible SoCs
       - amlogic,meson-axg-reset # Reset Controller on AXG and compatible SoCs
       - amlogic,meson-a1-reset # Reset Controller on A1 and compatible SoCs
+      - amlogic,meson-s4-reset # Reset Controller on S4 and compatible SoCs
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/reset/ath79-reset.txt b/Documentation/devicetree/bindings/reset/ath79-reset.txt
deleted file mode 100644 (file)
index 4c56330..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Binding for Qualcomm Atheros AR7xxx/AR9XXX reset controller
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required Properties:
-- compatible: has to be "qca,<soctype>-reset", "qca,ar7100-reset"
-              as fallback
-- reg: Base address and size of the controllers memory area
-- #reset-cells : Specifies the number of cells needed to encode reset
-                 line, should be 1
-
-Example:
-
-       reset-controller@1806001c {
-               compatible = "qca,ar9132-reset", "qca,ar7100-reset";
-               reg = <0x1806001c 0x4>;
-
-               #reset-cells = <1>;
-       };
diff --git a/Documentation/devicetree/bindings/reset/berlin,reset.txt b/Documentation/devicetree/bindings/reset/berlin,reset.txt
deleted file mode 100644 (file)
index 514fee0..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-Marvell Berlin reset controller
-===============================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-The reset controller node must be a sub-node of the chip controller
-node on Berlin SoCs.
-
-Required properties:
-- compatible: should be "marvell,berlin2-reset"
-- #reset-cells: must be set to 2
-
-Example:
-
-chip_rst: reset {
-       compatible = "marvell,berlin2-reset";
-       #reset-cells = <2>;
-};
-
-&usb_phy0 {
-       resets = <&chip_rst 0x104 12>;
-};
diff --git a/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt b/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt
deleted file mode 100644 (file)
index a6f8455..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-Bitmain BM1880 SoC Reset Controller
-===================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible:   Should be "bitmain,bm1880-reset"
-- reg:          Offset and length of reset controller space in SCTRL.
-- #reset-cells: Must be 1.
-
-Example:
-
-        rst: reset-controller@c00 {
-                compatible = "bitmain,bm1880-reset";
-                reg = <0xc00 0x8>;
-                #reset-cells = <1>;
-        };
diff --git a/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.yaml b/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.yaml
new file mode 100644 (file)
index 0000000..f0aca74
--- /dev/null
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Manivannan Sadhasivam <mani@kernel.org>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/bitmain,bm1880-reset.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Bitmain BM1880 SoC Reset Controller
+
+maintainers:
+  - Manivannan Sadhasivam <mani@kernel.org>
+
+properties:
+  compatible:
+    const: bitmain,bm1880-reset
+
+  reg:
+    maxItems: 1
+
+  "#reset-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    rst: reset-controller@c00 {
+        compatible = "bitmain,bm1880-reset";
+        reg = <0xc00 0x8>;
+        #reset-cells = <1>;
+    };
index b0c41ab..cdfcf32 100644 (file)
@@ -24,6 +24,11 @@ properties:
           - const: hisilicon,hi3670-reset
           - const: hisilicon,hi3660-reset
 
+  hisi,rst-syscon:
+    deprecated: true
+    description: phandle of the reset's syscon, use hisilicon,rst-syscon instead
+    $ref: /schemas/types.yaml#/definitions/phandle
+
   hisilicon,rst-syscon:
     description: phandle of the reset's syscon.
     $ref: /schemas/types.yaml#/definitions/phandle
diff --git a/Documentation/devicetree/bindings/reset/lantiq,reset.txt b/Documentation/devicetree/bindings/reset/lantiq,reset.txt
deleted file mode 100644 (file)
index c6aef36..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-Lantiq XWAY SoC RCU reset controller binding
-============================================
-
-This binding describes a reset-controller found on the RCU module on Lantiq
-XWAY SoCs.
-
-This node has to be a sub node of the Lantiq RCU block.
-
--------------------------------------------------------------------------------
-Required properties:
-- compatible           : Should be one of
-                               "lantiq,danube-reset"
-                               "lantiq,xrx200-reset"
-- reg                  : Defines the following sets of registers in the parent
-                         syscon device
-                       - Offset of the reset set register
-                       - Offset of the reset status register
-- #reset-cells         : Specifies the number of cells needed to encode the
-                         reset line, should be 2.
-                         The first cell takes the reset set bit and the
-                         second cell takes the status bit.
-
--------------------------------------------------------------------------------
-Example for the reset-controllers on the xRX200 SoCs:
-       reset0: reset-controller@10 {
-               compatible = "lantiq,xrx200-reset";
-               reg <0x10 0x04>, <0x14 0x04>;
-
-               #reset-cells = <2>;
-       };
diff --git a/Documentation/devicetree/bindings/reset/lantiq,reset.yaml b/Documentation/devicetree/bindings/reset/lantiq,reset.yaml
new file mode 100644 (file)
index 0000000..15d65a5
--- /dev/null
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/lantiq,reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Lantiq XWAY SoC RCU reset controller
+
+maintainers:
+  - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+description: |
+  This binding describes a reset-controller found on the RCU module on Lantiq
+  XWAY SoCs. This node has to be a sub node of the Lantiq RCU block.
+
+properties:
+  compatible:
+    enum:
+      - lantiq,danube-reset
+      - lantiq,xrx200-reset
+
+  reg:
+    description: |
+      Defines the following sets of registers in the parent syscon device
+      Offset of the reset set register
+      Offset of the reset status register
+    maxItems: 2
+
+  '#reset-cells':
+    description: |
+      The first cell takes the reset set bit and the second cell takes the
+      status bit.
+    const: 2
+
+required:
+  - compatible
+  - reg
+  - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    // On the xRX200 SoCs:
+    reset0: reset-controller@10 {
+        compatible = "lantiq,xrx200-reset";
+        reg = <0x10 0x04>, <0x14 0x04>;
+        #reset-cells = <2>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/marvell,berlin2-reset.yaml b/Documentation/devicetree/bindings/reset/marvell,berlin2-reset.yaml
new file mode 100644 (file)
index 0000000..d71d0f0
--- /dev/null
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2015 Antoine Tenart <atenart@kernel.org>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/marvell,berlin2-reset.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Marvell Berlin reset controller
+
+maintainers:
+  - Antoine Tenart <atenart@kernel.org>
+
+description: The reset controller node must be a sub-node of the chip
+  controller node on Berlin SoCs.
+
+properties:
+  compatible:
+    const: marvell,berlin2-reset
+
+  "#reset-cells":
+    const: 2
+
+required:
+  - compatible
+  - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    chip: chip-control@ea0000 {
+        reg = <0xea0000 0x400>;
+
+        chip_rst: reset {
+            compatible = "marvell,berlin2-reset";
+            #reset-cells = <2>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/reset/nuvoton,npcm-reset.txt b/Documentation/devicetree/bindings/reset/nuvoton,npcm-reset.txt
deleted file mode 100644 (file)
index 17b7a6a..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-Nuvoton NPCM Reset controller
-
-Required properties:
-- compatible : "nuvoton,npcm750-reset" for NPCM7XX BMC
-- reg : specifies physical base address and size of the register.
-- #reset-cells: must be set to 2
-
-Optional property:
-- nuvoton,sw-reset-number - Contains the software reset number to restart the SoC.
-  NPCM7xx contain four software reset that represent numbers 1 to 4.
-
-  If 'nuvoton,sw-reset-number' is not specified software reset is disabled.
-
-Example:
-       rstc: rstc@f0801000 {
-               compatible = "nuvoton,npcm750-reset";
-               reg = <0xf0801000 0x70>;
-               #reset-cells = <2>;
-               nuvoton,sw-reset-number = <2>;
-       };
-
-Specifying reset lines connected to IP NPCM7XX modules
-======================================================
-example:
-
-        spi0: spi@..... {
-                ...
-                resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
-                ...
-        };
-
-The index could be found in <dt-bindings/reset/nuvoton,npcm7xx-reset.h>.
diff --git a/Documentation/devicetree/bindings/reset/nuvoton,npcm750-reset.yaml b/Documentation/devicetree/bindings/reset/nuvoton,npcm750-reset.yaml
new file mode 100644 (file)
index 0000000..fa5e4ea
--- /dev/null
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/nuvoton,npcm750-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM Reset controller
+
+maintainers:
+  - Tomer Maimon <tmaimon77@gmail.com>
+
+properties:
+  compatible:
+    const: nuvoton,npcm750-reset
+
+  reg:
+    maxItems: 1
+
+  '#reset-cells':
+    const: 2
+
+  nuvoton,sw-reset-number:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    minimum: 1
+    maximum: 4
+    description: |
+      Contains the software reset number to restart the SoC.
+      If not specified, software reset is disabled.
+
+required:
+  - compatible
+  - reg
+  - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/reset/nuvoton,npcm7xx-reset.h>
+    rstc: rstc@f0801000 {
+        compatible = "nuvoton,npcm750-reset";
+        reg = <0xf0801000 0x70>;
+        #reset-cells = <2>;
+        nuvoton,sw-reset-number = <2>;
+    };
+
+    // Specifying reset lines connected to IP NPCM7XX modules
+    spi0: spi {
+        resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_PSPI1>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/qca,ar7100-reset.yaml b/Documentation/devicetree/bindings/reset/qca,ar7100-reset.yaml
new file mode 100644 (file)
index 0000000..9be60e5
--- /dev/null
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2015 Alban Bedel <albeu@free.fr>
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/qca,ar7100-reset.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Atheros AR7xxx/AR9XXX reset controller
+
+maintainers:
+  - Alban Bedel <albeu@free.fr>
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - qca,ar9132-reset
+          - qca,ar9331-reset
+      - const: qca,ar7100-reset
+
+  reg:
+    maxItems: 1
+
+  "#reset-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    reset-controller@1806001c {
+          compatible = "qca,ar9132-reset", "qca,ar7100-reset";
+          reg = <0x1806001c 0x4>;
+          #reset-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt b/Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
deleted file mode 100644 (file)
index 32d8435..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-Binding for the AXS10x reset controller
-
-This binding describes the ARC AXS10x boards custom IP-block which allows
-to control reset signals of selected peripherals. For example DW GMAC, etc...
-This block is controlled via memory-mapped register (AKA CREG) which
-represents up-to 32 reset lines.
-
-As of today only the following lines are used:
- - DW GMAC - line 5
-
-This binding uses the common reset binding[1].
-
-[1] Documentation/devicetree/bindings/reset/reset.txt
-
-Required properties:
-- compatible: should be "snps,axs10x-reset".
-- reg: should always contain pair address - length: for creg reset
-  bits register.
-- #reset-cells: from common reset binding; Should always be set to 1.
-
-Example:
-       reset: reset-controller@11220 {
-               compatible = "snps,axs10x-reset";
-               #reset-cells = <1>;
-               reg = <0x11220 0x4>;
-       };
-
-Specifying reset lines connected to IP modules:
-       ethernet@.... {
-               ....
-               resets = <&reset 5>;
-               ....
-       };
diff --git a/Documentation/devicetree/bindings/reset/snps,axs10x-reset.yaml b/Documentation/devicetree/bindings/reset/snps,axs10x-reset.yaml
new file mode 100644 (file)
index 0000000..a75db3d
--- /dev/null
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/snps,axs10x-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AXS10x reset controller
+
+maintainers:
+  - Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
+
+description: |
+  This binding describes the ARC AXS10x boards custom IP-block which allows
+  to control reset signals of selected peripherals. For example DW GMAC, etc...
+  This block is controlled via memory-mapped register (AKA CREG) which
+  represents up-to 32 reset lines.
+  As of today only the following lines are used:
+   - DW GMAC - line 5
+
+properties:
+  compatible:
+    const: snps,axs10x-reset
+
+  reg:
+    maxItems: 1
+
+  '#reset-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    reset: reset-controller@11220 {
+        compatible = "snps,axs10x-reset";
+        #reset-cells = <1>;
+        reg = <0x11220 0x4>;
+    };
+
+    // Specifying reset lines connected to IP modules:
+    ethernet {
+        resets = <&reset 5>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/socfpga-reset.txt b/Documentation/devicetree/bindings/reset/socfpga-reset.txt
deleted file mode 100644 (file)
index 38fe34f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-Altera SOCFPGA Reset Manager
-
-Required properties:
-- compatible : "altr,rst-mgr" for (Cyclone5/Arria5/Arria10)
-              "altr,stratix10-rst-mgr","altr,rst-mgr" for Stratix10 ARM64 SoC
-- reg : Should contain 1 register ranges(address and length)
-- altr,modrst-offset : Should contain the offset of the first modrst register.
-- #reset-cells: 1
-
-Example:
-        rstmgr@ffd05000 {
-               #reset-cells = <1>;
-               compatible = "altr,rst-mgr";
-               reg = <0xffd05000 0x1000>;
-               altr,modrst-offset = <0x10>;
-       };
index bfbd3e9..0a2c13e 100644 (file)
@@ -38,25 +38,49 @@ properties:
     minItems: 1
     maxItems: 2
 
-  clock-names:
-    oneOf:
-      - items:           # for Pro4, Pro5
-          - const: gio
-          - const: link
-      - items:           # for others
-          - const: link
+  clock-names: true
 
   resets:
     minItems: 1
     maxItems: 2
 
-  reset-names:
-    oneOf:
-      - items:           # for Pro4, Pro5
-          - const: gio
-          - const: link
-      - items:           # for others
-          - const: link
+  reset-names: true
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - socionext,uniphier-pro4-usb3-reset
+              - socionext,uniphier-pro5-usb3-reset
+              - socionext,uniphier-pro4-ahci-reset
+    then:
+      properties:
+        clocks:
+          minItems: 2
+          maxItems: 2
+        clock-names:
+          items:
+            - const: gio
+            - const: link
+        resets:
+          minItems: 2
+          maxItems: 2
+        reset-names:
+          items:
+            - const: gio
+            - const: link
+    else:
+      properties:
+        clocks:
+          maxItems: 1
+        clock-names:
+          const: link
+        resets:
+          maxItems: 1
+        reset-names:
+          const: link
 
 additionalProperties: false
 
diff --git a/Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt b/Documentation/devicetree/bindings/reset/st,sti-picophyreset.txt
deleted file mode 100644 (file)
index 9ca2776..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-STMicroelectronics STi family Sysconfig Picophy SoftReset Controller
-=============================================================================
-
-This binding describes a reset controller device that is used to enable and
-disable on-chip PicoPHY USB2 phy(s) using "softreset" control bits found in
-the STi family SoC system configuration registers.
-
-The actual action taken when softreset is asserted is hardware dependent.
-However, when asserted it may not be possible to access the hardware's
-registers and after an assert/deassert sequence the hardware's previous state
-may no longer be valid.
-
-Please refer to Documentation/devicetree/bindings/reset/reset.txt
-for common reset controller binding usage.
-
-Required properties:
-- compatible: Should be "st,stih407-picophyreset"
-- #reset-cells: 1, see below
-
-Example:
-
-       picophyreset: picophyreset-controller {
-               compatible = "st,stih407-picophyreset";
-               #reset-cells = <1>;
-       };
-
-Specifying picophyreset control of devices
-=======================================
-
-Device nodes should specify the reset channel required in their "resets"
-property, containing a phandle to the picophyreset device node and an
-index specifying which channel to use, as described in
-Documentation/devicetree/bindings/reset/reset.txt.
-
-Example:
-
-       usb2_picophy0: usbpicophy@0 {
-               resets = <&picophyreset STIH407_PICOPHY0_RESET>;
-       };
-
-Macro definitions for the supported reset channels can be found in:
-include/dt-bindings/reset/stih407-resets.h
diff --git a/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt b/Documentation/devicetree/bindings/reset/st,sti-powerdown.txt
deleted file mode 100644 (file)
index 9252713..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-STMicroelectronics STi family Sysconfig Peripheral Powerdown Reset Controller
-=============================================================================
-
-This binding describes a reset controller device that is used to enable and
-disable on-chip peripheral controllers such as USB and SATA, using
-"powerdown" control bits found in the STi family SoC system configuration
-registers. These have been grouped together into a single reset controller
-device for convenience.
-
-The actual action taken when powerdown is asserted is hardware dependent.
-However, when asserted it may not be possible to access the hardware's
-registers and after an assert/deassert sequence the hardware's previous state
-may no longer be valid.
-
-Please refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: Should be "st,stih407-powerdown"
-- #reset-cells: 1, see below
-
-example:
-
-       powerdown: powerdown-controller {
-               compatible = "st,stih407-powerdown";
-               #reset-cells = <1>;
-       };
-
-
-Specifying powerdown control of devices
-=======================================
-
-Device nodes should specify the reset channel required in their "resets"
-property, containing a phandle to the powerdown device node and an
-index specifying which channel to use, as described in reset.txt
-
-example:
-
-       st_dwc3: dwc3@8f94000 {
-               resets          = <&powerdown STIH407_USB3_POWERDOWN>,
-       };
-
-Macro definitions for the supported reset channels can be found in:
-
-include/dt-bindings/reset/stih407-resets.h
diff --git a/Documentation/devicetree/bindings/reset/st,stih407-picophyreset.yaml b/Documentation/devicetree/bindings/reset/st,stih407-picophyreset.yaml
new file mode 100644 (file)
index 0000000..329ae4a
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/st,stih407-picophyreset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STi family Sysconfig Picophy SoftReset Controller
+
+maintainers:
+  - Peter Griffin <peter.griffin@linaro.org>
+
+description: |
+  This binding describes a reset controller device that is used to enable and
+  disable on-chip PicoPHY USB2 phy(s) using "softreset" control bits found in
+  the STi family SoC system configuration registers.
+
+  The actual action taken when softreset is asserted is hardware dependent.
+  However, when asserted it may not be possible to access the hardware's
+  registers and after an assert/deassert sequence the hardware's previous state
+  may no longer be valid.
+
+properties:
+  compatible:
+    const: st,stih407-picophyreset
+
+  '#reset-cells':
+    const: 1
+
+required:
+  - compatible
+  - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/reset/stih407-resets.h>
+
+    picophyreset: picophyreset-controller {
+        compatible = "st,stih407-picophyreset";
+        #reset-cells = <1>;
+    };
+
+    // Specifying picophyreset control of devices
+    usb2_picophy0: usbpicophy {
+        resets = <&picophyreset STIH407_PICOPHY0_RESET>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml b/Documentation/devicetree/bindings/reset/st,stih407-powerdown.yaml
new file mode 100644 (file)
index 0000000..d3790e6
--- /dev/null
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/st,stih407-powerdown.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STi family Sysconfig Peripheral Powerdown Reset Controller
+
+maintainers:
+  - Srinivas Kandagatla <srinivas.kandagatla@st.com>
+
+description: |
+  This binding describes a reset controller device that is used to enable and
+  disable on-chip peripheral controllers such as USB and SATA, using
+  "powerdown" control bits found in the STi family SoC system configuration
+  registers. These have been grouped together into a single reset controller
+  device for convenience.
+
+  The actual action taken when powerdown is asserted is hardware dependent.
+  However, when asserted it may not be possible to access the hardware's
+  registers and after an assert/deassert sequence the hardware's previous state
+  may no longer be valid.
+
+properties:
+  compatible:
+    const: st,stih407-powerdown
+
+  '#reset-cells':
+    const: 1
+
+required:
+  - compatible
+  - '#reset-cells'
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/reset/stih407-resets.h>
+
+    powerdown: powerdown-controller {
+        compatible = "st,stih407-powerdown";
+        #reset-cells = <1>;
+    };
+
+    // Specifying powerdown control of devices:
+    st_dwc3: dwc3 {
+        resets = <&powerdown STIH407_USB3_POWERDOWN>;
+    };
index a50c34d..765d9f9 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC True Random Number Generator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Łukasz Stelmach <l.stelmach@samsung.com>
 
 properties:
index 84bf518..4754174 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: TimerIO Random Number Generator
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index a98ed66..fde886a 100644 (file)
@@ -8,7 +8,7 @@ title: Samsung's Exynos USI (Universal Serial Interface) binding
 
 maintainers:
   - Sam Protsenko <semen.protsenko@linaro.org>
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   USI IP-core provides selectable serial protocol (UART, SPI or High-Speed I2C).
@@ -77,7 +77,7 @@ patternProperties:
     description: Child node describing underlying UART/serial
 
   "^spi@[0-9a-f]+$":
-    type: object
+    $ref: /schemas/spi/samsung,spi.yaml
     description: Child node describing underlying SPI
 
 required:
index cea2bf3..9bc4585 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Insignal Arndale boards audio complex
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index cb51af9..ac151d3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung SMDK5250 audio complex with WM8994 codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 0c3b330..51a83d3 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Google Snow audio complex with MAX9809x codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index 74712d6..491e080 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos5433 TM2(E) audio complex with WM5110 codec
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
   - Sylwester Nawrocki <s.nawrocki@samsung.com>
 
 properties:
index b3dbcba..fe2e155 100644 (file)
@@ -136,8 +136,7 @@ allOf:
         compatible:
           contains:
             const: st,stm32f4-sai
-
-  - then:
+    then:
       properties:
         clocks:
           items:
@@ -148,8 +147,7 @@ allOf:
           items:
             - const: x8k
             - const: x11k
-
-  - else:
+    else:
       properties:
         clocks:
           items:
index b104899..5de710a 100644 (file)
@@ -124,7 +124,6 @@ properties:
     description: |
       Override the default TX fifo size.  Unit is words.  Ignored if 0.
     $ref: /schemas/types.yaml#/definitions/uint32
-    maxItems: 1
     default: 64
 
   renesas,rx-fifo-size:
@@ -132,7 +131,6 @@ properties:
     description: |
       Override the default RX fifo size.  Unit is words.  Ignored if 0.
     $ref: /schemas/types.yaml#/definitions/uint32
-    maxItems: 1
     default: 64
 
 required:
index f0db3fb..25b1b6c 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Peripheral-specific properties for Samsung S3C/S5P/Exynos SoC SPI controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   See spi-peripheral-props.yaml for more info.
index bf9a76d..a50f24f 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung S3C/S5P/Exynos SoC SPI controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description:
   All the SPI controller nodes should be represented in the aliases node using
index 668a9a4..993430b 100644 (file)
@@ -136,14 +136,14 @@ required:
   - reg
 
 if:
-  properties:
-    compatible:
-      contains:
-        enum:
-          - qcom,rpm-msg-ram
-          - rockchip,rk3288-pmu-sram
-
-else:
+  not:
+    properties:
+      compatible:
+        contains:
+          enum:
+            - qcom,rpm-msg-ram
+            - rockchip,rk3288-pmu-sram
+then:
   required:
     - "#address-cells"
     - "#size-cells"
index 17129f7..1344df7 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC Thermal Management Unit (TMU)
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 description: |
   For multi-instance tmu each instance should have an alias correctly numbered
index f11cbc7..1584944 100644 (file)
@@ -19,9 +19,20 @@ description: |+
 
 properties:
   compatible:
-    enum:
-      - samsung,exynos4210-mct
-      - samsung,exynos4412-mct
+    oneOf:
+      - enum:
+          - samsung,exynos4210-mct
+          - samsung,exynos4412-mct
+      - items:
+          - enum:
+              - samsung,exynos3250-mct
+              - samsung,exynos5250-mct
+              - samsung,exynos5260-mct
+              - samsung,exynos5420-mct
+              - samsung,exynos5433-mct
+              - samsung,exynos850-mct
+              - tesla,fsd-mct
+          - const: samsung,exynos4210-mct
 
   clocks:
     minItems: 2
@@ -63,6 +74,56 @@ required:
   - interrupts
   - reg
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: samsung,exynos3250-mct
+    then:
+      properties:
+        interrupts:
+          minItems: 8
+          maxItems: 8
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: samsung,exynos5250-mct
+    then:
+      properties:
+        interrupts:
+          minItems: 6
+          maxItems: 6
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - samsung,exynos5260-mct
+              - samsung,exynos5420-mct
+              - samsung,exynos5433-mct
+              - samsung,exynos850-mct
+    then:
+      properties:
+        interrupts:
+          minItems: 12
+          maxItems: 12
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - tesla,fsd-mct
+    then:
+      properties:
+        interrupts:
+          minItems: 16
+          maxItems: 16
+
 additionalProperties: false
 
 examples:
index 22b91a2..6b9a3bc 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 3.0 DWC3 Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index fbf07d6..340dff8 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Samsung Exynos SoC USB 2.0 EHCI/OHCI Controller
 
 maintainers:
-  - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+  - Krzysztof Kozlowski <krzk@kernel.org>
 
 properties:
   compatible:
index 5500667..36a76cb 100644 (file)
@@ -185,6 +185,12 @@ DMA Fence Chain
 .. kernel-doc:: include/linux/dma-fence-chain.h
    :internal:
 
+DMA Fence unwrap
+~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/dma-fence-unwrap.h
+   :internal:
+
 DMA Fence uABI/Sync File
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
index be793c4..d7507be 100644 (file)
@@ -73,7 +73,7 @@ busy.
 If successful, the cache backend can then start setting up the cache.  In the
 event that the initialisation fails, the cache backend should call::
 
-       void fscache_relinquish_cookie(struct fscache_cache *cache);
+       void fscache_relinquish_cache(struct fscache_cache *cache);
 
 to reset and discard the cookie.
 
@@ -110,9 +110,9 @@ to withdraw them, calling::
 
 on the cookie that each object belongs to.  This schedules the specified cookie
 for withdrawal.  This gets offloaded to a workqueue.  The cache backend can
-test for completion by calling::
+wait for completion by calling::
 
-       bool fscache_are_objects_withdrawn(struct fscache_cookie *cache);
+       void fscache_wait_for_objects(struct fscache_cache *cache);
 
 Once all the cookies are withdrawn, a cache backend can withdraw all the
 volumes, calling::
@@ -125,7 +125,7 @@ outstanding accesses on the volume to complete before returning.
 When the the cache is completely withdrawn, fscache should be notified by
 calling::
 
-       void fscache_cache_relinquish(struct fscache_cache *cache);
+       void fscache_relinquish_cache(struct fscache_cache *cache);
 
 to clear fields in the cookie and discard the caller's ref on it.
 
index 5066113..7308d76 100644 (file)
@@ -404,22 +404,21 @@ schedule a write of that region::
 And if an error occurs before that point is reached, the marks can be removed
 by calling::
 
-       void fscache_clear_page_bits(struct fscache_cookie *cookie,
-                                    struct address_space *mapping,
+       void fscache_clear_page_bits(struct address_space *mapping,
                                     loff_t start, size_t len,
                                     bool caching)
 
-In both of these functions, the cookie representing the cache object to be
-written to and a pointer to the mapping to which the source pages are attached
-are passed in; start and len indicate the size of the region that's going to be
-written (it doesn't have to align to page boundaries necessarily, but it does
-have to align to DIO boundaries on the backing filesystem).  The caching
-parameter indicates if caching should be skipped, and if false, the functions
-do nothing.
-
-The write function takes some additional parameters: i_size indicates the size
-of the netfs file and term_func indicates an optional completion function, to
-which term_func_priv will be passed, along with the error or amount written.
+In these functions, a pointer to the mapping to which the source pages are
+attached is passed in and start and len indicate the size of the region that's
+going to be written (it doesn't have to align to page boundaries necessarily,
+but it does have to align to DIO boundaries on the backing filesystem).  The
+caching parameter indicates if caching should be skipped, and if false, the
+functions do nothing.
+
+The write function takes some additional parameters: the cookie representing
+the cache object to be written to, i_size indicates the size of the netfs file
+and term_func indicates an optional completion function, to which
+term_func_priv will be passed, along with the error or amount written.
 
 Note that the write function will always run asynchronously and will unmark all
 the pages upon completion before calling term_func.
index 525e684..43be378 100644 (file)
@@ -894,7 +894,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and packet type ID
                field to generate the hash. The formula is
 
-               hash = source MAC XOR destination MAC XOR packet type ID
+               hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
                slave number = hash modulo slave count
 
                This algorithm will place all traffic to a particular
@@ -910,7 +910,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and IP addresses to
                generate the hash.  The formula is
 
-               hash = source MAC XOR destination MAC XOR packet type ID
+               hash = source MAC[5] XOR destination MAC[5] XOR packet type ID
                hash = hash XOR source IP XOR destination IP
                hash = hash XOR (hash RSHIFT 16)
                hash = hash XOR (hash RSHIFT 8)
index 89bb4fa..ddc1dd0 100644 (file)
@@ -10,21 +10,21 @@ in joining the effort.
 Design principles
 =================
 
-The Distributed Switch Architecture is a subsystem which was primarily designed
-to support Marvell Ethernet switches (MV88E6xxx, a.k.a Linkstreet product line)
-using Linux, but has since evolved to support other vendors as well.
+The Distributed Switch Architecture subsystem was primarily designed to
+support Marvell Ethernet switches (MV88E6xxx, a.k.a. Link Street product
+line) using Linux, but has since evolved to support other vendors as well.
 
 The original philosophy behind this design was to be able to use unmodified
 Linux tools such as bridge, iproute2, ifconfig to work transparently whether
 they configured/queried a switch port network device or a regular network
 device.
 
-An Ethernet switch is typically comprised of multiple front-panel ports, and one
-or more CPU or management port. The DSA subsystem currently relies on the
+An Ethernet switch typically comprises multiple front-panel ports and one
+or more CPU or management ports. The DSA subsystem currently relies on the
 presence of a management port connected to an Ethernet controller capable of
 receiving Ethernet frames from the switch. This is a very common setup for all
 kinds of Ethernet switches found in Small Home and Office products: routers,
-gateways, or even top-of-the rack switches. This host Ethernet controller will
+gateways, or even top-of-rack switches. This host Ethernet controller will
 be later referred to as "master" and "cpu" in DSA terminology and code.
 
 The D in DSA stands for Distributed, because the subsystem has been designed
@@ -33,14 +33,14 @@ using upstream and downstream Ethernet links between switches. These specific
 ports are referred to as "dsa" ports in DSA terminology and code. A collection
 of multiple switches connected to each other is called a "switch tree".
 
-For each front-panel port, DSA will create specialized network devices which are
+For each front-panel port, DSA creates specialized network devices which are
 used as controlling and data-flowing endpoints for use by the Linux networking
 stack. These specialized network interfaces are referred to as "slave" network
 interfaces in DSA terminology and code.
 
 The ideal case for using DSA is when an Ethernet switch supports a "switch tag"
 which is a hardware feature making the switch insert a specific tag for each
-Ethernet frames it received to/from specific ports to help the management
+Ethernet frame it receives to/from specific ports to help the management
 interface figure out:
 
 - what port is this frame coming from
@@ -125,7 +125,7 @@ other switches from the same fabric, and in this case, the outermost switch
 ports must decapsulate the packet.
 
 Note that in certain cases, it might be the case that the tagging format used
-by a leaf switch (not connected directly to the CPU) to not be the same as what
+by a leaf switch (not connected directly to the CPU) is not the same as what
 the network stack sees. This can be seen with Marvell switch trees, where the
 CPU port can be configured to use either the DSA or the Ethertype DSA (EDSA)
 format, but the DSA links are configured to use the shorter (without Ethertype)
@@ -270,21 +270,21 @@ These interfaces are specialized in order to:
   to/from specific switch ports
 - query the switch for ethtool operations: statistics, link state,
   Wake-on-LAN, register dumps...
-- external/internal PHY management: link, auto-negotiation etc.
+- manage external/internal PHY: link, auto-negotiation, etc.
 
 These slave network devices have custom net_device_ops and ethtool_ops function
 pointers which allow DSA to introduce a level of layering between the networking
-stack/ethtool, and the switch driver implementation.
+stack/ethtool and the switch driver implementation.
 
 Upon frame transmission from these slave network devices, DSA will look up which
-switch tagging protocol is currently registered with these network devices, and
+switch tagging protocol is currently registered with these network devices and
 invoke a specific transmit routine which takes care of adding the relevant
 switch tag in the Ethernet frames.
 
 These frames are then queued for transmission using the master network device
-``ndo_start_xmit()`` function, since they contain the appropriate switch tag, the
+``ndo_start_xmit()`` function. Since they contain the appropriate switch tag, the
 Ethernet switch will be able to process these incoming frames from the
-management interface and delivers these frames to the physical switch port.
+management interface and deliver them to the physical switch port.
 
 Graphical representation
 ------------------------
@@ -330,9 +330,9 @@ MDIO reads/writes towards specific PHY addresses. In most MDIO-connected
 switches, these functions would utilize direct or indirect PHY addressing mode
 to return standard MII registers from the switch builtin PHYs, allowing the PHY
 library and/or to return link status, link partner pages, auto-negotiation
-results etc..
+results, etc.
 
-For Ethernet switches which have both external and internal MDIO busses, the
+For Ethernet switches which have both external and internal MDIO buses, the
 slave MII bus can be utilized to mux/demux MDIO reads and writes towards either
 internal or external MDIO devices this switch might be connected to: internal
 PHYs, external PHYs, or even external switches.
@@ -349,7 +349,7 @@ DSA data structures are defined in ``include/net/dsa.h`` as well as
   table indication (when cascading switches)
 
 - ``dsa_platform_data``: platform device configuration data which can reference
-  a collection of dsa_chip_data structure if multiples switches are cascaded,
+  a collection of dsa_chip_data structures if multiple switches are cascaded,
   the master network device this switch tree is attached to needs to be
   referenced
 
@@ -426,7 +426,7 @@ logic basically looks like this:
   "phy-handle" property, if found, this PHY device is created and registered
   using ``of_phy_connect()``
 
-- if Device Tree is used, and the PHY device is "fixed", that is, conforms to
+- if Device Tree is used and the PHY device is "fixed", that is, conforms to
   the definition of a non-MDIO managed PHY as defined in
   ``Documentation/devicetree/bindings/net/fixed-link.txt``, the PHY is registered
   and connected transparently using the special fixed MDIO bus driver
@@ -481,7 +481,7 @@ Device Tree
 DSA features a standardized binding which is documented in
 ``Documentation/devicetree/bindings/net/dsa/dsa.txt``. PHY/MDIO library helper
 functions such as ``of_get_phy_mode()``, ``of_phy_connect()`` are also used to query
-per-port PHY specific details: interface connection, MDIO bus location etc..
+per-port PHY specific details: interface connection, MDIO bus location, etc.
 
 Driver development
 ==================
@@ -509,7 +509,7 @@ Switch configuration
 
 - ``setup``: setup function for the switch, this function is responsible for setting
   up the ``dsa_switch_ops`` private structure with all it needs: register maps,
-  interrupts, mutexes, locks etc.. This function is also expected to properly
+  interrupts, mutexes, locks, etc. This function is also expected to properly
   configure the switch to separate all network interfaces from each other, that
   is, they should be isolated by the switch hardware itself, typically by creating
   a Port-based VLAN ID for each port and allowing only the CPU port and the
@@ -526,13 +526,13 @@ PHY devices and link management
 - ``get_phy_flags``: Some switches are interfaced to various kinds of Ethernet PHYs,
   if the PHY library PHY driver needs to know about information it cannot obtain
   on its own (e.g.: coming from switch memory mapped registers), this function
-  should return a 32-bits bitmask of "flags", that is private between the switch
+  should return a 32-bit bitmask of "flags" that is private between the switch
   driver and the Ethernet PHY driver in ``drivers/net/phy/\*``.
 
 - ``phy_read``: Function invoked by the DSA slave MDIO bus when attempting to read
   the switch port MDIO registers. If unavailable, return 0xffff for each read.
   For builtin switch Ethernet PHYs, this function should allow reading the link
-  status, auto-negotiation results, link partner pages etc..
+  status, auto-negotiation results, link partner pages, etc.
 
 - ``phy_write``: Function invoked by the DSA slave MDIO bus when attempting to write
   to the switch port MDIO registers. If unavailable return a negative error
@@ -554,7 +554,7 @@ Ethtool operations
 ------------------
 
 - ``get_strings``: ethtool function used to query the driver's strings, will
-  typically return statistics strings, private flags strings etc.
+  typically return statistics strings, private flags strings, etc.
 
 - ``get_ethtool_stats``: ethtool function used to query per-port statistics and
   return their values. DSA overlays slave network devices general statistics:
@@ -564,7 +564,7 @@ Ethtool operations
 - ``get_sset_count``: ethtool function used to query the number of statistics items
 
 - ``get_wol``: ethtool function used to obtain Wake-on-LAN settings per-port, this
-  function may, for certain implementations also query the master network device
+  function may for certain implementations also query the master network device
   Wake-on-LAN settings if this interface needs to participate in Wake-on-LAN
 
 - ``set_wol``: ethtool function used to configure Wake-on-LAN settings per-port,
@@ -607,14 +607,14 @@ Power management
   in a fully active state
 
 - ``port_enable``: function invoked by the DSA slave network device ndo_open
-  function when a port is administratively brought up, this function should be
-  fully enabling a given switch port. DSA takes care of marking the port with
+  function when a port is administratively brought up, this function should
+  fully enable a given switch port. DSA takes care of marking the port with
   ``BR_STATE_BLOCKING`` if the port is a bridge member, or ``BR_STATE_FORWARDING`` if it
   was not, and propagating these changes down to the hardware
 
 - ``port_disable``: function invoked by the DSA slave network device ndo_close
-  function when a port is administratively brought down, this function should be
-  fully disabling a given switch port. DSA takes care of marking the port with
+  function when a port is administratively brought down, this function should
+  fully disable a given switch port. DSA takes care of marking the port with
   ``BR_STATE_DISABLED`` and propagating changes to the hardware if this port is
   disabled while being a bridge member
 
@@ -622,12 +622,12 @@ Bridge layer
 ------------
 
 - ``port_bridge_join``: bridge layer function invoked when a given switch port is
-  added to a bridge, this function should be doing the necessary at the switch
-  level to permit the joining port from being added to the relevant logical
+  added to a bridge, this function should do what's necessary at the switch
+  level to permit the joining port to be added to the relevant logical
   domain for it to ingress/egress traffic with other members of the bridge.
 
 - ``port_bridge_leave``: bridge layer function invoked when a given switch port is
-  removed from a bridge, this function should be doing the necessary at the
+  removed from a bridge, this function should do what's necessary at the
   switch level to deny the leaving port from ingress/egress traffic from the
   remaining bridge members. When the port leaves the bridge, it should be aged
   out at the switch hardware for the switch to (re) learn MAC addresses behind
@@ -663,7 +663,7 @@ Bridge layer
   point for drivers that need to configure the hardware for enabling this
   feature.
 
-- ``port_bridge_tx_fwd_unoffload``: bridge layer function invoken when a driver
+- ``port_bridge_tx_fwd_unoffload``: bridge layer function invoked when a driver
   leaves a bridge port which had the TX forwarding offload feature enabled.
 
 Bridge VLAN filtering
index d13fa66..85c7abc 100644 (file)
@@ -6190,6 +6190,7 @@ Valid values for 'type' are:
                        unsigned long args[6];
                        unsigned long ret[2];
                } riscv_sbi;
+
 If exit reason is KVM_EXIT_RISCV_SBI then it indicates that the VCPU has
 done a SBI call which is not handled by KVM RISC-V kernel module. The details
 of the SBI call are available in 'riscv_sbi' member of kvm_run structure. The
index db43ee5..31f62b6 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 =================
 KVM VCPU Requests
 =================
index 1c6847f..2d30781 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ======================================
 Secure Encrypted Virtualization (SEV)
 ======================================
index 806f049..410e0aa 100644 (file)
@@ -1,3 +1,4 @@
+.. SPDX-License-Identifier: GPL-2.0
 
 =======================================
 Known limitations of CPU virtualization
@@ -36,4 +37,3 @@ Nested virtualization features
 ------------------------------
 
 TBD
-
index bd70c69..a27e676 100644 (file)
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
 ==============================
 Running nested guests with KVM
 ==============================
index fd768d4..c541804 100644 (file)
@@ -201,6 +201,7 @@ F:  include/net/ieee80211_radiotap.h
 F:     include/net/iw_handler.h
 F:     include/net/wext.h
 F:     include/uapi/linux/nl80211.h
+F:     include/uapi/linux/wireless.h
 F:     net/wireless/
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
@@ -1836,7 +1837,9 @@ F:        Documentation/devicetree/bindings/arm/apple/*
 F:     Documentation/devicetree/bindings/clock/apple,nco.yaml
 F:     Documentation/devicetree/bindings/i2c/apple,i2c.yaml
 F:     Documentation/devicetree/bindings/interrupt-controller/apple,*
+F:     Documentation/devicetree/bindings/iommu/apple,sart.yaml
 F:     Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
+F:     Documentation/devicetree/bindings/nvme/apple,nvme-ans.yaml
 F:     Documentation/devicetree/bindings/pci/apple,pcie.yaml
 F:     Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
 F:     Documentation/devicetree/bindings/power/apple*
@@ -1847,12 +1850,14 @@ F:      drivers/i2c/busses/i2c-pasemi-core.c
 F:     drivers/i2c/busses/i2c-pasemi-platform.c
 F:     drivers/irqchip/irq-apple-aic.c
 F:     drivers/mailbox/apple-mailbox.c
+F:     drivers/nvme/host/apple.c
 F:     drivers/pinctrl/pinctrl-apple-gpio.c
 F:     drivers/soc/apple/*
 F:     drivers/watchdog/apple_wdt.c
 F:     include/dt-bindings/interrupt-controller/apple-aic.h
 F:     include/dt-bindings/pinctrl/apple.h
 F:     include/linux/apple-mailbox.h
+F:     include/linux/soc/apple/*
 
 ARM/ARTPEC MACHINE SUPPORT
 M:     Jesper Nilsson <jesper.nilsson@axis.com>
@@ -2636,7 +2641,7 @@ F:        sound/soc/rockchip/
 N:     rockchip
 
 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
@@ -3742,7 +3747,7 @@ F:        include/linux/platform_data/b53.h
 
 BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3757,7 +3762,7 @@ BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 T:     git git://github.com/broadcom/mach-bcm
 F:     arch/arm/mach-bcm/
@@ -3777,7 +3782,7 @@ F:        arch/mips/include/asm/mach-bcm47xx/*
 
 BROADCOM BCM4908 ETHERNET DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/brcm,bcm4908-enet.yaml
@@ -3786,7 +3791,7 @@ F:        drivers/net/ethernet/broadcom/unimac.h
 
 BROADCOM BCM4908 PINMUX DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/brcm,bcm4908-pinctrl.yaml
@@ -3796,7 +3801,7 @@ BROADCOM BCM5301X ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Hauke Mehrtens <hauke@hauke-m.de>
 M:     Rafał Miłecki <zajec5@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/bcm470*
@@ -3807,7 +3812,7 @@ F:        arch/arm/mach-bcm/bcm_5301x.c
 BROADCOM BCM53573 ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     Rafał Miłecki <rafal@milecki.pl>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/bcm47189*
@@ -3815,7 +3820,7 @@ F:        arch/arm/boot/dts/bcm53573*
 
 BROADCOM BCM63XX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3829,7 +3834,7 @@ F:        drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3847,21 +3852,21 @@ N:      bcm7120
 BROADCOM BDC DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,bdc.yaml
 F:     drivers/usb/gadget/udc/bdc/
 
 BROADCOM BMIPS CPUFREQ DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     drivers/cpufreq/bmips-cpufreq.c
 
 BROADCOM BMIPS MIPS ARCHITECTURE
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -3927,53 +3932,53 @@ F:      drivers/net/wireless/broadcom/brcm80211/
 BROADCOM BRCMSTB GPIO DRIVER
 M:     Doug Berger <opendmb@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
 F:     drivers/gpio/gpio-brcmstb.c
 
 BROADCOM BRCMSTB I2C DRIVER
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-i2c@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Supported
 F:     Documentation/devicetree/bindings/i2c/brcm,brcmstb-i2c.yaml
 F:     drivers/i2c/busses/i2c-brcmstb.c
 
 BROADCOM BRCMSTB UART DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-serial@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
 F:     drivers/tty/serial/8250/8250_bcm7271.c
 
 BROADCOM BRCMSTB USB EHCI DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,bcm7445-ehci.yaml
 F:     drivers/usb/host/ehci-brcm.*
 
 BROADCOM BRCMSTB USB PIN MAP DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-usb@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
 F:     drivers/usb/misc/brcmstb-usb-pinmap.c
 
 BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-kernel@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/phy/broadcom/phy-brcm-usb*
 
 BROADCOM ETHERNET PHY DRIVERS
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
@@ -3984,7 +3989,7 @@ F:        include/linux/brcmphy.h
 BROADCOM GENET ETHERNET DRIVER
 M:     Doug Berger <opendmb@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
@@ -3998,7 +4003,7 @@ F:        include/linux/platform_data/mdio-bcm-unimac.h
 BROADCOM IPROC ARM ARCHITECTURE
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -4026,7 +4031,7 @@ N:        stingray
 
 BROADCOM IPROC GBIT ETHERNET DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/brcm,amac.yaml
@@ -4035,7 +4040,7 @@ F:        drivers/net/ethernet/broadcom/unimac.h
 
 BROADCOM KONA GPIO DRIVER
 M:     Ray Jui <rjui@broadcom.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
 F:     drivers/gpio/gpio-bcm-kona.c
@@ -4068,7 +4073,7 @@ F:        drivers/firmware/broadcom/*
 BROADCOM PMB (POWER MANAGEMENT BUS) DRIVER
 M:     Rafał Miłecki <rafal@milecki.pl>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
@@ -4084,7 +4089,7 @@ F:        include/linux/bcma/
 
 BROADCOM SPI DRIVER
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Maintained
 F:     Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.yaml
 F:     drivers/spi/spi-bcm-qspi.*
@@ -4093,7 +4098,7 @@ F:        drivers/spi/spi-iproc-qspi.c
 
 BROADCOM STB AVS CPUFREQ DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/cpufreq/brcm,stb-avs-cpu-freq.txt
@@ -4101,7 +4106,7 @@ F:        drivers/cpufreq/brcmstb*
 
 BROADCOM STB AVS TMON DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -4109,7 +4114,7 @@ F:        drivers/thermal/broadcom/brcmstb*
 
 BROADCOM STB DPFE DRIVER
 M:     Markus Mayer <mmayer@broadcom.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/memory-controllers/brcm,dpfe-cpu.yaml
@@ -4118,8 +4123,8 @@ F:        drivers/memory/brcmstb_dpfe.c
 BROADCOM STB NAND FLASH DRIVER
 M:     Brian Norris <computersforpeace@gmail.com>
 M:     Kamal Dasu <kdasu.kdev@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mtd@lists.infradead.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mtd/nand/raw/brcmnand/
 F:     include/linux/platform_data/brcmnand.h
@@ -4128,7 +4133,7 @@ BROADCOM STB PCIE DRIVER
 M:     Jim Quinlan <jim2101024@gmail.com>
 M:     Nicolas Saenz Julienne <nsaenz@kernel.org>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-M:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -4136,7 +4141,7 @@ F:        drivers/pci/controller/pcie-brcmstb.c
 
 BROADCOM SYSTEMPORT ETHERNET DRIVER
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bcmsysport.*
@@ -4153,7 +4158,7 @@ F:        drivers/net/ethernet/broadcom/tg3.*
 
 BROADCOM VK DRIVER
 M:     Scott Branden <scott.branden@broadcom.com>
-L:     bcm-kernel-feedback-list@broadcom.com
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 S:     Supported
 F:     drivers/misc/bcm-vk/
 F:     include/uapi/linux/misc/bcm_vk.h
@@ -4791,6 +4796,7 @@ F:        .clang-format
 CLANG/LLVM BUILD SUPPORT
 M:     Nathan Chancellor <nathan@kernel.org>
 M:     Nick Desaulniers <ndesaulniers@google.com>
+R:     Tom Rix <trix@redhat.com>
 L:     llvm@lists.linux.dev
 S:     Supported
 W:     https://clangbuiltlinux.github.io/
@@ -5715,7 +5721,7 @@ W:        http://lanana.org/docs/device-list/index.html
 
 DEVICE RESOURCE MANAGEMENT HELPERS
 M:     Hans de Goede <hdegoede@redhat.com>
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Maintained
 F:     include/linux/devm-helpers.h
 
@@ -8675,7 +8681,6 @@ F:        include/linux/cciss*.h
 F:     include/uapi/linux/cciss*.h
 
 HFI1 DRIVER
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -9598,6 +9603,7 @@ F:        drivers/iio/pressure/dps310.c
 
 INFINIBAND SUBSYSTEM
 M:     Jason Gunthorpe <jgg@nvidia.com>
+M:     Leon Romanovsky <leonro@nvidia.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     https://github.com/linux-rdma/rdma-core
@@ -10369,6 +10375,7 @@ F:      include/linux/isapnp.h
 ISCSI
 M:     Lee Duncan <lduncan@suse.com>
 M:     Chris Leech <cleech@redhat.com>
+M:     Mike Christie <michael.christie@oracle.com>
 L:     open-iscsi@googlegroups.com
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
@@ -11208,7 +11215,7 @@ F:      scripts/spdxcheck.py
 
 LINEAR RANGES HELPERS
 M:     Mark Brown <broonie@kernel.org>
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 F:     lib/linear_ranges.c
 F:     lib/test_linear_ranges.c
 F:     include/linux/linear_range.h
@@ -11905,7 +11912,7 @@ F:      drivers/iio/proximity/mb1232.c
 
 MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
 R:     Iskren Chernev <iskren.chernev@gmail.com>
-R:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Matheus Castello <matheus@castello.eng.br>
 L:     linux-pm@vger.kernel.org
@@ -11915,7 +11922,7 @@ F:      drivers/power/supply/max17040_battery.c
 
 MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
 R:     Hans de Goede <hdegoede@redhat.com>
-R:     Krzysztof Kozlowski <krzk@kernel.org>
+R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
 R:     Purism Kernel Team <kernel@puri.sm>
@@ -11967,7 +11974,7 @@ F:      Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
 F:     drivers/power/supply/max77976_charger.c
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
@@ -11978,7 +11985,7 @@ F:      drivers/power/supply/max77693_charger.c
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
@@ -12401,7 +12408,7 @@ F:      drivers/mmc/host/mtk-sd.c
 
 MEDIATEK MT76 WIRELESS LAN DRIVER
 M:     Felix Fietkau <nbd@nbd.name>
-M:     Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+M:     Lorenzo Bianconi <lorenzo@kernel.org>
 M:     Ryder Lee <ryder.lee@mediatek.com>
 R:     Shayne Chen <shayne.chen@mediatek.com>
 R:     Sean Wang <sean.wang@mediatek.com>
@@ -12672,7 +12679,7 @@ F:      mm/memblock.c
 F:     tools/testing/memblock/
 
 MEMORY CONTROLLER DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux-mem-ctrl.git
@@ -13816,7 +13823,7 @@ F:      include/uapi/linux/nexthop.h
 F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-nfc@lists.01.org (subscribers-only)
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -14133,7 +14140,7 @@ F:      Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
 F:     drivers/regulator/pf8x00-regulator.c
 
 NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -14656,7 +14663,6 @@ F:      drivers/rtc/rtc-optee.c
 
 OPA-VNIC DRIVER
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/ulp/opa_vnic
@@ -14687,7 +14693,7 @@ F:      scripts/dtc/
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh+dt@kernel.org>
-M:     Krzysztof Kozlowski <krzk+dt@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
 L:     devicetree@vger.kernel.org
 S:     Maintained
 C:     irc://irc.libera.chat/devicetree
@@ -15599,7 +15605,7 @@ F:      drivers/pinctrl/renesas/
 
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <tomasz.figa@gmail.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -16098,7 +16104,6 @@ F:      include/uapi/linux/qemu_fw_cfg.h
 
 QIB DRIVER
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/qib/
@@ -16616,7 +16621,6 @@ F:      drivers/net/ethernet/rdc/r6040.c
 
 RDMAVT - RDMA verbs software
 M:     Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com>
-M:     Mike Marciniszyn <mike.marciniszyn@cornelisnetworks.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/sw/rdmavt
@@ -17011,8 +17015,7 @@ S:      Odd Fixes
 F:     drivers/tty/serial/rp2.*
 
 ROHM BD99954 CHARGER IC
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
-L:     linux-power@fi.rohmeurope.com
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Supported
 F:     drivers/power/supply/bd99954-charger.c
 F:     drivers/power/supply/bd99954-charger.h
@@ -17035,8 +17038,7 @@ F:      drivers/regulator/bd9571mwv-regulator.c
 F:     include/linux/mfd/bd9571mwv.h
 
 ROHM POWER MANAGEMENT IC DEVICE DRIVERS
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
-L:     linux-power@fi.rohmeurope.com
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 S:     Supported
 F:     drivers/clk/clk-bd718x7.c
 F:     drivers/gpio/gpio-bd71815.c
@@ -17278,7 +17280,7 @@ W:      http://www.ibm.com/developerworks/linux/linux390/
 F:     drivers/s390/scsi/zfcp_*
 
 S3C ADC BATTERY DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-samsung-soc@vger.kernel.org
 S:     Odd Fixes
 F:     drivers/power/supply/s3c_adc_battery.c
@@ -17323,7 +17325,7 @@ F:      Documentation/admin-guide/LSM/SafeSetID.rst
 F:     security/safesetid/
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
@@ -17331,7 +17333,7 @@ F:      Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17366,7 +17368,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17392,7 +17394,7 @@ F:      drivers/media/platform/samsung/s3c-camif/
 F:     include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Krzysztof Opasiak <k.opasiak@samsung.com>
 L:     linux-nfc@lists.01.org (subscribers-only)
 S:     Maintained
@@ -17414,7 +17416,7 @@ S:      Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Vladimir Zapolskiy <vz@mleia.com>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17449,7 +17451,7 @@ F:      include/linux/clk/samsung.h
 F:     include/linux/platform_data/clk-s3c2410.h
 
 SAMSUNG SPI DRIVERS
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Andi Shyti <andi@etezian.org>
 L:     linux-spi@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -17467,7 +17469,7 @@ F:      drivers/net/ethernet/samsung/sxgbe/
 
 SAMSUNG THERMAL DRIVER
 M:     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M:     Krzysztof Kozlowski <krzk@kernel.org>
+M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -17651,8 +17653,8 @@ K:      \bTIF_SECCOMP\b
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) Broadcom BRCMSTB DRIVER
 M:     Al Cooper <alcooperx@gmail.com>
+R:     Broadcom Kernel Team <bcm-kernel-feedback-list@broadcom.com>
 L:     linux-mmc@vger.kernel.org
-L:     bcm-kernel-feedback-list@broadcom.com
 S:     Maintained
 F:     drivers/mmc/host/sdhci-brcmstb*
 
@@ -21118,7 +21120,7 @@ F:      include/linux/regulator/
 K:     regulator_get_optional
 
 VOLTAGE AND CURRENT REGULATOR IRQ HELPERS
-R:     Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
+R:     Matti Vaittinen <mazziesaccount@gmail.com>
 F:     drivers/regulator/irq_helpers.c
 
 VRF
@@ -21231,10 +21233,8 @@ S:     Maintained
 F:     drivers/hid/hid-wiimote*
 
 WILOCITY WIL6210 WIRELESS DRIVER
-M:     Maya Erez <merez@codeaurora.org>
 L:     linux-wireless@vger.kernel.org
-L:     wil6210@qti.qualcomm.com
-S:     Supported
+S:     Orphan
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
 F:     drivers/net/wireless/ath/wil6210/
 
index 8c7de9a..fa5112a 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Superb Owl
 
 # *DOCUMENTATION*
index 2799b2a..f7d90cf 100644 (file)
                pinctrl_pio_io_reset: gpio_io_reset {
                        pinmux = <PIN_PB30__GPIO>;
                        bias-disable;
-                       drive-open-drain = <1>;
+                       drive-open-drain;
                        output-low;
                };
                pinctrl_pio_input: gpio_input {
index abe27ad..4656646 100644 (file)
                pinmux = <PIN_PD12__FLEXCOM4_IO0>, //DATA
                <PIN_PD13__FLEXCOM4_IO1>; //CLK
                bias-disable;
-               drive-open-drain = <1>;
+               drive-open-drain;
        };
 
        pinctrl_pwm0 {
index 1e2a28c..2fb51b9 100644 (file)
                nand0: nand@40000000 {
                        nand-bus-width = <8>;
                        nand-ecc-mode = "soft";
-                       nand-on-flash-bbt = <1>;
+                       nand-on-flash-bbt;
                        status = "okay";
                };
 
index 87c517d..e9aecac 100644 (file)
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&spi1_pins &spi1_cs0_pin>;
-       flash: m25p80@0 {
+       flash: flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "jedec,spi-nor";
index 5126e2d..778796c 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&mcspi1_pins>;
 
-       m25p80@0 {
+       flash@0 {
                compatible = "w25x32";
                spi-max-frequency = <48000000>;
                reg = <0>;
index 097ec35..0d58da1 100644 (file)
@@ -26,7 +26,7 @@
                                pinctrl-0 = <&mmc0_4bit_pins_a
                                             &mmc0_sck_cfg
                                             &en_sd_pwr>;
-                               broken-cd = <1>;
+                               broken-cd;
                                bus-width = <4>;
                                vmmc-supply = <&reg_vddio_sd0>;
                                status = "okay";
index 563bf9d..0b90c3f 100644 (file)
                regulators {
                        bcore1 {
                                regulator-name = "bcore1";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bcore2 {
                                regulator-name = "bcore2";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bpro {
                                regulator-name = "bpro";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bperi {
                                regulator-name = "bperi";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bmem {
                                regulator-name = "bmem";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo2 {
                                regulator-name = "ldo2";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <1800000>;
                        };
 
                        ldo3 {
                                regulator-name = "ldo3";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo4 {
                                regulator-name = "ldo4";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo5 {
                                regulator-name = "ldo5";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo6 {
                                regulator-name = "ldo6";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo7 {
                                regulator-name = "ldo7";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo8 {
                                regulator-name = "ldo8";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo9 {
                                regulator-name = "ldo9";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo10 {
                                regulator-name = "ldo10";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        ldo11 {
                                regulator-name = "ldo11";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <300000>;
                                regulator-max-microvolt = <3300000>;
                        };
 
                        bio {
                                regulator-name = "bio";
-                               regulator-always-on = <1>;
+                               regulator-always-on;
                                regulator-min-microvolt = <1800000>;
                                regulator-max-microvolt = <1800000>;
                        };
index 7cda694..205e4d4 100644 (file)
@@ -72,8 +72,8 @@
                        st,settling = <2>;
                        st,fraction-z = <7>;
                        st,i-drive = <1>;
-                       touchscreen-inverted-x = <1>;
-                       touchscreen-inverted-y = <1>;
+                       touchscreen-inverted-x;
+                       touchscreen-inverted-y;
                };
        };
 };
index b4664ab..d3da8b1 100644 (file)
                gpmc,device-width = <2>;
                gpmc,wait-pin = <0>;
                gpmc,burst-length = <4>;
-               gpmc,cycle2cycle-samecsen = <1>;
-               gpmc,cycle2cycle-diffcsen = <1>;
+               gpmc,cycle2cycle-samecsen;
+               gpmc,cycle2cycle-diffcsen;
                gpmc,cs-on-ns = <0>;
                gpmc,cs-rd-off-ns = <45>;
                gpmc,cs-wr-off-ns = <45>;
index cbe42c4..b4d286a 100644 (file)
@@ -76,7 +76,7 @@
                pinconf {
                        pins = "gpio20", "gpio21";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio24", "gpio25";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio8", "gpio9";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio12", "gpio13";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio16", "gpio17";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
                pinconf {
                        pins = "gpio84", "gpio85";
                        drive-strength = <2>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 996f445..8cb04aa 100644 (file)
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
 
                        snps,axi-config = <&stmmac_axi_setup>;
                        snps,pbl = <32>;
-                       snps,aal = <1>;
+                       snps,aal;
 
                        qcom,nss-common = <&nss_common>;
                        qcom,qsgmii-csr = <&qsgmii_csr>;
index 4cbadcb..ddd1cf4 100644 (file)
                                        };
                                };
 
-                               m25p80@1 {
+                               flash@1 {
                                        compatible = "st,m25p80";
                                        reg = <1>;
                                        spi-max-frequency = <12000000>;
index fd194eb..3a51a41 100644 (file)
                                cs-gpios = <&gpiopinctrl 80 0>, <&gpiopinctrl 24 0>,
                                           <&gpiopinctrl 85 0>;
 
-                               m25p80@0 {
+                               flash@0 {
                                        compatible = "m25p80";
                                        reg = <0>;
                                        spi-max-frequency = <12000000>;
index 33ae5e0..ac53ee3 100644 (file)
        #size-cells = <0>;
        status = "okay";
 
-       flash0: is25lp016d@0 {
+       flash0: flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <133000000>;
index e222d2d..d142dd3 100644 (file)
        #size-cells = <0>;
        status = "okay";
 
-       flash0: mx66l51235l@0 {
+       flash0: flash@0 {
                compatible = "jedec,spi-nor";
                reg = <0>;
                spi-rx-bus-width = <4>;
                #size-cells = <1>;
        };
 
-       flash1: mx66l51235l@1 {
+       flash1: flash@1 {
                compatible = "jedec,spi-nor";
                reg = <1>;
                spi-rx-bus-width = <4>;
index a7acfee..a80bc8a 100644 (file)
@@ -49,11 +49,13 @@ CONFIG_ATA=y
 CONFIG_PATA_FTIDE010=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
+CONFIG_NET_DSA_REALTEK=y
 CONFIG_NET_DSA_REALTEK_SMI=y
+CONFIG_NET_DSA_REALTEK_RTL8366RB=y
 CONFIG_GEMINI_ETHERNET=y
+CONFIG_MARVELL_PHY=y
 CONFIG_MDIO_BITBANG=y
 CONFIG_MDIO_GPIO=y
-CONFIG_MARVELL_PHY=y
 CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 # CONFIG_INPUT_MOUSE is not set
@@ -66,6 +68,7 @@ CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_I2C_GPIO=y
 CONFIG_SPI=y
 CONFIG_SPI_GPIO=y
+CONFIG_SENSORS_DRIVETEMP=y
 CONFIG_SENSORS_GPIO_FAN=y
 CONFIG_SENSORS_LM75=y
 CONFIG_THERMAL=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
deleted file mode 100644 (file)
index 015b7ef..0000000
+++ /dev/null
@@ -1,365 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
-CONFIG_EXPERT=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_PXA=y
-CONFIG_MACH_INTELMOTE2=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="root=/dev/mtdblock2 rootfstype=jffs2 console=ttyS2,115200 mem=32M"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_AOUT=m
-CONFIG_BINFMT_MISC=m
-CONFIG_PM=y
-CONFIG_APM_EMULATION=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_DIAG is not set
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_NETFILTER=y
-CONFIG_NETFILTER_NETLINK_QUEUE=m
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_NETBIOS_NS=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_LED=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_ADDRTYPE=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_TARGET_LOG=m
-CONFIG_NF_NAT=m
-CONFIG_IP_NF_TARGET_MASQUERADE=m
-CONFIG_IP_NF_TARGET_NETMAP=m
-CONFIG_IP_NF_TARGET_REDIRECT=m
-CONFIG_NF_NAT_SNMP_BASIC=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
-CONFIG_IP6_NF_IPTABLES=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_BRIDGE=m
-# CONFIG_BRIDGE_IGMP_SNOOPING is not set
-CONFIG_IEEE802154=y
-# CONFIG_WIRELESS is not set
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_FW_LOADER=m
-CONFIG_CONNECTOR=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_AR7_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
-# CONFIG_MTD_CFI_I2 is not set
-CONFIG_MTD_OTP=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PXA2XX=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=y
-# CONFIG_WLAN is not set
-CONFIG_PPP=m
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_KEYBOARD_PXA27x=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_INPUT_MISC=y
-CONFIG_INPUT_UINPUT=y
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_PXA=y
-CONFIG_SERIAL_PXA_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=8
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_PXA=y
-CONFIG_SPI=y
-CONFIG_SPI_PXA2XX=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_POWER_SUPPLY=y
-# CONFIG_HWMON is not set
-CONFIG_PMIC_DA903X=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_DA903X=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-CONFIG_MEDIA_TUNER_CUSTOMISE=y
-# CONFIG_MEDIA_TUNER_SIMPLE is not set
-# CONFIG_MEDIA_TUNER_TDA8290 is not set
-# CONFIG_MEDIA_TUNER_TDA827X is not set
-# CONFIG_MEDIA_TUNER_TDA18271 is not set
-# CONFIG_MEDIA_TUNER_TDA9887 is not set
-# CONFIG_MEDIA_TUNER_TEA5761 is not set
-# CONFIG_MEDIA_TUNER_TEA5767 is not set
-# CONFIG_MEDIA_TUNER_MT20XX is not set
-# CONFIG_MEDIA_TUNER_MT2060 is not set
-# CONFIG_MEDIA_TUNER_MT2266 is not set
-# CONFIG_MEDIA_TUNER_MT2131 is not set
-# CONFIG_MEDIA_TUNER_QT1010 is not set
-# CONFIG_MEDIA_TUNER_XC2028 is not set
-# CONFIG_MEDIA_TUNER_XC5000 is not set
-# CONFIG_MEDIA_TUNER_MXL5005S is not set
-# CONFIG_MEDIA_TUNER_MXL5007T is not set
-# CONFIG_MEDIA_TUNER_MC44S803 is not set
-# CONFIG_VIDEO_HELPER_CHIPS_AUTO is not set
-CONFIG_VIDEO_PXA27x=y
-# CONFIG_V4L_USB_DRIVERS is not set
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_FB_PXA=y
-CONFIG_FB_PXA_OVERLAY=y
-CONFIG_FB_PXA_PARAMETERS=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
-# CONFIG_SND_DRIVERS is not set
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-# CONFIG_SND_USB is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_PXA2XX_SOC=y
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_PXA27X=y
-CONFIG_USB_ETH=m
-# CONFIG_USB_ETH_RNDIS is not set
-CONFIG_MMC=y
-CONFIG_SDIO_UART=m
-CONFIG_MMC_PXA=y
-CONFIG_MMC_SPI=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_LP3944=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_BACKLIGHT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_PXA=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=m
-CONFIG_AUTOFS4_FS=y
-CONFIG_FUSE_FS=m
-CONFIG_CUSE=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_WBUF_VERIFY=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_LZO=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=m
-CONFIG_SQUASHFS=m
-CONFIG_ROMFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3_ACL=y
-CONFIG_SMB_FS=m
-CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
-CONFIG_CIFS_XATTR=y
-CONFIG_CIFS_POSIX=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_PROVE_LOCKING=y
-# CONFIG_FTRACE is not set
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_GHASH=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
-CONFIG_CRC16=y
index 3b30913..a352207 100644 (file)
@@ -20,7 +20,6 @@ CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_CMA=y
 CONFIG_NET=y
@@ -41,6 +40,8 @@ CONFIG_MAC80211_LEDS=y
 CONFIG_CAIF=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_GNSS=y
+CONFIG_GNSS_SIRF_SERIAL=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=65536
 CONFIG_NETDEVICES=y
@@ -83,6 +84,8 @@ CONFIG_SPI_GPIO=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_STMPE=y
 CONFIG_GPIO_TC3589X=y
+CONFIG_BATTERY_SAMSUNG_SDI=y
+CONFIG_AB8500_BM=y
 CONFIG_SENSORS_IIO_HWMON=y
 CONFIG_SENSORS_NTC_THERMISTOR=y
 CONFIG_THERMAL=y
@@ -98,10 +101,13 @@ CONFIG_VIDEO_V4L2_SUBDEV_API=y
 CONFIG_V4L2_FLASH_LED_CLASS=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_NOVATEK_NT35510=y
+CONFIG_DRM_PANEL_NOVATEK_NT35560=y
+CONFIG_DRM_PANEL_SAMSUNG_DB7430=y
 CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
+CONFIG_DRM_PANEL_SAMSUNG_S6D27A1=y
 CONFIG_DRM_PANEL_SAMSUNG_S6E63M0=y
 CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI=y
-CONFIG_DRM_PANEL_SONY_ACX424AKP=y
+CONFIG_DRM_PANEL_WIDECHIPS_WS2401=y
 CONFIG_DRM_LIMA=y
 CONFIG_DRM_MCDE=y
 CONFIG_FB=y
@@ -129,6 +135,7 @@ CONFIG_LEDS_LM3530=y
 CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_LP55XX_COMMON=y
 CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_REGULATOR=y
 CONFIG_LEDS_RT8515=y
 CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
@@ -144,17 +151,22 @@ CONFIG_IIO_SW_TRIGGER=y
 CONFIG_BMA180=y
 CONFIG_BMC150_ACCEL=y
 CONFIG_IIO_ST_ACCEL_3AXIS=y
+# CONFIG_IIO_ST_ACCEL_SPI_3AXIS is not set
 CONFIG_IIO_RESCALE=y
 CONFIG_MPU3050_I2C=y
 CONFIG_IIO_ST_GYRO_3AXIS=y
+# CONFIG_IIO_ST_GYRO_SPI_3AXIS is not set
 CONFIG_INV_MPU6050_I2C=y
 CONFIG_BH1780=y
 CONFIG_GP2AP002=y
+CONFIG_TSL2772=y
 CONFIG_AK8974=y
 CONFIG_IIO_ST_MAGN_3AXIS=y
+# CONFIG_IIO_ST_MAGN_SPI_3AXIS is not set
 CONFIG_YAMAHA_YAS530=y
 CONFIG_IIO_HRTIMER_TRIGGER=y
 CONFIG_IIO_ST_PRESS=y
+# CONFIG_IIO_ST_PRESS_SPI is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
@@ -173,10 +185,9 @@ CONFIG_CRYPTO_DEV_UX500_CRYP=y
 CONFIG_CRYPTO_DEV_UX500_HASH=y
 CONFIG_CRYPTO_DEV_UX500_DEBUG=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_FTRACE is not set
 CONFIG_DEBUG_USER=y
index 4280126..7f7f6ba 100644 (file)
@@ -1101,11 +1101,13 @@ static int __init da850_evm_config_emac(void)
        int ret;
        u32 val;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
-       u8 rmii_en = soc_info->emac_pdata->rmii_en;
+       u8 rmii_en;
 
        if (!machine_is_davinci_da850_evm())
                return 0;
 
+       rmii_en = soc_info->emac_pdata->rmii_en;
+
        cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
 
        val = __raw_readl(cfg_chip3_base);
index cc75087..4fa6ea5 100644 (file)
@@ -148,8 +148,10 @@ static struct clk_hw *ep93xx_clk_register_gate(const char *name,
        psc->lock = &clk_lock;
 
        clk = clk_register(NULL, &psc->hw);
-       if (IS_ERR(clk))
+       if (IS_ERR(clk)) {
                kfree(psc);
+               return ERR_CAST(clk);
+       }
 
        return &psc->hw;
 }
@@ -207,7 +209,7 @@ static int ep93xx_mux_determine_rate(struct clk_hw *hw,
                                struct clk_rate_request *req)
 {
        unsigned long rate = req->rate;
-       struct clk *best_parent = 0;
+       struct clk *best_parent = NULL;
        unsigned long __parent_rate;
        unsigned long best_rate = 0, actual_rate, mclk_rate;
        unsigned long best_parent_rate;
index 2882674..7135a0a 100644 (file)
@@ -7,6 +7,8 @@
 #include <asm/traps.h>
 #include <asm/ptrace.h>
 
+#include "iop3xx.h"
+
 void iop_enable_cp6(void)
 {
        u32 temp;
index 1da11bd..6e6985e 100644 (file)
@@ -122,13 +122,13 @@ static inline bool cluster_is_a15(u32 cluster)
 }
 
 /**
- * ve_spc_global_wakeup_irq()
+ * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
+ *
+ * @set: if true, global wake-up IRQs are set, if false they are cleared
  *
  * Function to set/clear global wakeup IRQs. Not protected by locking since
  * it might be used in code paths where normal cacheable locks are not
  * working. Locking must be provided by the caller to ensure atomicity.
- *
- * @set: if true, global wake-up IRQs are set, if false they are cleared
  */
 void ve_spc_global_wakeup_irq(bool set)
 {
@@ -145,15 +145,15 @@ void ve_spc_global_wakeup_irq(bool set)
 }
 
 /**
- * ve_spc_cpu_wakeup_irq()
- *
- * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
- * it might be used in code paths where normal cacheable locks are not
- * working. Locking must be provided by the caller to ensure atomicity.
+ * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs
  *
  * @cluster: mpidr[15:8] bitfield describing cluster affinity level
  * @cpu: mpidr[7:0] bitfield describing cpu affinity level
  * @set: if true, wake-up IRQs are set, if false they are cleared
+ *
+ * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
+ * it might be used in code paths where normal cacheable locks are not
+ * working. Locking must be provided by the caller to ensure atomicity.
  */
 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
 {
@@ -200,14 +200,14 @@ void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
 }
 
 /**
- * ve_spc_powerdown()
+ * ve_spc_powerdown() - enables/disables cluster powerdown
+ *
+ * @cluster: mpidr[15:8] bitfield describing cluster affinity level
+ * @enable: if true enables powerdown, if false disables it
  *
  * Function to enable/disable cluster powerdown. Not protected by locking
  * since it might be used in code paths where normal cacheable locks are not
  * working. Locking must be provided by the caller to ensure atomicity.
- *
- * @cluster: mpidr[15:8] bitfield describing cluster affinity level
- * @enable: if true enables powerdown, if false disables it
  */
 void ve_spc_powerdown(u32 cluster, bool enable)
 {
@@ -228,7 +228,7 @@ static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster)
 }
 
 /**
- * ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
+ * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not
  *
  * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
  * @cluster: mpidr[15:8] bitfield describing cluster affinity level
@@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
                }
 
                cluster = topology_physical_package_id(cpu_dev->id);
-               if (init_opp_table[cluster])
+               if (cluster < 0 || init_opp_table[cluster])
                        continue;
 
                if (ve_init_opp_table(cpu_dev))
index 1dc9d18..a0bd540 100644 (file)
                pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 
                ti,x-min = /bits/ 16 <125>;
-               touchscreen-size-x = /bits/ 16 <4008>;
+               touchscreen-size-x = <4008>;
                ti,y-min = /bits/ 16 <282>;
-               touchscreen-size-y = /bits/ 16 <3864>;
+               touchscreen-size-y = <3864>;
                ti,x-plate-ohms = /bits/ 16 <180>;
-               touchscreen-max-pressure = /bits/ 16 <255>;
-               touchscreen-average-samples = /bits/ 16 <10>;
+               touchscreen-max-pressure = <255>;
+               touchscreen-average-samples = <10>;
                ti,debounce-tol = /bits/ 16 <3>;
                ti,debounce-rep = /bits/ 16 <1>;
                ti,settle-delay-usec = /bits/ 16 <150>;
index b16c7ca..87b5e23 100644 (file)
                pendown-gpio = <&gpio1 3 GPIO_ACTIVE_LOW>;
 
                ti,x-min = /bits/ 16 <125>;
-               touchscreen-size-x = /bits/ 16 <4008>;
+               touchscreen-size-x = <4008>;
                ti,y-min = /bits/ 16 <282>;
-               touchscreen-size-y = /bits/ 16 <3864>;
+               touchscreen-size-y = <3864>;
                ti,x-plate-ohms = /bits/ 16 <180>;
-               touchscreen-max-pressure = /bits/ 16 <255>;
-               touchscreen-average-samples = /bits/ 16 <10>;
+               touchscreen-max-pressure = <255>;
+               touchscreen-average-samples = <10>;
                ti,debounce-tol = /bits/ 16 <3>;
                ti,debounce-rep = /bits/ 16 <1>;
                ti,settle-delay-usec = /bits/ 16 <150>;
index aff857d..1df8433 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio5 {
                                                pins = "gpio5";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 4631504..1ab132c 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio5 {
                                                pins = "gpio5";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
                                };
 
index a7d7cfd..634d0f4 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 0bd66f9..0b219e7 100644 (file)
                                        gpio4 {
                                                pins = "gpio4";
                                                function = "32k-out1";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio6 {
                                                pins = "gpio6";
                                                function = "gpio";
-                                               drive-push-pull = <1>;
+                                               drive-push-pull;
                                        };
 
                                        gpio7 {
                                                pins = "gpio7";
                                                function = "gpio";
-                                               drive-push-pull = <0>;
+                                               drive-push-pull;
                                        };
                                };
 
index 75eb743..0fe772b 100644 (file)
@@ -59,7 +59,7 @@
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <7>;
                                        maxim,active-fps-power-down-slot = <0>;
@@ -68,7 +68,7 @@
                                gpio2_3 {
                                        pins = "gpio2", "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
@@ -80,7 +80,7 @@
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index 10347b6..936a309 100644 (file)
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <7>;
                                        maxim,active-fps-power-down-slot = <0>;
                                gpio2 {
                                        pins = "gpio2";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                };
 
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index 72c2dc3..f644612 100644 (file)
                                gpio1 {
                                        pins = "gpio1";
                                        function = "fps-out";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_NONE>;
                                        maxim,active-fps-power-up-slot = <0>;
                                        maxim,active-fps-power-down-slot = <7>;
                                gpio2 {
                                        pins = "gpio2";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <0>;
                                        maxim,active-fps-power-down-slot = <7>;
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <4>;
                                        maxim,active-fps-power-down-slot = <3>;
                                gpio5_6_7 {
                                        pins = "gpio5", "gpio6", "gpio7";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
                        };
 
index a263d51..e42384f 100644 (file)
                                gpio3 {
                                        pins = "gpio3";
                                        function = "fps-out";
-                                       drive-open-drain = <1>;
+                                       drive-open-drain;
                                        maxim,active-fps-source = <MAX77620_FPS_SRC_0>;
                                        maxim,active-fps-power-up-slot = <4>;
                                        maxim,active-fps-power-down-slot = <2>;
                                gpio5_6 {
                                        pins = "gpio5", "gpio6";
                                        function = "gpio";
-                                       drive-push-pull = <1>;
+                                       drive-push-pull;
                                };
 
                                gpio4 {
index f0f81c2..b9a48cf 100644 (file)
                                pins = "gpio47", "gpio48";
                                function = "blsp_i2c3";
                                drive-strength = <16>;
-                               bias-disable = <0>;
+                               bias-disable;
                        };
 
                        blsp1_i2c3_sleep: blsp1-i2c2-sleep {
                                pins = "gpio47", "gpio48";
                                function = "gpio";
                                drive-strength = <2>;
-                               bias-disable = <0>;
+                               bias-disable;
                        };
 
                        blsp2_uart3_4pins_default: blsp2-uart2-4pins {
index e90f99e..e47c74e 100644 (file)
@@ -33,7 +33,7 @@ ap_h1_spi: &spi0 {};
 };
 
 &alc5682 {
-       realtek,dmic-clk-driving-high = "true";
+       realtek,dmic-clk-driving-high;
 };
 
 &cpu6_alert0 {
index 1084d5c..07b729f 100644 (file)
                        pins = "gpio6", "gpio25", "gpio26";
                        function = "gpio";
                        drive-strength = <8>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 8553c8b..103cc40 100644 (file)
                config {
                        pins = "gpio6", "gpio11";
                        drive-strength = <8>;
-                       bias-disable = <0>;
+                       bias-disable;
                };
        };
 
index 232b439..ff8f451 100644 (file)
@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A77                0xD0D
 #define ARM_CPU_PART_NEOVERSE_V1       0xD40
 #define ARM_CPU_PART_CORTEX_A78                0xD41
+#define ARM_CPU_PART_CORTEX_A78AE      0xD42
 #define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
 #define MIDR_CORTEX_A77        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_NEOVERSE_V1       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
index 7f3c87f..c31be7e 100644 (file)
        isb                                     // Make sure SRE is now set
        mrs_s   x0, SYS_ICC_SRE_EL2             // Read SRE back,
        tbz     x0, #0, .Lskip_gicv3_\@         // and check that it sticks
-       msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICC_HCR_EL2 to defaults
+       msr_s   SYS_ICH_HCR_EL2, xzr            // Reset ICH_HCR_EL2 to defaults
 .Lskip_gicv3_\@:
 .endm
 
index d62405c..7496dea 100644 (file)
@@ -43,10 +43,22 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
 
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
+#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
 {
        return !(vcpu->arch.hcr_el2 & HCR_RW);
 }
+#else
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
+                              &kvm->arch.flags));
+
+       return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
+}
+#endif
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
@@ -72,15 +84,14 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 |= HCR_TVM;
        }
 
-       if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+       if (vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
-
-       /*
-        * TID3: trap feature register accesses that we virtualise.
-        * For now this is conditional, since no AArch32 feature regs
-        * are currently virtualised.
-        */
-       if (!vcpu_el1_is_32bit(vcpu))
+       else
+               /*
+                * TID3: trap feature register accesses that we virtualise.
+                * For now this is conditional, since no AArch32 feature regs
+                * are currently virtualised.
+                */
                vcpu->arch.hcr_el2 |= HCR_TID3;
 
        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
index e3b25dc..94a27a7 100644 (file)
@@ -127,6 +127,16 @@ struct kvm_arch {
 #define KVM_ARCH_FLAG_MTE_ENABLED                      1
        /* At least one vCPU has ran in the VM */
 #define KVM_ARCH_FLAG_HAS_RAN_ONCE                     2
+       /*
+        * The following two bits are used to indicate the guest's EL1
+        * register width configuration. A value of KVM_ARCH_FLAG_EL1_32BIT
+        * bit is valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set.
+        * Otherwise, the guest's EL1 register width has not yet been
+        * determined yet.
+        */
+#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED             3
+#define KVM_ARCH_FLAG_EL1_32BIT                                4
+
        unsigned long flags;
 
        /*
index 3fb79b7..7bbf510 100644 (file)
@@ -42,7 +42,7 @@ bool alternative_is_applied(u16 cpufeature)
 /*
  * Check if the target PC is within an alternative block.
  */
-static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
+static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
        unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
        return !(pc >= replptr && pc <= (replptr + alt->alt_len));
@@ -50,7 +50,7 @@ static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 
 #define align_down(x, a)       ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
 
-static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
+static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
 {
        u32 insn;
 
@@ -95,7 +95,7 @@ static u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnp
        return insn;
 }
 
-static void patch_alternative(struct alt_instr *alt,
+static noinstr void patch_alternative(struct alt_instr *alt,
                              __le32 *origptr, __le32 *updptr, int nr_inst)
 {
        __le32 *replptr;
index 3ed39c6..2b3f3d0 100644 (file)
@@ -8,16 +8,9 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#ifndef VMA_ITERATOR
-#define VMA_ITERATOR(name, mm, addr)   \
-       struct mm_struct *name = mm
-#define for_each_vma(vmi, vma)         \
-       for (vma = vmi->mmap; vma; vma = vma->vm_next)
-#endif
-
-#define for_each_mte_vma(vmi, vma)                                     \
+#define for_each_mte_vma(tsk, vma)                                     \
        if (system_supports_mte())                                      \
-               for_each_vma(vmi, vma)                                  \
+               for (vma = tsk->mm->mmap; vma; vma = vma->vm_next)      \
                        if (vma->vm_flags & VM_MTE)
 
 static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
@@ -32,10 +25,11 @@ static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
 static int mte_dump_tag_range(struct coredump_params *cprm,
                              unsigned long start, unsigned long end)
 {
+       int ret = 1;
        unsigned long addr;
+       void *tags = NULL;
 
        for (addr = start; addr < end; addr += PAGE_SIZE) {
-               char tags[MTE_PAGE_TAG_STORAGE];
                struct page *page = get_dump_page(addr);
 
                /*
@@ -59,22 +53,36 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
                        continue;
                }
 
+               if (!tags) {
+                       tags = mte_allocate_tag_storage();
+                       if (!tags) {
+                               put_page(page);
+                               ret = 0;
+                               break;
+                       }
+               }
+
                mte_save_page_tags(page_address(page), tags);
                put_page(page);
-               if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE))
-                       return 0;
+               if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
+                       mte_free_tag_storage(tags);
+                       ret = 0;
+                       break;
+               }
        }
 
-       return 1;
+       if (tags)
+               mte_free_tag_storage(tags);
+
+       return ret;
 }
 
 Elf_Half elf_core_extra_phdrs(void)
 {
        struct vm_area_struct *vma;
        int vma_count = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(current, vma)
                vma_count++;
 
        return vma_count;
@@ -83,9 +91,8 @@ Elf_Half elf_core_extra_phdrs(void)
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
        struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(current, vma) {
                struct elf_phdr phdr;
 
                phdr.p_type = PT_ARM_MEMTAG_MTE;
@@ -109,9 +116,8 @@ size_t elf_core_extra_data_size(void)
 {
        struct vm_area_struct *vma;
        size_t data_size = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(current, vma)
                data_size += mte_vma_tag_dump_size(vma);
 
        return data_size;
@@ -120,9 +126,8 @@ size_t elf_core_extra_data_size(void)
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
        struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(current, vma) {
                if (vma->vm_flags & VM_DONTDUMP)
                        continue;
 
index 712e97c..cd86808 100644 (file)
@@ -701,7 +701,7 @@ NOKPROBE_SYMBOL(breakpoint_handler);
  * addresses. There is no straight-forward way, short of disassembling the
  * offending instruction, to map that address back to the watchpoint. This
  * function computes the distance of the memory access from the watchpoint as a
- * heuristic for the likelyhood that a given access triggered the watchpoint.
+ * heuristic for the likelihood that a given access triggered the watchpoint.
  *
  * See Section D2.10.5 "Determining the memory location that caused a Watchpoint
  * exception" of ARMv8 Architecture Reference Manual for details.
index e53493d..a3d0494 100644 (file)
@@ -220,7 +220,7 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
                         * increasing the section's alignment so that the
                         * resulting address of this instruction is guaranteed
                         * to equal the offset in that particular bit (as well
-                        * as all less signficant bits). This ensures that the
+                        * as all less significant bits). This ensures that the
                         * address modulo 4 KB != 0xfff8 or 0xfffc (which would
                         * have all ones in bits [11:3])
                         */
index 771f543..33e0fab 100644 (file)
@@ -117,8 +117,8 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
        int i, ret = 0;
        struct aarch64_insn_patch *pp = arg;
 
-       /* The first CPU becomes master */
-       if (atomic_inc_return(&pp->cpu_count) == 1) {
+       /* The last CPU becomes master */
+       if (atomic_inc_return(&pp->cpu_count) == num_online_cpus()) {
                for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
                        ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
                                                             pp->new_insns[i]);
index 5777929..40be3a7 100644 (file)
@@ -853,6 +853,7 @@ u8 spectre_bhb_loop_affected(int scope)
        if (scope == SCOPE_LOCAL_CPU) {
                static const struct midr_range spectre_bhb_k32_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
+                       MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
index 27df5c1..3b46041 100644 (file)
@@ -234,6 +234,7 @@ asmlinkage notrace void secondary_start_kernel(void)
         * Log the CPU info before it is marked online and might get read.
         */
        cpuinfo_store_cpu();
+       store_cpu_topology(cpu);
 
        /*
         * Enable GIC and timers.
@@ -242,7 +243,6 @@ asmlinkage notrace void secondary_start_kernel(void)
 
        ipi_setup(cpu);
 
-       store_cpu_topology(cpu);
        numa_add_cpu(cpu);
 
        /*
index 19ee7c3..2b0887e 100644 (file)
@@ -140,7 +140,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        /*
         * Restore pstate flags. OS lock and mdscr have been already
         * restored, so from this point onwards, debugging is fully
-        * renabled if it was enabled when core started shutdown.
+        * reenabled if it was enabled when core started shutdown.
         */
        local_daif_restore(flags);
 
index 0d19259..53ae2c0 100644 (file)
@@ -1079,7 +1079,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        gfn_t gfn;
        kvm_pfn_t pfn;
        bool logging_active = memslot_is_logging(memslot);
-       bool logging_perm_fault = false;
+       bool use_read_lock = false;
        unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
        unsigned long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1114,7 +1114,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (logging_active) {
                force_pte = true;
                vma_shift = PAGE_SHIFT;
-               logging_perm_fault = (fault_status == FSC_PERM && write_fault);
+               use_read_lock = (fault_status == FSC_PERM && write_fault &&
+                                fault_granule == PAGE_SIZE);
        } else {
                vma_shift = get_vma_page_shift(vma, hva);
        }
@@ -1218,7 +1219,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * logging dirty logging, only acquire read lock for permission
         * relaxation.
         */
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_lock(&kvm->mmu_lock);
        else
                write_lock(&kvm->mmu_lock);
@@ -1268,6 +1269,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        } else {
+               WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
+
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
                                             __pfn_to_phys(pfn), prot,
                                             memcache);
@@ -1280,7 +1283,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
 out_unlock:
-       if (logging_perm_fault)
+       if (use_read_lock)
                read_unlock(&kvm->mmu_lock);
        else
                write_unlock(&kvm->mmu_lock);
index 372da09..baac2b4 100644 (file)
@@ -215,15 +215,11 @@ static void kvm_psci_narrow_to_32bit(struct kvm_vcpu *vcpu)
 
 static unsigned long kvm_psci_check_allowed_function(struct kvm_vcpu *vcpu, u32 fn)
 {
-       switch(fn) {
-       case PSCI_0_2_FN64_CPU_SUSPEND:
-       case PSCI_0_2_FN64_CPU_ON:
-       case PSCI_0_2_FN64_AFFINITY_INFO:
-               /* Disallow these functions for 32bit guests */
-               if (vcpu_mode_is_32bit(vcpu))
-                       return PSCI_RET_NOT_SUPPORTED;
-               break;
-       }
+       /*
+        * Prevent 32 bit guests from calling 64 bit PSCI functions.
+        */
+       if ((fn & PSCI_0_2_64BIT) && vcpu_mode_is_32bit(vcpu))
+               return PSCI_RET_NOT_SUPPORTED;
 
        return 0;
 }
@@ -235,10 +231,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
        unsigned long val;
        int ret = 1;
 
-       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
-       if (val)
-               goto out;
-
        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
@@ -306,7 +298,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
                break;
        }
 
-out:
        smccc_set_retval(vcpu, val, 0, 0, 0);
        return ret;
 }
@@ -318,9 +309,6 @@ static int kvm_psci_1_x_call(struct kvm_vcpu *vcpu, u32 minor)
        unsigned long val;
        int ret = 1;
 
-       if (minor > 1)
-               return -EINVAL;
-
        switch(psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                val = minor == 0 ? KVM_ARM_PSCI_1_0 : KVM_ARM_PSCI_1_1;
@@ -426,6 +414,15 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
  */
 int kvm_psci_call(struct kvm_vcpu *vcpu)
 {
+       u32 psci_fn = smccc_get_function(vcpu);
+       unsigned long val;
+
+       val = kvm_psci_check_allowed_function(vcpu, psci_fn);
+       if (val) {
+               smccc_set_retval(vcpu, val, 0, 0, 0);
+               return 1;
+       }
+
        switch (kvm_psci_version(vcpu)) {
        case KVM_ARM_PSCI_1_1:
                return kvm_psci_1_x_call(vcpu, 1);
index ecc40c8..6c70c6f 100644 (file)
@@ -181,27 +181,51 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+/**
+ * kvm_set_vm_width() - set the register width for the guest
+ * @vcpu: Pointer to the vcpu being configured
+ *
+ * Set both KVM_ARCH_FLAG_EL1_32BIT and KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED
+ * in the VM flags based on the vcpu's requested register width, the HW
+ * capabilities and other options (such as MTE).
+ * When REG_WIDTH_CONFIGURED is already set, the vcpu settings must be
+ * consistent with the value of the FLAG_EL1_32BIT bit in the flags.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int kvm_set_vm_width(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *tmp;
+       struct kvm *kvm = vcpu->kvm;
        bool is32bit;
-       unsigned long i;
 
        is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+
+       lockdep_assert_held(&kvm->lock);
+
+       if (test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags)) {
+               /*
+                * The guest's register width is already configured.
+                * Make sure that the vcpu is consistent with it.
+                */
+               if (is32bit == test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags))
+                       return 0;
+
+               return -EINVAL;
+       }
+
        if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
-               return false;
+               return -EINVAL;
 
        /* MTE is incompatible with AArch32 */
-       if (kvm_has_mte(vcpu->kvm) && is32bit)
-               return false;
+       if (kvm_has_mte(kvm) && is32bit)
+               return -EINVAL;
 
-       /* Check that the vcpus are either all 32bit or all 64bit */
-       kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-               if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
-                       return false;
-       }
+       if (is32bit)
+               set_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
 
-       return true;
+       set_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED, &kvm->arch.flags);
+
+       return 0;
 }
 
 /**
@@ -230,10 +254,16 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        u32 pstate;
 
        mutex_lock(&vcpu->kvm->lock);
-       reset_state = vcpu->arch.reset_state;
-       WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       ret = kvm_set_vm_width(vcpu);
+       if (!ret) {
+               reset_state = vcpu->arch.reset_state;
+               WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+       }
        mutex_unlock(&vcpu->kvm->lock);
 
+       if (ret)
+               return ret;
+
        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);
 
@@ -260,14 +290,9 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                }
        }
 
-       if (!vcpu_allowed_register_width(vcpu)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        switch (vcpu->arch.target) {
        default:
-               if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
+               if (vcpu_el1_is_32bit(vcpu)) {
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index f38c40a..78cde68 100644 (file)
@@ -82,7 +82,7 @@ static bool end_of_vgic(struct vgic_state_iter *iter)
 
 static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        mutex_lock(&kvm->lock);
@@ -110,7 +110,7 @@ out:
 
 static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter = kvm->arch.vgic.iter;
 
        ++*pos;
@@ -122,7 +122,7 @@ static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
 
 static void vgic_debug_stop(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
+       struct kvm *kvm = s->private;
        struct vgic_state_iter *iter;
 
        /*
@@ -229,8 +229,8 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
 
 static int vgic_debug_show(struct seq_file *s, void *v)
 {
-       struct kvm *kvm = (struct kvm *)s->private;
-       struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
+       struct kvm *kvm = s->private;
+       struct vgic_state_iter *iter = v;
        struct vgic_irq *irq;
        struct kvm_vcpu *vcpu = NULL;
        unsigned long flags;
index 089fc2f..2e13402 100644 (file)
@@ -2143,7 +2143,7 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
 static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
                                void *ptr, void *opaque)
 {
-       struct its_device *dev = (struct its_device *)opaque;
+       struct its_device *dev = opaque;
        struct its_collection *collection;
        struct kvm *kvm = its->dev->kvm;
        struct kvm_vcpu *vcpu = NULL;
index 8ac25f1..1e7b155 100644 (file)
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(memstart_addr);
  * In this scheme a comparatively quicker boot is observed.
  *
  * If ZONE_DMA configs are defined, crash kernel memory reservation
- * is delayed until DMA zone memory range size initilazation performed in
+ * is delayed until DMA zone memory range size initialization performed in
  * zone_sizes_init().  The defer is necessary to steer clear of DMA zone
  * memory range to avoid overlap allocation.  So crash kernel memory boundaries
  * are not known when mapping all bank memory ranges, which otherwise means
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(memstart_addr);
  * so page-granularity mappings are created for the entire memory range.
  * Hence a slightly slower boot is observed.
  *
- * Note: Page-granularity mapppings are necessary for crash kernel memory
+ * Note: Page-granularity mappings are necessary for crash kernel memory
  * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
  */
 #if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
index 827038a..4def2bd 100644 (file)
 #include <asm/ppc-opcode.h>
 #include <asm/pte-walk.h>
 
-#ifdef CONFIG_PPC_PSERIES
-static inline bool kvmhv_on_pseries(void)
-{
-       return !cpu_has_feature(CPU_FTR_HVMODE);
-}
-#else
-static inline bool kvmhv_on_pseries(void)
-{
-       return false;
-}
-#endif
-
 /*
  * Structure for a nested guest, that is, for a guest that is managed by
  * one of our guests.
index c583d0c..838d4cb 100644 (file)
@@ -586,6 +586,18 @@ static inline bool kvm_hv_mode_active(void)                { return false; }
 
 #endif
 
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+       return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+       return false;
+}
+#endif
+
 #ifdef CONFIG_KVM_XICS
 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
 {
index 2546872..f2c5c26 100644 (file)
@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn)
 #define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 #define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 
-#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#define virt_addr_valid(vaddr) ({                                      \
+       unsigned long _addr = (unsigned long)vaddr;                     \
+       _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&   \
+       pfn_valid(virt_to_pfn(_addr));                                  \
+})
 
 /*
  * On Book-E parts we need __va to parse the device tree and we can't
index 049ca26..8fa37ef 100644 (file)
@@ -28,11 +28,13 @@ void setup_panic(void);
 #define ARCH_PANIC_TIMEOUT 180
 
 #ifdef CONFIG_PPC_PSERIES
+extern bool pseries_reloc_on_exception(void);
 extern bool pseries_enable_reloc_on_exc(void);
 extern void pseries_disable_reloc_on_exc(void);
 extern void pseries_big_endian_exceptions(void);
 void __init pseries_little_endian_exceptions(void);
 #else
+static inline bool pseries_reloc_on_exception(void) { return false; }
 static inline bool pseries_enable_reloc_on_exc(void) { return false; }
 static inline void pseries_disable_reloc_on_exc(void) {}
 static inline void pseries_big_endian_exceptions(void) {}
index 0a0bc79..de1018c 100644 (file)
@@ -24,5 +24,6 @@
 
 #define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func)      __PPC_SCT(name, "b " #func)
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)       __PPC_SCT(name, "blr")
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)       __PPC_SCT(name, "b .+20")
 
 #endif /* _ASM_POWERPC_STATIC_CALL_H */
index 55caeee..b66dd6f 100644 (file)
@@ -809,6 +809,10 @@ __start_interrupts:
  * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
  * - Standard kernel environment is set up (stack, paca, etc)
  *
+ * KVM:
+ * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
+ * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
+ *
  * Call convention:
  *
  * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
index e547066..a96f050 100644 (file)
@@ -196,6 +196,34 @@ static void __init configure_exceptions(void)
 
        /* Under a PAPR hypervisor, we need hypercalls */
        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+               /*
+                * - PR KVM does not support AIL mode interrupts in the host
+                *   while a PR guest is running.
+                *
+                * - SCV system call interrupt vectors are only implemented for
+                *   AIL mode interrupts.
+                *
+                * - On pseries, AIL mode can only be enabled and disabled
+                *   system-wide so when a PR VM is created on a pseries host,
+                *   all CPUs of the host are set to AIL=0 mode.
+                *
+                * - Therefore host CPUs must not execute scv while a PR VM
+                *   exists.
+                *
+                * - SCV support can not be disabled dynamically because the
+                *   feature is advertised to host userspace. Disabling the
+                *   facility and emulating it would be possible but is not
+                *   implemented.
+                *
+                * - So SCV support is blanket disabled if PR KVM could possibly
+                *   run. That is, PR support compiled in, booting on pseries
+                *   with hash MMU.
+                */
+               if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
+                       init_task.thread.fscr &= ~FSCR_SCV;
+                       cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
+               }
+
                /* Enable AIL if possible */
                if (!pseries_enable_reloc_on_exc()) {
                        init_task.thread.fscr &= ~FSCR_SCV;
index 18e5808..ddd8817 100644 (file)
@@ -112,12 +112,21 @@ config KVM_BOOK3S_64_PR
          guest in user mode (problem state) and emulating all
          privileged instructions and registers.
 
+         This is only available for hash MMU mode and only supports
+         guests that use hash MMU mode.
+
          This is not as fast as using hypervisor mode, but works on
          machines where hypervisor mode is not available or not usable,
          and can emulate processors that are different from the host
          processor, including emulating 32-bit processors on a 64-bit
          host.
 
+         Selecting this option will cause the SCV facility to be
+         disabled when the kernel is booted on the pseries platform in
+         hash MMU mode (regardless of PR VMs running). When any PR VMs
+         are running, "AIL" mode is disabled which may slow interrupts
+         and system calls on the host.
+
 config KVM_BOOK3S_HV_EXIT_TIMING
        bool "Detailed timing for hypervisor real-mode code"
        depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
index 05e003e..e42d1c6 100644 (file)
@@ -414,10 +414,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
         */
        ld      r10,HSTATE_SCRATCH0(r13)
        cmpwi   r10,BOOK3S_INTERRUPT_MACHINE_CHECK
-       beq     machine_check_common
+       beq     .Lcall_machine_check_common
 
        cmpwi   r10,BOOK3S_INTERRUPT_SYSTEM_RESET
-       beq     system_reset_common
+       beq     .Lcall_system_reset_common
 
        b       .
+
+.Lcall_machine_check_common:
+       b       machine_check_common
+
+.Lcall_system_reset_common:
+       b       system_reset_common
 #endif
index c886557..6fa518f 100644 (file)
@@ -225,6 +225,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
        int cpu;
        struct rcuwait *waitp;
 
+       /*
+        * rcuwait_wake_up contains smp_mb() which orders prior stores that
+        * create pending work vs below loads of cpu fields. The other side
+        * is the barrier in vcpu run that orders setting the cpu fields vs
+        * testing for pending work.
+        */
+
        waitp = kvm_arch_vcpu_get_wait(vcpu);
        if (rcuwait_wake_up(waitp))
                ++vcpu->stat.generic.halt_wakeup;
@@ -1089,7 +1096,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                        break;
                }
                tvcpu->arch.prodded = 1;
-               smp_mb();
+               smp_mb(); /* This orders prodded store vs ceded load */
                if (tvcpu->arch.ceded)
                        kvmppc_fast_vcpu_kick_hv(tvcpu);
                break;
@@ -3766,6 +3773,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                pvc = core_info.vc[sub];
                pvc->pcpu = pcpu + thr;
                for_each_runnable_thread(i, vcpu, pvc) {
+                       /*
+                        * XXX: is kvmppc_start_thread called too late here?
+                        * It updates vcpu->cpu and vcpu->arch.thread_cpu
+                        * which are used by kvmppc_fast_vcpu_kick_hv(), but
+                        * kick is called after new exceptions become available
+                        * and exceptions are checked earlier than here, by
+                        * kvmppc_core_prepare_to_enter.
+                        */
                        kvmppc_start_thread(vcpu, pvc);
                        kvmppc_create_dtl_entry(vcpu, pvc);
                        trace_kvm_guest_enter(vcpu);
@@ -4487,6 +4502,21 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        if (need_resched() || !kvm->arch.mmu_ready)
                goto out;
 
+       vcpu->cpu = pcpu;
+       vcpu->arch.thread_cpu = pcpu;
+       vc->pcpu = pcpu;
+       local_paca->kvm_hstate.kvm_vcpu = vcpu;
+       local_paca->kvm_hstate.ptid = 0;
+       local_paca->kvm_hstate.fake_suspend = 0;
+
+       /*
+        * Orders set cpu/thread_cpu vs testing for pending interrupts and
+        * doorbells below. The other side is when these fields are set vs
+        * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to
+        * kick a vCPU to notice the pending interrupt.
+        */
+       smp_mb();
+
        if (!nested) {
                kvmppc_core_prepare_to_enter(vcpu);
                if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
@@ -4506,13 +4536,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 
        tb = mftb();
 
-       vcpu->cpu = pcpu;
-       vcpu->arch.thread_cpu = pcpu;
-       vc->pcpu = pcpu;
-       local_paca->kvm_hstate.kvm_vcpu = vcpu;
-       local_paca->kvm_hstate.ptid = 0;
-       local_paca->kvm_hstate.fake_suspend = 0;
-
        __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0);
 
        trace_kvm_guest_enter(vcpu);
@@ -4614,6 +4637,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
        run->exit_reason = KVM_EXIT_INTR;
        vcpu->arch.ret = -EINTR;
  out:
+       vcpu->cpu = -1;
+       vcpu->arch.thread_cpu = -1;
        powerpc_local_irq_pmu_restore(flags);
        preempt_enable();
        goto done;
index 34a801c..7bf9e6c 100644 (file)
@@ -137,12 +137,15 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
-#endif
 
        /* Disable AIL if supported */
-       if (cpu_has_feature(CPU_FTR_HVMODE) &&
-           cpu_has_feature(CPU_FTR_ARCH_207S))
-               mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+       if (cpu_has_feature(CPU_FTR_HVMODE)) {
+               if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                       mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+               if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
+                       mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) & ~FSCR_SCV);
+       }
+#endif
 
        vcpu->cpu = smp_processor_id();
 #ifdef CONFIG_PPC_BOOK3S_32
@@ -165,6 +168,14 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
+
+       /* Enable AIL if supported */
+       if (cpu_has_feature(CPU_FTR_HVMODE)) {
+               if (cpu_has_feature(CPU_FTR_ARCH_207S))
+                       mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
+               if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
+                       mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) | FSCR_SCV);
+       }
 #endif
 
        if (kvmppc_is_split_real(vcpu))
@@ -174,11 +185,6 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_save_tm_pr(vcpu);
 
-       /* Enable AIL if supported */
-       if (cpu_has_feature(CPU_FTR_HVMODE) &&
-           cpu_has_feature(CPU_FTR_ARCH_207S))
-               mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
-
        vcpu->cpu = -1;
 }
 
@@ -1037,6 +1043,8 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
 
 void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
 {
+       if (fscr & FSCR_SCV)
+               fscr &= ~FSCR_SCV; /* SCV must not be enabled */
        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* TAR got dropped, drop it in shadow too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
index 1f10e7d..dc4f51a 100644 (file)
@@ -281,6 +281,22 @@ static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
        return EMULATE_DONE;
 }
 
+static int kvmppc_h_pr_set_mode(struct kvm_vcpu *vcpu)
+{
+       unsigned long mflags = kvmppc_get_gpr(vcpu, 4);
+       unsigned long resource = kvmppc_get_gpr(vcpu, 5);
+
+       if (resource == H_SET_MODE_RESOURCE_ADDR_TRANS_MODE) {
+               /* KVM PR does not provide AIL!=0 to guests */
+               if (mflags == 0)
+                       kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+               else
+                       kvmppc_set_gpr(vcpu, 3, H_UNSUPPORTED_FLAG_START - 63);
+               return EMULATE_DONE;
+       }
+       return EMULATE_FAIL;
+}
+
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
 {
@@ -384,6 +400,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
                return kvmppc_h_pr_logical_ci_load(vcpu);
        case H_LOGICAL_CI_STORE:
                return kvmppc_h_pr_logical_ci_store(vcpu);
+       case H_SET_MODE:
+               return kvmppc_h_pr_set_mode(vcpu);
        case H_XIRR:
        case H_CPPR:
        case H_EOI:
@@ -421,6 +439,7 @@ int kvmppc_hcall_impl_pr(unsigned long cmd)
        case H_CEDE:
        case H_LOGICAL_CI_LOAD:
        case H_LOGICAL_CI_STORE:
+       case H_SET_MODE:
 #ifdef CONFIG_KVM_XICS
        case H_XIRR:
        case H_CPPR:
@@ -447,6 +466,7 @@ static unsigned int default_hcall_list[] = {
        H_BULK_REMOVE,
        H_PUT_TCE,
        H_CEDE,
+       H_SET_MODE,
 #ifdef CONFIG_KVM_XICS
        H_XIRR,
        H_CPPR,
index 9772b17..875c30c 100644 (file)
@@ -705,6 +705,23 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
                r = 1;
                break;
 #endif
+       case KVM_CAP_PPC_AIL_MODE_3:
+               r = 0;
+               /*
+                * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
+                * The POWER9s can support it if the guest runs in hash mode,
+                * but QEMU doesn't necessarily query the capability in time.
+                */
+               if (hv_enabled) {
+                       if (kvmhv_on_pseries()) {
+                               if (pseries_reloc_on_exception())
+                                       r = 1;
+                       } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+                                 !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+                               r = 1;
+                       }
+               }
+               break;
        default:
                r = 0;
                break;
index 8e301cd..4d221d0 100644 (file)
@@ -255,7 +255,7 @@ void __init mem_init(void)
 #endif
 
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-       set_max_mapnr(max_low_pfn);
+       set_max_mapnr(max_pfn);
 
        kasan_late_init();
 
index b9b7fef..13022d7 100644 (file)
@@ -1436,7 +1436,7 @@ int find_and_online_cpu_nid(int cpu)
        if (new_nid < 0 || !node_possible(new_nid))
                new_nid = first_online_node;
 
-       if (NODE_DATA(new_nid) == NULL) {
+       if (!node_online(new_nid)) {
 #ifdef CONFIG_MEMORY_HOTPLUG
                /*
                 * Need to ensure that NODE_DATA is initialized for a node from
index 069d7b3..955ff8a 100644 (file)
@@ -353,6 +353,14 @@ static void pseries_lpar_idle(void)
        pseries_idle_epilog();
 }
 
+static bool pseries_reloc_on_exception_enabled;
+
+bool pseries_reloc_on_exception(void)
+{
+       return pseries_reloc_on_exception_enabled;
+}
+EXPORT_SYMBOL_GPL(pseries_reloc_on_exception);
+
 /*
  * Enable relocation on during exceptions. This has partition wide scope and
  * may take a while to complete, if it takes longer than one second we will
@@ -377,6 +385,7 @@ bool pseries_enable_reloc_on_exc(void)
                                        " on exceptions: %ld\n", rc);
                                return false;
                        }
+                       pseries_reloc_on_exception_enabled = true;
                        return true;
                }
 
@@ -404,7 +413,9 @@ void pseries_disable_reloc_on_exc(void)
                        break;
                mdelay(get_longbusy_msecs(rc));
        }
-       if (rc != H_SUCCESS)
+       if (rc == H_SUCCESS)
+               pseries_reloc_on_exception_enabled = false;
+       else
                pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
                        rc);
 }
index 4a7fcde..909535c 100644 (file)
@@ -99,6 +99,7 @@ static struct attribute *vas_def_capab_attrs[] = {
        &nr_used_credits_attribute.attr,
        NULL,
 };
+ATTRIBUTE_GROUPS(vas_def_capab);
 
 static struct attribute *vas_qos_capab_attrs[] = {
        &nr_total_credits_attribute.attr,
@@ -106,6 +107,7 @@ static struct attribute *vas_qos_capab_attrs[] = {
        &update_total_credits_attribute.attr,
        NULL,
 };
+ATTRIBUTE_GROUPS(vas_qos_capab);
 
 static ssize_t vas_type_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
@@ -154,13 +156,13 @@ static const struct sysfs_ops vas_sysfs_ops = {
 static struct kobj_type vas_def_attr_type = {
                .release        =       vas_type_release,
                .sysfs_ops      =       &vas_sysfs_ops,
-               .default_attrs  =       vas_def_capab_attrs,
+               .default_groups =       vas_def_capab_groups,
 };
 
 static struct kobj_type vas_qos_attr_type = {
                .release        =       vas_type_release,
                .sysfs_ops      =       &vas_sysfs_ops,
-               .default_attrs  =       vas_qos_capab_attrs,
+               .default_groups =       vas_qos_capab_groups,
 };
 
 static char *vas_caps_kobj_name(struct vas_caps_entry *centry,
index 6241660..6785aef 100644 (file)
@@ -653,8 +653,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
                                     vcpu->arch.isa);
        kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
 
-       csr_write(CSR_HGATP, 0);
-
        csr->vsstatus = csr_read(CSR_VSSTATUS);
        csr->vsie = csr_read(CSR_VSIE);
        csr->vstvec = csr_read(CSR_VSTVEC);
index 4449a97..d4308c5 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
+#include <asm/hwcap.h>
 
 #ifdef CONFIG_FPU
 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
index 77b5a03..e084c72 100644 (file)
@@ -255,6 +255,10 @@ config HAVE_MARCH_Z15_FEATURES
        def_bool n
        select HAVE_MARCH_Z14_FEATURES
 
+config HAVE_MARCH_Z16_FEATURES
+       def_bool n
+       select HAVE_MARCH_Z15_FEATURES
+
 choice
        prompt "Processor type"
        default MARCH_Z196
@@ -312,6 +316,14 @@ config MARCH_Z15
          and 8561 series). The kernel will be slightly faster but will not
          work on older machines.
 
+config MARCH_Z16
+       bool "IBM z16"
+       select HAVE_MARCH_Z16_FEATURES
+       depends on $(cc-option,-march=z16)
+       help
+         Select this to enable optimizations for IBM z16 (3931 and
+         3932 series).
+
 endchoice
 
 config MARCH_Z10_TUNE
@@ -332,6 +344,9 @@ config MARCH_Z14_TUNE
 config MARCH_Z15_TUNE
        def_bool TUNE_Z15 || MARCH_Z15 && TUNE_DEFAULT
 
+config MARCH_Z16_TUNE
+       def_bool TUNE_Z16 || MARCH_Z16 && TUNE_DEFAULT
+
 choice
        prompt "Tune code generation"
        default TUNE_DEFAULT
@@ -372,6 +387,10 @@ config TUNE_Z15
        bool "IBM z15"
        depends on $(cc-option,-mtune=z15)
 
+config TUNE_Z16
+       bool "IBM z16"
+       depends on $(cc-option,-mtune=z16)
+
 endchoice
 
 config 64BIT
index 7a65bca..e441b60 100644 (file)
@@ -42,6 +42,7 @@ mflags-$(CONFIG_MARCH_ZEC12)  := -march=zEC12
 mflags-$(CONFIG_MARCH_Z13)    := -march=z13
 mflags-$(CONFIG_MARCH_Z14)    := -march=z14
 mflags-$(CONFIG_MARCH_Z15)    := -march=z15
+mflags-$(CONFIG_MARCH_Z16)    := -march=z16
 
 export CC_FLAGS_MARCH := $(mflags-y)
 
@@ -54,6 +55,7 @@ cflags-$(CONFIG_MARCH_ZEC12_TUNE)     += -mtune=zEC12
 cflags-$(CONFIG_MARCH_Z13_TUNE)                += -mtune=z13
 cflags-$(CONFIG_MARCH_Z14_TUNE)                += -mtune=z14
 cflags-$(CONFIG_MARCH_Z15_TUNE)                += -mtune=z15
+cflags-$(CONFIG_MARCH_Z16_TUNE)                += -mtune=z16
 
 cflags-y += -Wa,-I$(srctree)/arch/$(ARCH)/include
 
index 498bed9..f6dfde5 100644 (file)
@@ -499,11 +499,13 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_ENGLEDER is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -588,13 +590,13 @@ CONFIG_MLX5_INFINIBAND=m
 CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -690,6 +692,7 @@ CONFIG_ENCRYPTED_KEYS=m
 CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
@@ -733,6 +736,7 @@ CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -786,7 +790,6 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
@@ -814,6 +817,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_DEBUG_PER_CPU_MAPS=y
 CONFIG_KFENCE=y
+CONFIG_KFENCE_DEFERRABLE=y
 CONFIG_KFENCE_STATIC_KEYS=y
 CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
index 61e36b9..706df3a 100644 (file)
@@ -490,11 +490,13 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_ENGLEDER is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
 # CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
@@ -578,13 +580,13 @@ CONFIG_MLX5_INFINIBAND=m
 CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_MLX5_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -720,6 +722,7 @@ CONFIG_CRYPTO_MD5=y
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -772,7 +775,6 @@ CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_GDB_SCRIPTS=y
index c55c668..a87fcc4 100644 (file)
@@ -26,6 +26,7 @@ CONFIG_CRASH_DUMP=y
 # CONFIG_S390_GUEST is not set
 # CONFIG_SECCOMP is not set
 # CONFIG_GCC_PLUGINS is not set
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
 CONFIG_PARTITION_ADVANCED=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_COMPACTION is not set
@@ -60,7 +61,6 @@ CONFIG_ZFCP=y
 # CONFIG_HID is not set
 # CONFIG_VIRTIO_MENU is not set
 # CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
@@ -71,10 +71,10 @@ CONFIG_LSM="yama,loadpin,safesetid,integrity"
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
 # CONFIG_SYMBOLIC_ERRNAME is not set
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_DEBUG_INFO_BTF=y
 CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
index eabab24..2f0a1ca 100644 (file)
@@ -58,7 +58,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
 
 static inline bool on_thread_stack(void)
 {
-       return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
+       return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
 }
 
 #endif
index eee8d96..ff1e25d 100644 (file)
@@ -200,13 +200,7 @@ unsigned long __get_wchan(struct task_struct *p);
 /* Has task runtime instrumentation enabled ? */
 #define is_ri_task(tsk) (!!(tsk)->thread.ri_cb)
 
-static __always_inline unsigned long current_stack_pointer(void)
-{
-       unsigned long sp;
-
-       asm volatile("la %0,0(15)" : "=a" (sp));
-       return sp;
-}
+register unsigned long current_stack_pointer asm("r15");
 
 static __always_inline unsigned short stap(void)
 {
index 275f425..f850019 100644 (file)
@@ -46,7 +46,7 @@ struct stack_frame {
 };
 
 /*
- * Unlike current_stack_pointer() which simply returns current value of %r15
+ * Unlike current_stack_pointer which simply contains the current value of %r15
  * current_frame_address() returns function stack frame address, which matches
  * %r15 upon function invocation. It may differ from %r15 later if function
  * allocates stack for local variables or new stack frame to call other
index b2ef014..6ebf02e 100644 (file)
@@ -54,7 +54,7 @@ static void __do_machine_kdump(void *image)
         * This need to be done *after* s390_reset_system set the
         * prefix register of this CPU to zero
         */
-       memcpy((void *) __LC_FPREGS_SAVE_AREA,
+       memcpy(absolute_pointer(__LC_FPREGS_SAVE_AREA),
               (void *)(prefix + __LC_FPREGS_SAVE_AREA), 512);
 
        __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
index 7a74ea5..aa0e0e7 100644 (file)
@@ -283,6 +283,10 @@ static int __init setup_elf_platform(void)
        case 0x8562:
                strcpy(elf_platform, "z15");
                break;
+       case 0x3931:
+       case 0x3932:
+               strcpy(elf_platform, "z16");
+               break;
        }
        return 0;
 }
index 9bb0673..5a053b3 100644 (file)
@@ -147,7 +147,7 @@ static __always_inline struct pt_regs fake_pt_regs(void)
        struct pt_regs regs;
 
        memset(&regs, 0, sizeof(regs));
-       regs.gprs[15] = current_stack_pointer();
+       regs.gprs[15] = current_stack_pointer;
 
        asm volatile(
                "basr   %[psw_addr],0\n"
index e88791b..fc7f458 100644 (file)
@@ -302,7 +302,7 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
        INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
        INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+       INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff1f, FE),
        INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
        INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
        EVENT_EXTRA_END
@@ -5536,7 +5536,11 @@ static void intel_pmu_check_event_constraints(struct event_constraint *event_con
                        /* Disabled fixed counters which are not in CPUID */
                        c->idxmsk64 &= intel_ctrl;
 
-                       if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+                       /*
+                        * Don't extend the pseudo-encoding to the
+                        * generic counters
+                        */
+                       if (!use_fixed_pseudo_encoding(c->code))
                                c->idxmsk64 |= (1ULL << num_counters) - 1;
                }
                c->idxmsk64 &=
@@ -6212,6 +6216,7 @@ __init int intel_pmu_init(void)
 
        case INTEL_FAM6_ALDERLAKE:
        case INTEL_FAM6_ALDERLAKE_L:
+       case INTEL_FAM6_RAPTORLAKE:
                /*
                 * Alder Lake has 2 types of CPU, core and atom.
                 *
index c6262b1..5d77622 100644 (file)
@@ -40,7 +40,7 @@
  * Model specific counters:
  *     MSR_CORE_C1_RES: CORE C1 Residency Counter
  *                      perf code: 0x00
- *                      Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL
+ *                      Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
  *                      Scope: Core (each processor core has a MSR)
  *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *                            perf code: 0x01
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL
+ *                                             TGL,TNT,RKL,ADL,RPL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
  *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- *                                             ICL,TGL,RKL,ADL
+ *                                             ICL,TGL,RKL,ADL,RPL
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
  *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL
+ *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
+ *                                             RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
  *                                             GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
- *                                             ADL
+ *                                             ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL
+ *                                             TGL,TNT,RKL,ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- *                                             KBL,CML,ICL,TGL,RKL,ADL
+ *                                             KBL,CML,ICL,TGL,RKL,ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL
+ *                                             ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL
+ *                                             ADL,RPL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
  *                            Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- *                                             TNT,RKL,ADL
+ *                                             TNT,RKL,ADL,RPL
  *                            Scope: Package (physical package)
  *
  */
@@ -680,6 +681,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index e497da9..7695dca 100644 (file)
@@ -1828,6 +1828,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE,          &rkl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &adl_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
index f698a55..4262351 100644 (file)
 #define PCI_DEVICE_ID_INTEL_ADL_14_IMC         0x4650
 #define PCI_DEVICE_ID_INTEL_ADL_15_IMC         0x4668
 #define PCI_DEVICE_ID_INTEL_ADL_16_IMC         0x4670
+#define PCI_DEVICE_ID_INTEL_RPL_1_IMC          0xA700
+#define PCI_DEVICE_ID_INTEL_RPL_2_IMC          0xA702
+#define PCI_DEVICE_ID_INTEL_RPL_3_IMC          0xA706
+#define PCI_DEVICE_ID_INTEL_RPL_4_IMC          0xA709
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -1406,6 +1410,22 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_1_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_2_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_3_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_RPL_4_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
        { /* end: all zeroes */ }
 };
 
index 96c775a..6d759f8 100644 (file)
@@ -103,6 +103,7 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_ROCKETLAKE:
        case INTEL_FAM6_ALDERLAKE:
        case INTEL_FAM6_ALDERLAKE_L:
+       case INTEL_FAM6_RAPTORLAKE:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index c878fed..fbcfec4 100644 (file)
 
 # define DEFINE_EXTABLE_TYPE_REG \
        ".macro extable_type_reg type:req reg:req\n"                                            \
-       ".set found, 0\n"                                                                       \
-       ".set regnr, 0\n"                                                                       \
+       ".set .Lfound, 0\n"                                                                     \
+       ".set .Lregnr, 0\n"                                                                     \
        ".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n"               \
        ".ifc \\reg, %%\\rs\n"                                                                  \
-       ".set found, found+1\n"                                                                 \
-       ".long \\type + (regnr << 8)\n"                                                         \
+       ".set .Lfound, .Lfound+1\n"                                                             \
+       ".long \\type + (.Lregnr << 8)\n"                                                       \
        ".endif\n"                                                                              \
-       ".set regnr, regnr+1\n"                                                                 \
+       ".set .Lregnr, .Lregnr+1\n"                                                             \
        ".endr\n"                                                                               \
-       ".set regnr, 0\n"                                                                       \
+       ".set .Lregnr, 0\n"                                                                     \
        ".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n"       \
        ".ifc \\reg, %%\\rs\n"                                                                  \
-       ".set found, found+1\n"                                                                 \
-       ".long \\type + (regnr << 8)\n"                                                         \
+       ".set .Lfound, .Lfound+1\n"                                                             \
+       ".long \\type + (.Lregnr << 8)\n"                                                       \
        ".endif\n"                                                                              \
-       ".set regnr, regnr+1\n"                                                                 \
+       ".set .Lregnr, .Lregnr+1\n"                                                             \
        ".endr\n"                                                                               \
-       ".if (found != 1)\n"                                                                    \
+       ".if (.Lfound != 1)\n"                                                                  \
        ".error \"extable_type_reg: bad register argument\"\n"                                  \
        ".endif\n"                                                                              \
        ".endm\n"
index 4d20a29..aaf0cb0 100644 (file)
@@ -78,9 +78,9 @@ do {                                                          \
  */
 #define __WARN_FLAGS(flags)                                    \
 do {                                                           \
-       __auto_type f = BUGFLAG_WARNING|(flags);                \
+       __auto_type __flags = BUGFLAG_WARNING|(flags);          \
        instrumentation_begin();                                \
-       _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE);                  \
+       _BUG_FLAGS(ASM_UD2, __flags, ASM_REACHABLE);            \
        instrumentation_end();                                  \
 } while (0)
 
index 7516e41..20fd0ac 100644 (file)
@@ -28,15 +28,13 @@ typedef u16         compat_ipc_pid_t;
 typedef __kernel_fsid_t        compat_fsid_t;
 
 struct compat_stat {
-       compat_dev_t    st_dev;
-       u16             __pad1;
+       u32             st_dev;
        compat_ino_t    st_ino;
        compat_mode_t   st_mode;
        compat_nlink_t  st_nlink;
        __compat_uid_t  st_uid;
        __compat_gid_t  st_gid;
-       compat_dev_t    st_rdev;
-       u16             __pad2;
+       u32             st_rdev;
        u32             st_size;
        u32             st_blksize;
        u32             st_blocks;
index f6d91ec..e9736af 100644 (file)
@@ -210,8 +210,6 @@ void __iomem *ioremap(resource_size_t offset, unsigned long size);
 extern void iounmap(volatile void __iomem *addr);
 #define iounmap iounmap
 
-extern void set_iounmap_nonlazy(void);
-
 #ifdef __KERNEL__
 
 void memcpy_fromio(void *, const volatile void __iomem *, size_t);
index d23e80a..e0c0f0e 100644 (file)
@@ -974,12 +974,10 @@ enum hv_tsc_page_status {
        HV_TSC_PAGE_UNSET = 0,
        /* TSC page MSR was written by the guest, update pending */
        HV_TSC_PAGE_GUEST_CHANGED,
-       /* TSC page MSR was written by KVM userspace, update pending */
+       /* TSC page update was triggered from the host side */
        HV_TSC_PAGE_HOST_CHANGED,
        /* TSC page was properly set up and is currently active  */
        HV_TSC_PAGE_SET,
-       /* TSC page is currently being updated and therefore is inactive */
-       HV_TSC_PAGE_UPDATING,
        /* TSC page was set up with an inaccessible GPA */
        HV_TSC_PAGE_BROKEN,
 };
@@ -1052,6 +1050,7 @@ enum kvm_apicv_inhibit {
        APICV_INHIBIT_REASON_X2APIC,
        APICV_INHIBIT_REASON_BLOCKIRQ,
        APICV_INHIBIT_REASON_ABSENT,
+       APICV_INHIBIT_REASON_SEV,
 };
 
 struct kvm_arch {
@@ -1585,8 +1584,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
 #define kvm_arch_pmi_in_guest(vcpu) \
        ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
 
-int kvm_mmu_module_init(void);
-void kvm_mmu_module_exit(void);
+void kvm_mmu_x86_module_init(void);
+int kvm_mmu_vendor_module_init(void);
+void kvm_mmu_vendor_module_exit(void);
 
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
index b85147d..d71c7e8 100644 (file)
@@ -12,14 +12,17 @@ int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
 /* Structs and defines for the X86 specific MSI message format */
 
 typedef struct x86_msi_data {
-       u32     vector                  :  8,
-               delivery_mode           :  3,
-               dest_mode_logical       :  1,
-               reserved                :  2,
-               active_low              :  1,
-               is_level                :  1;
-
-       u32     dmar_subhandle;
+       union {
+               struct {
+                       u32     vector                  :  8,
+                               delivery_mode           :  3,
+                               dest_mode_logical       :  1,
+                               reserved                :  2,
+                               active_low              :  1,
+                               is_level                :  1;
+               };
+               u32     dmar_subhandle;
+       };
 } __attribute__ ((packed)) arch_msi_msg_data_t;
 #define arch_msi_msg_data      x86_msi_data
 
index 0eb90d2..ee15311 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
-/* SRBDS support */
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
-#define RNGDS_MITG_DIS                 BIT(0)
+#define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
+#define RTM_ALLOW                      BIT(1)  /* TSX development mode */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
index a3c33b7..13c0d63 100644 (file)
@@ -38,9 +38,9 @@
 #define arch_raw_cpu_ptr(ptr)                          \
 ({                                                     \
        unsigned long tcp_ptr__;                        \
-       asm volatile("add " __percpu_arg(1) ", %0"      \
-                    : "=r" (tcp_ptr__)                 \
-                    : "m" (this_cpu_off), "0" (ptr));  \
+       asm ("add " __percpu_arg(1) ", %0"              \
+            : "=r" (tcp_ptr__)                         \
+            : "m" (this_cpu_off), "0" (ptr));          \
        (typeof(*(ptr)) __kernel __force *)tcp_ptr__;   \
 })
 #else
index 58d9e4b..b06e4c5 100644 (file)
@@ -241,6 +241,11 @@ struct x86_pmu_capability {
 #define INTEL_PMC_IDX_FIXED_SLOTS      (INTEL_PMC_IDX_FIXED + 3)
 #define INTEL_PMC_MSK_FIXED_SLOTS      (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
 
+static inline bool use_fixed_pseudo_encoding(u64 code)
+{
+       return !(code & 0xff);
+}
+
 /*
  * We model BTS tracing as another fixed-mode PMC.
  *
index ed4f8bb..2455d72 100644 (file)
@@ -38,6 +38,8 @@
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)                       \
        __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
 
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)                       \
+       ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
 
 #define ARCH_ADD_TRAMP_KEY(name)                                       \
        asm(".pushsection .static_call_tramp_key, \"a\"         \n"     \
index ed44175..e342ae4 100644 (file)
@@ -1855,6 +1855,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
        validate_apic_and_package_id(c);
        x86_spec_ctrl_setup_ap();
        update_srbds_msr();
+
+       tsx_ap_init();
 }
 
 static __init int setup_noclflush(char *arg)
index ee6f23f..2a8e584 100644 (file)
@@ -55,11 +55,10 @@ enum tsx_ctrl_states {
 extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
 
 extern void __init tsx_init(void);
-extern void tsx_enable(void);
-extern void tsx_disable(void);
-extern void tsx_clear_cpuid(void);
+void tsx_ap_init(void);
 #else
 static inline void tsx_init(void) { }
+static inline void tsx_ap_init(void) { }
 #endif /* CONFIG_CPU_SUP_INTEL */
 
 extern void get_cpu_cap(struct cpuinfo_x86 *c);
index 8321c43..f7a5370 100644 (file)
@@ -717,13 +717,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        init_intel_misc_features(c);
 
-       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
-               tsx_enable();
-       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
-               tsx_disable();
-       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
-               tsx_clear_cpuid();
-
        split_lock_init();
        bus_lock_init();
 
index 9c7a5f0..ec7bbac 100644 (file)
@@ -19,7 +19,7 @@
 
 enum tsx_ctrl_states tsx_ctrl_state __ro_after_init = TSX_CTRL_NOT_SUPPORTED;
 
-void tsx_disable(void)
+static void tsx_disable(void)
 {
        u64 tsx;
 
@@ -39,7 +39,7 @@ void tsx_disable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-void tsx_enable(void)
+static void tsx_enable(void)
 {
        u64 tsx;
 
@@ -58,7 +58,7 @@ void tsx_enable(void)
        wrmsrl(MSR_IA32_TSX_CTRL, tsx);
 }
 
-static bool __init tsx_ctrl_is_supported(void)
+static bool tsx_ctrl_is_supported(void)
 {
        u64 ia32_cap = x86_read_arch_cap_msr();
 
@@ -84,7 +84,45 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
        return TSX_CTRL_ENABLE;
 }
 
-void tsx_clear_cpuid(void)
+/*
+ * Disabling TSX is not a trivial business.
+ *
+ * First of all, there's a CPUID bit: X86_FEATURE_RTM_ALWAYS_ABORT
+ * which says that TSX is practically disabled (all transactions are
+ * aborted by default). When that bit is set, the kernel unconditionally
+ * disables TSX.
+ *
+ * In order to do that, however, it needs to dance a bit:
+ *
+ * 1. The first method to disable it is through MSR_TSX_FORCE_ABORT and
+ * the MSR is present only when *two* CPUID bits are set:
+ *
+ * - X86_FEATURE_RTM_ALWAYS_ABORT
+ * - X86_FEATURE_TSX_FORCE_ABORT
+ *
+ * 2. The second method is for CPUs which do not have the above-mentioned
+ * MSR: those use a different MSR - MSR_IA32_TSX_CTRL and disable TSX
+ * through that one. Those CPUs can also have the initially mentioned
+ * CPUID bit X86_FEATURE_RTM_ALWAYS_ABORT set and for those the same strategy
+ * applies: TSX gets disabled unconditionally.
+ *
+ * When either of the two methods are present, the kernel disables TSX and
+ * clears the respective RTM and HLE feature flags.
+ *
+ * An additional twist in the whole thing presents late microcode loading
+ * which, when done, may cause for the X86_FEATURE_RTM_ALWAYS_ABORT CPUID
+ * bit to be set after the update.
+ *
+ * A subsequent hotplug operation on any logical CPU except the BSP will
+ * cause for the supported CPUID feature bits to get re-detected and, if
+ * RTM and HLE get cleared all of a sudden, but, userspace did consult
+ * them before the update, then funny explosions will happen. Long story
+ * short: the kernel doesn't modify CPUID feature bits after booting.
+ *
+ * That's why, this function's call in init_intel() doesn't clear the
+ * feature flags.
+ */
+static void tsx_clear_cpuid(void)
 {
        u64 msr;
 
@@ -97,6 +135,39 @@ void tsx_clear_cpuid(void)
                rdmsrl(MSR_TSX_FORCE_ABORT, msr);
                msr |= MSR_TFA_TSX_CPUID_CLEAR;
                wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+       } else if (tsx_ctrl_is_supported()) {
+               rdmsrl(MSR_IA32_TSX_CTRL, msr);
+               msr |= TSX_CTRL_CPUID_CLEAR;
+               wrmsrl(MSR_IA32_TSX_CTRL, msr);
+       }
+}
+
+/*
+ * Disable TSX development mode
+ *
+ * When the microcode released in Feb 2022 is applied, TSX will be disabled by
+ * default on some processors. MSR 0x122 (TSX_CTRL) and MSR 0x123
+ * (IA32_MCU_OPT_CTRL) can be used to re-enable TSX for development, doing so is
+ * not recommended for production deployments. In particular, applying MD_CLEAR
+ * flows for mitigation of the Intel TSX Asynchronous Abort (TAA) transient
+ * execution attack may not be effective on these processors when Intel TSX is
+ * enabled with updated microcode.
+ */
+static void tsx_dev_mode_disable(void)
+{
+       u64 mcu_opt_ctrl;
+
+       /* Check if RTM_ALLOW exists */
+       if (!boot_cpu_has_bug(X86_BUG_TAA) || !tsx_ctrl_is_supported() ||
+           !cpu_feature_enabled(X86_FEATURE_SRBDS_CTRL))
+               return;
+
+       rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+
+       if (mcu_opt_ctrl & RTM_ALLOW) {
+               mcu_opt_ctrl &= ~RTM_ALLOW;
+               wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_opt_ctrl);
+               setup_force_cpu_cap(X86_FEATURE_RTM_ALWAYS_ABORT);
        }
 }
 
@@ -105,14 +176,14 @@ void __init tsx_init(void)
        char arg[5] = {};
        int ret;
 
+       tsx_dev_mode_disable();
+
        /*
-        * Hardware will always abort a TSX transaction if both CPUID bits
-        * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
-        * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
-        * here.
+        * Hardware will always abort a TSX transaction when the CPUID bit
+        * RTM_ALWAYS_ABORT is set. In this case, it is better not to enumerate
+        * CPUID.RTM and CPUID.HLE bits. Clear them here.
         */
-       if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
-           boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+       if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
                tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
                tsx_clear_cpuid();
                setup_clear_cpu_cap(X86_FEATURE_RTM);
@@ -175,3 +246,16 @@ void __init tsx_init(void)
                setup_force_cpu_cap(X86_FEATURE_HLE);
        }
 }
+
+void tsx_ap_init(void)
+{
+       tsx_dev_mode_disable();
+
+       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
+               tsx_enable();
+       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+               tsx_disable();
+       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+               /* See comment over that function for more details. */
+               tsx_clear_cpuid();
+}
index a7f617a..9752955 100644 (file)
@@ -37,7 +37,6 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
        } else
                memcpy(buf, vaddr + offset, csize);
 
-       set_iounmap_nonlazy();
        iounmap((void __iomem *)vaddr);
        return csize;
 }
index 531fb4c..aa72cef 100644 (file)
@@ -12,10 +12,9 @@ enum insn_type {
 };
 
 /*
- * data16 data16 xorq %rax, %rax - a single 5 byte instruction that clears %rax
- * The REX.W cancels the effect of any data16.
+ * cs cs cs xorl %eax, %eax - a single 5 byte instruction that clears %[er]ax
  */
-static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+static const u8 xor5rax[] = { 0x2e, 0x2e, 0x2e, 0x31, 0xc0 };
 
 static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
 
index 123b677..46f9dfb 100644 (file)
@@ -1135,11 +1135,13 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
        BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
 
+       mutex_lock(&hv->hv_lock);
+
        if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+           hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
            hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
-               return;
+               goto out_unlock;
 
-       mutex_lock(&hv->hv_lock);
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                goto out_unlock;
 
@@ -1201,45 +1203,19 @@ out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
 
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+void kvm_hv_request_tsc_page_update(struct kvm *kvm)
 {
        struct kvm_hv *hv = to_kvm_hv(kvm);
-       u64 gfn;
-       int idx;
-
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
-           hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
-           tsc_page_update_unsafe(hv))
-               return;
 
        mutex_lock(&hv->hv_lock);
 
-       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
-               goto out_unlock;
-
-       /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
-       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
-               hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+       if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
+           !tsc_page_update_unsafe(hv))
+               hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
 
-       gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-
-       hv->tsc_ref.tsc_sequence = 0;
-
-       /*
-        * Take the srcu lock as memslots will be accessed to check the gfn
-        * cache generation against the memslots generation.
-        */
-       idx = srcu_read_lock(&kvm->srcu);
-       if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
-                           &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
-       srcu_read_unlock(&kvm->srcu, idx);
-
-out_unlock:
        mutex_unlock(&hv->hv_lock);
 }
 
-
 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
 {
        if (!hv_vcpu->enforce_cpuid)
index e19c00e..da2737f 100644 (file)
@@ -137,7 +137,7 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
 
 void kvm_hv_setup_tsc_page(struct kvm *kvm,
                           struct pvclock_vcpu_time_info *hv_clock);
-void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
+void kvm_hv_request_tsc_page_update(struct kvm *kvm);
 
 void kvm_hv_init_vm(struct kvm *kvm);
 void kvm_hv_destroy_vm(struct kvm *kvm);
index 8f19ea7..f9080ee 100644 (file)
@@ -6237,12 +6237,24 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
        return 0;
 }
 
-int kvm_mmu_module_init(void)
+/*
+ * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
+ * its default value of -1 is technically undefined behavior for a boolean.
+ */
+void kvm_mmu_x86_module_init(void)
 {
-       int ret = -ENOMEM;
-
        if (nx_huge_pages == -1)
                __set_nx_huge_pages(get_nx_auto_mode());
+}
+
+/*
+ * The bulk of the MMU initialization is deferred until the vendor module is
+ * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
+ * to be reset when a potentially different vendor module is loaded.
+ */
+int kvm_mmu_vendor_module_init(void)
+{
+       int ret = -ENOMEM;
 
        /*
         * MMU roles use union aliasing which is, generally speaking, an
@@ -6290,7 +6302,7 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
        mmu_free_memory_caches(vcpu);
 }
 
-void kvm_mmu_module_exit(void)
+void kvm_mmu_vendor_module_exit(void)
 {
        mmu_destroy_caches();
        percpu_counter_destroy(&kvm_total_used_mmu_pages);
index d71d177..c472769 100644 (file)
@@ -51,7 +51,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
-       flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+       /* Also waits for any queued work items.  */
        destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
 
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
index a1cf9c3..4216195 100644 (file)
@@ -837,7 +837,8 @@ bool avic_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
                          BIT(APICV_INHIBIT_REASON_IRQWIN) |
                          BIT(APICV_INHIBIT_REASON_PIT_REINJ) |
                          BIT(APICV_INHIBIT_REASON_X2APIC) |
-                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
+                         BIT(APICV_INHIBIT_REASON_BLOCKIRQ) |
+                         BIT(APICV_INHIBIT_REASON_SEV);
 
        return supported & BIT(reason);
 }
index 75fa6dd..537aadd 100644 (file)
@@ -260,6 +260,8 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
        INIT_LIST_HEAD(&sev->regions_list);
        INIT_LIST_HEAD(&sev->mirror_vms);
 
+       kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);
+
        return 0;
 
 e_free:
@@ -465,6 +467,7 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
+               cond_resched();
        }
 }
 
index 0c0ca59..547ba00 100644 (file)
@@ -2901,7 +2901,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
 
 static void kvm_update_masterclock(struct kvm *kvm)
 {
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
        kvm_end_pvclock_update(kvm);
@@ -3113,8 +3113,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                                       offsetof(struct compat_vcpu_info, time));
        if (vcpu->xen.vcpu_time_info_set)
                kvm_setup_pvclock_page(v, &vcpu->xen.vcpu_time_info_cache, 0);
-       if (!v->vcpu_idx)
-               kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
+       kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
        return 0;
 }
 
@@ -6241,7 +6240,7 @@ static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
        if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
                return -EINVAL;
 
-       kvm_hv_invalidate_tsc_page(kvm);
+       kvm_hv_request_tsc_page_update(kvm);
        kvm_start_pvclock_update(kvm);
        pvclock_update_vm_gtod_copy(kvm);
 
@@ -8926,7 +8925,7 @@ int kvm_arch_init(void *opaque)
        }
        kvm_nr_uret_msrs = 0;
 
-       r = kvm_mmu_module_init();
+       r = kvm_mmu_vendor_module_init();
        if (r)
                goto out_free_percpu;
 
@@ -8974,7 +8973,7 @@ void kvm_arch_exit(void)
        cancel_work_sync(&pvclock_gtod_work);
 #endif
        kvm_x86_ops.hardware_enable = NULL;
-       kvm_mmu_module_exit();
+       kvm_mmu_vendor_module_exit();
        free_percpu(user_return_msrs);
        kmem_cache_destroy(x86_emulator_cache);
 #ifdef CONFIG_KVM_XEN
@@ -12986,3 +12985,19 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
+
+static int __init kvm_x86_init(void)
+{
+       kvm_mmu_x86_module_init();
+       return 0;
+}
+module_init(kvm_x86_init);
+
+static void __exit kvm_x86_exit(void)
+{
+       /*
+        * If module_init() is implemented, module_exit() must also be
+        * implemented to allow module unload.
+        */
+}
+module_exit(kvm_x86_exit);
index 6eb4d91..d400b6d 100644 (file)
@@ -855,13 +855,11 @@ done:
                        nr_invalidate);
 }
 
-static bool tlb_is_not_lazy(int cpu)
+static bool tlb_is_not_lazy(int cpu, void *data)
 {
        return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
 }
 
-static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
-
 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
 EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
 
@@ -890,36 +888,11 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
         * up on the new contents of what used to be page tables, while
         * doing a speculative memory access.
         */
-       if (info->freed_tables) {
+       if (info->freed_tables)
                on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
-       } else {
-               /*
-                * Although we could have used on_each_cpu_cond_mask(),
-                * open-coding it has performance advantages, as it eliminates
-                * the need for indirect calls or retpolines. In addition, it
-                * allows to use a designated cpumask for evaluating the
-                * condition, instead of allocating one.
-                *
-                * This code works under the assumption that there are no nested
-                * TLB flushes, an assumption that is already made in
-                * flush_tlb_mm_range().
-                *
-                * cond_cpumask is logically a stack-local variable, but it is
-                * more efficient to have it off the stack and not to allocate
-                * it on demand. Preemption is disabled and this code is
-                * non-reentrant.
-                */
-               struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
-               int cpu;
-
-               cpumask_clear(cond_cpumask);
-
-               for_each_cpu(cpu, cpumask) {
-                       if (tlb_is_not_lazy(cpu))
-                               __cpumask_set_cpu(cpu, cond_cpumask);
-               }
-               on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true);
-       }
+       else
+               on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func,
+                               (void *)info, 1, cpumask);
 }
 
 void flush_tlb_multi(const struct cpumask *cpumask,
index 8fe35ed..16b6efa 100644 (file)
@@ -412,6 +412,7 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
                EMIT_LFENCE();
                EMIT2(0xFF, 0xE0 + reg);
        } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+               OPTIMIZER_HIDE_VAR(reg);
                emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
        } else
 #endif
index 9f2b251..3822666 100644 (file)
@@ -40,7 +40,8 @@ static void msr_save_context(struct saved_context *ctxt)
        struct saved_msr *end = msr + ctxt->saved_msrs.num;
 
        while (msr < end) {
-               msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
+               if (msr->valid)
+                       rdmsrl(msr->info.msr_no, msr->info.reg.q);
                msr++;
        }
 }
@@ -424,8 +425,10 @@ static int msr_build_context(const u32 *msr_id, const int num)
        }
 
        for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+               u64 dummy;
+
                msr_array[i].info.msr_no        = msr_id[j];
-               msr_array[i].valid              = false;
+               msr_array[i].valid              = !rdmsrl_safe(msr_id[j], &dummy);
                msr_array[i].info.reg.q         = 0;
        }
        saved_msrs->num   = total_num;
@@ -500,10 +503,24 @@ static int pm_cpu_check(const struct x86_cpu_id *c)
        return ret;
 }
 
+static void pm_save_spec_msr(void)
+{
+       u32 spec_msr_id[] = {
+               MSR_IA32_SPEC_CTRL,
+               MSR_IA32_TSX_CTRL,
+               MSR_TSX_FORCE_ABORT,
+               MSR_IA32_MCU_OPT_CTRL,
+               MSR_AMD64_LS_CFG,
+       };
+
+       msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
+}
+
 static int pm_check_save_msr(void)
 {
        dmi_check_system(msr_save_dmi_table);
        pm_cpu_check(msr_save_cpu_table);
+       pm_save_spec_msr();
 
        return 0;
 }
index cdd7b29..4259125 100644 (file)
@@ -1598,7 +1598,7 @@ EXPORT_SYMBOL(bio_split);
 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
 {
        if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
-                        offset + size > bio->bi_iter.bi_size))
+                        offset + size > bio_sectors(bio)))
                return;
 
        size <<= 9;
index ed3ed86..c4370d2 100644 (file)
@@ -794,7 +794,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
 #endif
 
        if (unlikely(error && !blk_rq_is_passthrough(req) &&
-                    !(req->rq_flags & RQF_QUIET))) {
+                    !(req->rq_flags & RQF_QUIET)) &&
+                    !test_bit(GD_DEAD, &req->q->disk->state)) {
                blk_print_req_error(req, error);
                trace_block_rq_error(req, error, nr_bytes);
        }
index 4a86340..f8703db 100644 (file)
@@ -629,7 +629,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                return compat_put_long(argp,
                        (bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
        case BLKGETSIZE:
-               if (bdev_nr_sectors(bdev) > ~0UL)
+               if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
                        return -EFBIG;
                return compat_put_ulong(argp, bdev_nr_sectors(bdev));
 
index 32b20ef..4556c86 100644 (file)
@@ -570,8 +570,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 {
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
 
-       if (cx->type == ACPI_STATE_C3)
-               ACPI_FLUSH_CPU_CACHE();
+       ACPI_FLUSH_CPU_CACHE();
 
        while (1) {
 
index 9efbfe0..762b61f 100644 (file)
@@ -588,19 +588,6 @@ static struct acpi_device *handle_to_device(acpi_handle handle,
        return adev;
 }
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
-{
-       if (!device)
-               return -EINVAL;
-
-       *device = handle_to_device(handle, NULL);
-       if (!*device)
-               return -ENODEV;
-
-       return 0;
-}
-EXPORT_SYMBOL(acpi_bus_get_device);
-
 /**
  * acpi_fetch_acpi_dev - Retrieve ACPI device object.
  * @handle: ACPI handle associated with the requested ACPI device object.
index e5641e6..bb45a9c 100644 (file)
@@ -115,14 +115,16 @@ config SATA_AHCI
 
          If unsure, say N.
 
-config SATA_LPM_POLICY
+config SATA_MOBILE_LPM_POLICY
        int "Default SATA Link Power Management policy for low power chipsets"
        range 0 4
        default 0
        depends on SATA_AHCI
        help
          Select the Default SATA Link Power Management (LPM) policy to use
-         for chipsets / "South Bridges" designated as supporting low power.
+         for chipsets / "South Bridges" supporting low-power modes. Such
+         chipsets are typically found on most laptops but desktops and
+         servers now also widely use chipsets supporting low power modes.
 
          The value set has the following meanings:
                0 => Keep firmware settings
index 84456c0..397dfd2 100644 (file)
@@ -1595,7 +1595,7 @@ static int ahci_init_msi(struct pci_dev *pdev, unsigned int n_ports,
 static void ahci_update_initial_lpm_policy(struct ata_port *ap,
                                           struct ahci_host_priv *hpriv)
 {
-       int policy = CONFIG_SATA_LPM_POLICY;
+       int policy = CONFIG_SATA_MOBILE_LPM_POLICY;
 
 
        /* Ignore processing for chipsets that don't use policy */
index 6ead58c..ad11a4c 100644 (file)
@@ -236,7 +236,7 @@ enum {
        AHCI_HFLAG_NO_WRITE_TO_RO       = (1 << 24), /* don't write to read
                                                        only registers */
        AHCI_HFLAG_USE_LPM_POLICY       = (1 << 25), /* chipset that should use
-                                                       SATA_LPM_POLICY
+                                                       SATA_MOBILE_LPM_POLICY
                                                        as default lpm_policy */
        AHCI_HFLAG_SUSPEND_PHYS         = (1 << 26), /* handle PHYs during
                                                        suspend/resume */
index cceedde..ca64837 100644 (file)
@@ -4014,6 +4014,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "Samsung SSD 840 EVO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+                                               ATA_HORKAGE_NO_DMA_LOG |
+                                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 840*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung SSD 850*",           NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
index b3be7a8..b1666ad 100644 (file)
@@ -1634,7 +1634,7 @@ EXPORT_SYMBOL_GPL(ata_sff_interrupt);
 
 void ata_sff_lost_interrupt(struct ata_port *ap)
 {
-       u8 status;
+       u8 status = 0;
        struct ata_queued_cmd *qc;
 
        /* Only one outstanding command per SFF channel */
index bec33d7..e3263e9 100644 (file)
@@ -137,7 +137,11 @@ struct sata_dwc_device {
 #endif
 };
 
-#define SATA_DWC_QCMD_MAX      32
+/*
+ * Allow one extra special slot for commands and DMA management
+ * to account for libata internal commands.
+ */
+#define SATA_DWC_QCMD_MAX      (ATA_MAX_QUEUE + 1)
 
 struct sata_dwc_device_port {
        struct sata_dwc_device  *hsdev;
index af6bea5..3fc3b59 100644 (file)
@@ -296,6 +296,7 @@ int driver_deferred_probe_check_state(struct device *dev)
 
        return -EPROBE_DEFER;
 }
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
 
 static void deferred_probe_timeout_work_func(struct work_struct *work)
 {
index 4b55e86..4d3efaa 100644 (file)
@@ -1638,22 +1638,22 @@ struct sib_info {
 };
 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
 
-extern void notify_resource_state(struct sk_buff *,
+extern int notify_resource_state(struct sk_buff *,
                                  unsigned int,
                                  struct drbd_resource *,
                                  struct resource_info *,
                                  enum drbd_notification_type);
-extern void notify_device_state(struct sk_buff *,
+extern int notify_device_state(struct sk_buff *,
                                unsigned int,
                                struct drbd_device *,
                                struct device_info *,
                                enum drbd_notification_type);
-extern void notify_connection_state(struct sk_buff *,
+extern int notify_connection_state(struct sk_buff *,
                                    unsigned int,
                                    struct drbd_connection *,
                                    struct connection_info *,
                                    enum drbd_notification_type);
-extern void notify_peer_device_state(struct sk_buff *,
+extern int notify_peer_device_state(struct sk_buff *,
                                     unsigned int,
                                     struct drbd_peer_device *,
                                     struct peer_device_info *,
index 9676a1d..4b0b25c 100644 (file)
@@ -2719,6 +2719,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
        sprintf(disk->disk_name, "drbd%d", minor);
        disk->private_data = device;
 
+       blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, disk->queue);
        blk_queue_write_cache(disk->queue, true, true);
        /* Setting the max_hw_sectors to an odd value of 8kibyte here
           This triggers a max_bio_size message upon first attach or connect */
@@ -2773,12 +2774,12 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 
        if (init_submitter(device)) {
                err = ERR_NOMEM;
-               goto out_idr_remove_vol;
+               goto out_idr_remove_from_resource;
        }
 
        err = add_disk(disk);
        if (err)
-               goto out_idr_remove_vol;
+               goto out_idr_remove_from_resource;
 
        /* inherit the connection state */
        device->state.conn = first_connection(resource)->cstate;
@@ -2792,8 +2793,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
        drbd_debugfs_device_add(device);
        return NO_ERROR;
 
-out_idr_remove_vol:
-       idr_remove(&connection->peer_devices, vnr);
 out_idr_remove_from_resource:
        for_each_connection(connection, resource) {
                peer_device = idr_remove(&connection->peer_devices, vnr);
index 02030c9..b7216c1 100644 (file)
@@ -4549,7 +4549,7 @@ static int nla_put_notification_header(struct sk_buff *msg,
        return drbd_notification_header_to_skb(msg, &nh, true);
 }
 
-void notify_resource_state(struct sk_buff *skb,
+int notify_resource_state(struct sk_buff *skb,
                           unsigned int seq,
                           struct drbd_resource *resource,
                           struct resource_info *resource_info,
@@ -4591,16 +4591,17 @@ void notify_resource_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
                        err, seq);
+       return err;
 }
 
-void notify_device_state(struct sk_buff *skb,
+int notify_device_state(struct sk_buff *skb,
                         unsigned int seq,
                         struct drbd_device *device,
                         struct device_info *device_info,
@@ -4640,16 +4641,17 @@ void notify_device_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
                 err, seq);
+       return err;
 }
 
-void notify_connection_state(struct sk_buff *skb,
+int notify_connection_state(struct sk_buff *skb,
                             unsigned int seq,
                             struct drbd_connection *connection,
                             struct connection_info *connection_info,
@@ -4689,16 +4691,17 @@ void notify_connection_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
                 err, seq);
+       return err;
 }
 
-void notify_peer_device_state(struct sk_buff *skb,
+int notify_peer_device_state(struct sk_buff *skb,
                              unsigned int seq,
                              struct drbd_peer_device *peer_device,
                              struct peer_device_info *peer_device_info,
@@ -4739,13 +4742,14 @@ void notify_peer_device_state(struct sk_buff *skb,
                if (err && err != -ESRCH)
                        goto failed;
        }
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
 failed:
        drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
                 err, seq);
+       return err;
 }
 
 void notify_helper(enum drbd_notification_type type,
@@ -4796,7 +4800,7 @@ fail:
                 err, seq);
 }
 
-static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
+static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
 {
        struct drbd_genlmsghdr *dh;
        int err;
@@ -4810,11 +4814,12 @@ static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
        if (nla_put_notification_header(skb, NOTIFY_EXISTS))
                goto nla_put_failure;
        genlmsg_end(skb, dh);
-       return;
+       return 0;
 
 nla_put_failure:
        nlmsg_free(skb);
        pr_err("Error %d sending event. Event seq:%u\n", err, seq);
+       return err;
 }
 
 static void free_state_changes(struct list_head *list)
@@ -4841,6 +4846,7 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
        unsigned int seq = cb->args[2];
        unsigned int n;
        enum drbd_notification_type flags = 0;
+       int err = 0;
 
        /* There is no need for taking notification_mutex here: it doesn't
           matter if the initial state events mix with later state chage
@@ -4849,32 +4855,32 @@ static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
 
        cb->args[5]--;
        if (cb->args[5] == 1) {
-               notify_initial_state_done(skb, seq);
+               err = notify_initial_state_done(skb, seq);
                goto out;
        }
        n = cb->args[4]++;
        if (cb->args[4] < cb->args[3])
                flags |= NOTIFY_CONTINUES;
        if (n < 1) {
-               notify_resource_state_change(skb, seq, state_change->resource,
+               err = notify_resource_state_change(skb, seq, state_change->resource,
                                             NOTIFY_EXISTS | flags);
                goto next;
        }
        n--;
        if (n < state_change->n_connections) {
-               notify_connection_state_change(skb, seq, &state_change->connections[n],
+               err = notify_connection_state_change(skb, seq, &state_change->connections[n],
                                               NOTIFY_EXISTS | flags);
                goto next;
        }
        n -= state_change->n_connections;
        if (n < state_change->n_devices) {
-               notify_device_state_change(skb, seq, &state_change->devices[n],
+               err = notify_device_state_change(skb, seq, &state_change->devices[n],
                                           NOTIFY_EXISTS | flags);
                goto next;
        }
        n -= state_change->n_devices;
        if (n < state_change->n_devices * state_change->n_connections) {
-               notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
+               err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
                                                NOTIFY_EXISTS | flags);
                goto next;
        }
@@ -4889,7 +4895,10 @@ next:
                cb->args[4] = 0;
        }
 out:
-       return skb->len;
+       if (err)
+               return err;
+       else
+               return skb->len;
 }
 
 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
index b8a2781..4ee11ae 100644 (file)
@@ -1537,7 +1537,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
        return rv;
 }
 
-void notify_resource_state_change(struct sk_buff *skb,
+int notify_resource_state_change(struct sk_buff *skb,
                                  unsigned int seq,
                                  struct drbd_resource_state_change *resource_state_change,
                                  enum drbd_notification_type type)
@@ -1550,10 +1550,10 @@ void notify_resource_state_change(struct sk_buff *skb,
                .res_susp_fen = resource_state_change->susp_fen[NEW],
        };
 
-       notify_resource_state(skb, seq, resource, &resource_info, type);
+       return notify_resource_state(skb, seq, resource, &resource_info, type);
 }
 
-void notify_connection_state_change(struct sk_buff *skb,
+int notify_connection_state_change(struct sk_buff *skb,
                                    unsigned int seq,
                                    struct drbd_connection_state_change *connection_state_change,
                                    enum drbd_notification_type type)
@@ -1564,10 +1564,10 @@ void notify_connection_state_change(struct sk_buff *skb,
                .conn_role = connection_state_change->peer_role[NEW],
        };
 
-       notify_connection_state(skb, seq, connection, &connection_info, type);
+       return notify_connection_state(skb, seq, connection, &connection_info, type);
 }
 
-void notify_device_state_change(struct sk_buff *skb,
+int notify_device_state_change(struct sk_buff *skb,
                                unsigned int seq,
                                struct drbd_device_state_change *device_state_change,
                                enum drbd_notification_type type)
@@ -1577,10 +1577,10 @@ void notify_device_state_change(struct sk_buff *skb,
                .dev_disk_state = device_state_change->disk_state[NEW],
        };
 
-       notify_device_state(skb, seq, device, &device_info, type);
+       return notify_device_state(skb, seq, device, &device_info, type);
 }
 
-void notify_peer_device_state_change(struct sk_buff *skb,
+int notify_peer_device_state_change(struct sk_buff *skb,
                                     unsigned int seq,
                                     struct drbd_peer_device_state_change *p,
                                     enum drbd_notification_type type)
@@ -1594,7 +1594,7 @@ void notify_peer_device_state_change(struct sk_buff *skb,
                .peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
        };
 
-       notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
+       return notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
 }
 
 static void broadcast_state_change(struct drbd_state_change *state_change)
@@ -1602,7 +1602,7 @@ static void broadcast_state_change(struct drbd_state_change *state_change)
        struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
        bool resource_state_has_changed;
        unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
-       void (*last_func)(struct sk_buff *, unsigned int, void *,
+       int (*last_func)(struct sk_buff *, unsigned int, void *,
                          enum drbd_notification_type) = NULL;
        void *last_arg = NULL;
 
index ba80f61..d5b0479 100644 (file)
@@ -44,19 +44,19 @@ extern struct drbd_state_change *remember_old_state(struct drbd_resource *, gfp_
 extern void copy_old_to_new_state_change(struct drbd_state_change *);
 extern void forget_state_change(struct drbd_state_change *);
 
-extern void notify_resource_state_change(struct sk_buff *,
+extern int notify_resource_state_change(struct sk_buff *,
                                         unsigned int,
                                         struct drbd_resource_state_change *,
                                         enum drbd_notification_type type);
-extern void notify_connection_state_change(struct sk_buff *,
+extern int notify_connection_state_change(struct sk_buff *,
                                           unsigned int,
                                           struct drbd_connection_state_change *,
                                           enum drbd_notification_type type);
-extern void notify_device_state_change(struct sk_buff *,
+extern int notify_device_state_change(struct sk_buff *,
                                       unsigned int,
                                       struct drbd_device_state_change *,
                                       enum drbd_notification_type type);
-extern void notify_peer_device_state_change(struct sk_buff *,
+extern int notify_peer_device_state_change(struct sk_buff *,
                                            unsigned int,
                                            struct drbd_peer_device_state_change *,
                                            enum drbd_notification_type type);
index 05b1120..c441a49 100644 (file)
@@ -1600,7 +1600,7 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
         * Only fake timeouts need to execute blk_mq_complete_request() here.
         */
        cmd->error = BLK_STS_TIMEOUT;
-       if (cmd->fake_timeout)
+       if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
                blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
index 183d5cc..b0c3704 100644 (file)
@@ -536,7 +536,6 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
                .name   = "brcm-gisb-arb",
                .of_match_table = brcmstb_gisb_arb_of_match,
                .pm     = &brcmstb_gisb_arb_pm_ops,
-               .suppress_bind_attrs = true,
        },
 };
 
index 54c0ee6..41d5e69 100644 (file)
@@ -3049,7 +3049,7 @@ static const struct soc_device_attribute sysc_soc_match[] = {
        SOC_FLAG("AM43*", SOC_AM4),
        SOC_FLAG("DRA7*", SOC_DRA7),
 
-       { /* sentinel */ },
+       { /* sentinel */ }
 };
 
 /*
@@ -3070,7 +3070,7 @@ static const struct soc_device_attribute sysc_soc_feat_match[] = {
        SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
        SOC_FLAG("OMAP3621", DIS_ISP),
 
-       { /* sentinel */ },
+       { /* sentinel */ }
 };
 
 static int sysc_add_disabled(unsigned long base)
index 7bd10d6..2dc9da6 100644 (file)
@@ -1365,7 +1365,6 @@ out_free:
  */
 int cdrom_number_of_slots(struct cdrom_device_info *cdi) 
 {
-       int status;
        int nslots = 1;
        struct cdrom_changer_info *info;
 
@@ -1377,7 +1376,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi)
        if (!info)
                return -ENOMEM;
 
-       if ((status = cdrom_read_mech_status(cdi, info)) == 0)
+       if (cdrom_read_mech_status(cdi, info) == 0)
                nslots = info->hdr.nslots;
 
        kfree(info);
index 1d82429..3a293f9 100644 (file)
@@ -333,7 +333,7 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
        chacha20_block(chacha_state, first_block);
 
        memcpy(key, first_block, CHACHA_KEY_SIZE);
-       memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
+       memmove(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
        memzero_explicit(first_block, sizeof(first_block));
 }
 
@@ -437,11 +437,8 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
  * This shouldn't be set by functions like add_device_randomness(),
  * where we can't trust the buffer passed to it is guaranteed to be
  * unpredictable (so it might not have any entropy at all).
- *
- * Returns the number of bytes processed from input, which is bounded
- * by CRNG_INIT_CNT_THRESH if account is true.
  */
-static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
+static void crng_pre_init_inject(const void *input, size_t len, bool account)
 {
        static int crng_init_cnt = 0;
        struct blake2s_state hash;
@@ -452,18 +449,15 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
        spin_lock_irqsave(&base_crng.lock, flags);
        if (crng_init != 0) {
                spin_unlock_irqrestore(&base_crng.lock, flags);
-               return 0;
+               return;
        }
 
-       if (account)
-               len = min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
-
        blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
        blake2s_update(&hash, input, len);
        blake2s_final(&hash, base_crng.key);
 
        if (account) {
-               crng_init_cnt += len;
+               crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
                if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                        ++base_crng.generation;
                        crng_init = 1;
@@ -474,8 +468,6 @@ static size_t crng_pre_init_inject(const void *input, size_t len, bool account)
 
        if (crng_init == 1)
                pr_notice("fast init done\n");
-
-       return len;
 }
 
 static void _get_random_bytes(void *buf, size_t nbytes)
@@ -531,49 +523,59 @@ EXPORT_SYMBOL(get_random_bytes);
 
 static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
 {
-       bool large_request = nbytes > 256;
-       ssize_t ret = 0;
-       size_t len;
+       size_t len, left, ret = 0;
        u32 chacha_state[CHACHA_STATE_WORDS];
        u8 output[CHACHA_BLOCK_SIZE];
 
        if (!nbytes)
                return 0;
 
-       len = min_t(size_t, 32, nbytes);
-       crng_make_state(chacha_state, output, len);
-
-       if (copy_to_user(buf, output, len))
-               return -EFAULT;
-       nbytes -= len;
-       buf += len;
-       ret += len;
-
-       while (nbytes) {
-               if (large_request && need_resched()) {
-                       if (signal_pending(current))
-                               break;
-                       schedule();
-               }
+       /*
+        * Immediately overwrite the ChaCha key at index 4 with random
+        * bytes, in case userspace causes copy_to_user() below to sleep
+        * forever, so that we still retain forward secrecy in that case.
+        */
+       crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
+       /*
+        * However, if we're doing a read of len <= 32, we don't need to
+        * use chacha_state after, so we can simply return those bytes to
+        * the user directly.
+        */
+       if (nbytes <= CHACHA_KEY_SIZE) {
+               ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes);
+               goto out_zero_chacha;
+       }
 
+       for (;;) {
                chacha20_block(chacha_state, output);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
 
                len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
-               if (copy_to_user(buf, output, len)) {
-                       ret = -EFAULT;
+               left = copy_to_user(buf, output, len);
+               if (left) {
+                       ret += len - left;
                        break;
                }
 
-               nbytes -= len;
                buf += len;
                ret += len;
+               nbytes -= len;
+               if (!nbytes)
+                       break;
+
+               BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
+               if (ret % PAGE_SIZE == 0) {
+                       if (signal_pending(current))
+                               break;
+                       cond_resched();
+               }
        }
 
-       memzero_explicit(chacha_state, sizeof(chacha_state));
        memzero_explicit(output, sizeof(output));
-       return ret;
+out_zero_chacha:
+       memzero_explicit(chacha_state, sizeof(chacha_state));
+       return ret ? ret : -EFAULT;
 }
 
 /*
@@ -1016,7 +1018,7 @@ int __init rand_initialize(void)
  */
 void add_device_randomness(const void *buf, size_t size)
 {
-       cycles_t cycles = random_get_entropy();
+       unsigned long cycles = random_get_entropy();
        unsigned long flags, now = jiffies;
 
        if (crng_init == 0 && size)
@@ -1047,8 +1049,7 @@ struct timer_rand_state {
  */
 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
 {
-       cycles_t cycles = random_get_entropy();
-       unsigned long flags, now = jiffies;
+       unsigned long cycles = random_get_entropy(), now = jiffies, flags;
        long delta, delta2, delta3;
 
        spin_lock_irqsave(&input_pool.lock, flags);
@@ -1141,12 +1142,9 @@ void add_hwgenerator_randomness(const void *buffer, size_t count,
                                size_t entropy)
 {
        if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
-               size_t ret = crng_pre_init_inject(buffer, count, true);
-               mix_pool_bytes(buffer, ret);
-               count -= ret;
-               buffer += ret;
-               if (!count || crng_init == 0)
-                       return;
+               crng_pre_init_inject(buffer, count, true);
+               mix_pool_bytes(buffer, count);
+               return;
        }
 
        /*
@@ -1340,8 +1338,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
 void add_interrupt_randomness(int irq)
 {
        enum { MIX_INFLIGHT = 1U << 31 };
-       cycles_t cycles = random_get_entropy();
-       unsigned long now = jiffies;
+       unsigned long cycles = random_get_entropy(), now = jiffies;
        struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
        struct pt_regs *regs = get_irq_regs();
        unsigned int new_count;
@@ -1354,16 +1351,12 @@ void add_interrupt_randomness(int irq)
        if (cycles == 0)
                cycles = get_reg(fast_pool, regs);
 
-       if (sizeof(cycles) == 8)
+       if (sizeof(unsigned long) == 8) {
                irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
-       else {
+               irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
+       } else {
                irq_data.u32[0] = cycles ^ irq;
                irq_data.u32[1] = now;
-       }
-
-       if (sizeof(unsigned long) == 8)
-               irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
-       else {
                irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
                irq_data.u32[3] = get_reg(fast_pool, regs);
        }
@@ -1410,7 +1403,7 @@ static void entropy_timer(struct timer_list *t)
 static void try_to_generate_entropy(void)
 {
        struct {
-               cycles_t cycles;
+               unsigned long cycles;
                struct timer_list timer;
        } stack;
 
@@ -1545,6 +1538,13 @@ static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
 {
        static int maxwarn = 10;
 
+       /*
+        * Opportunistically attempt to initialize the RNG on platforms that
+        * have fast cycle counters, but don't (for now) require it to succeed.
+        */
+       if (!crng_ready())
+               try_to_generate_entropy();
+
        if (!crng_ready() && maxwarn > 0) {
                maxwarn--;
                if (__ratelimit(&urandom_warning))
index 8a7267d..3f2182d 100644 (file)
@@ -436,7 +436,6 @@ static int wait_for_media_ready(struct cxl_dev_state *cxlds)
 
        for (i = mbox_ready_timeout; i; i--) {
                u32 temp;
-               int rc;
 
                rc = pci_read_config_dword(
                        pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
index 511805d..4c9eb53 100644 (file)
@@ -12,6 +12,7 @@ dmabuf_selftests-y := \
        selftest.o \
        st-dma-fence.o \
        st-dma-fence-chain.o \
+       st-dma-fence-unwrap.o \
        st-dma-resv.o
 
 obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
index cb1bacb..5c8a708 100644 (file)
@@ -159,6 +159,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
        struct dma_fence_array *array;
        size_t size = sizeof(*array);
 
+       WARN_ON(!num_fences || !fences);
+
        /* Allocate the callback structures behind the array. */
        size += num_fences * sizeof(struct dma_fence_array_cb);
        array = kzalloc(size, GFP_KERNEL);
@@ -219,3 +221,33 @@ bool dma_fence_match_context(struct dma_fence *fence, u64 context)
        return true;
 }
 EXPORT_SYMBOL(dma_fence_match_context);
+
+struct dma_fence *dma_fence_array_first(struct dma_fence *head)
+{
+       struct dma_fence_array *array;
+
+       if (!head)
+               return NULL;
+
+       array = to_dma_fence_array(head);
+       if (!array)
+               return head;
+
+       if (!array->num_fences)
+               return NULL;
+
+       return array->fences[0];
+}
+EXPORT_SYMBOL(dma_fence_array_first);
+
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+                                      unsigned int index)
+{
+       struct dma_fence_array *array = to_dma_fence_array(head);
+
+       if (!array || index >= array->num_fences)
+               return NULL;
+
+       return array->fences[index];
+}
+EXPORT_SYMBOL(dma_fence_array_next);
index 97d73aa..8519658 100644 (file)
@@ -12,4 +12,5 @@
 selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
 selftest(dma_fence, dma_fence)
 selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_unwrap, dma_fence_unwrap)
 selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
new file mode 100644 (file)
index 0000000..039f016
--- /dev/null
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#if 0
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#endif
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static inline struct mock_fence {
+       struct dma_fence base;
+       spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+       return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+       return "mock";
+}
+
+static const struct dma_fence_ops mock_ops = {
+       .get_driver_name = mock_name,
+       .get_timeline_name = mock_name,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+       struct mock_fence *f;
+
+       f = kmalloc(sizeof(*f), GFP_KERNEL);
+       if (!f)
+               return NULL;
+
+       spin_lock_init(&f->lock);
+       dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+       return &f->base;
+}
+
+static struct dma_fence *mock_array(unsigned int num_fences, ...)
+{
+       struct dma_fence_array *array;
+       struct dma_fence **fences;
+       va_list valist;
+       int i;
+
+       fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+       if (!fences)
+               return NULL;
+
+       va_start(valist, num_fences);
+       for (i = 0; i < num_fences; ++i)
+               fences[i] = va_arg(valist, typeof(*fences));
+       va_end(valist);
+
+       array = dma_fence_array_create(num_fences, fences,
+                                      dma_fence_context_alloc(1),
+                                      1, false);
+       if (!array)
+               goto cleanup;
+       return &array->base;
+
+cleanup:
+       for (i = 0; i < num_fences; ++i)
+               dma_fence_put(fences[i]);
+       kfree(fences);
+       return NULL;
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+                                   struct dma_fence *fence)
+{
+       struct dma_fence_chain *f;
+
+       f = dma_fence_chain_alloc();
+       if (!f) {
+               dma_fence_put(prev);
+               dma_fence_put(fence);
+               return NULL;
+       }
+
+       dma_fence_chain_init(f, prev, fence, 1);
+       return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+       struct dma_fence *f, *chain, *array;
+       int err = 0;
+
+       f = mock_fence();
+       if (!f)
+               return -ENOMEM;
+
+       array = mock_array(1, f);
+       if (!array)
+               return -ENOMEM;
+
+       chain = mock_chain(NULL, array);
+       if (!chain)
+               return -ENOMEM;
+
+       dma_fence_signal(f);
+       dma_fence_put(chain);
+       return err;
+}
+
+static int unwrap_array(void *arg)
+{
+       struct dma_fence *fence, *f1, *f2, *array;
+       struct dma_fence_unwrap iter;
+       int err = 0;
+
+       f1 = mock_fence();
+       if (!f1)
+               return -ENOMEM;
+
+       f2 = mock_fence();
+       if (!f2) {
+               dma_fence_put(f1);
+               return -ENOMEM;
+       }
+
+       array = mock_array(2, f1, f2);
+       if (!array)
+               return -ENOMEM;
+
+       dma_fence_unwrap_for_each(fence, &iter, array) {
+               if (fence == f1) {
+                       f1 = NULL;
+               } else if (fence == f2) {
+                       f2 = NULL;
+               } else {
+                       pr_err("Unexpected fence!\n");
+                       err = -EINVAL;
+               }
+       }
+
+       if (f1 || f2) {
+               pr_err("Not all fences seen!\n");
+               err = -EINVAL;
+       }
+
+       dma_fence_signal(f1);
+       dma_fence_signal(f2);
+       dma_fence_put(array);
+       return 0;
+}
+
+static int unwrap_chain(void *arg)
+{
+       struct dma_fence *fence, *f1, *f2, *chain;
+       struct dma_fence_unwrap iter;
+       int err = 0;
+
+       f1 = mock_fence();
+       if (!f1)
+               return -ENOMEM;
+
+       f2 = mock_fence();
+       if (!f2) {
+               dma_fence_put(f1);
+               return -ENOMEM;
+       }
+
+       chain = mock_chain(f1, f2);
+       if (!chain)
+               return -ENOMEM;
+
+       dma_fence_unwrap_for_each(fence, &iter, chain) {
+               if (fence == f1) {
+                       f1 = NULL;
+               } else if (fence == f2) {
+                       f2 = NULL;
+               } else {
+                       pr_err("Unexpected fence!\n");
+                       err = -EINVAL;
+               }
+       }
+
+       if (f1 || f2) {
+               pr_err("Not all fences seen!\n");
+               err = -EINVAL;
+       }
+
+       dma_fence_signal(f1);
+       dma_fence_signal(f2);
+       dma_fence_put(chain);
+       return 0;
+}
+
+static int unwrap_chain_array(void *arg)
+{
+       struct dma_fence *fence, *f1, *f2, *array, *chain;
+       struct dma_fence_unwrap iter;
+       int err = 0;
+
+       f1 = mock_fence();
+       if (!f1)
+               return -ENOMEM;
+
+       f2 = mock_fence();
+       if (!f2) {
+               dma_fence_put(f1);
+               return -ENOMEM;
+       }
+
+       array = mock_array(2, f1, f2);
+       if (!array)
+               return -ENOMEM;
+
+       chain = mock_chain(NULL, array);
+       if (!chain)
+               return -ENOMEM;
+
+       dma_fence_unwrap_for_each(fence, &iter, chain) {
+               if (fence == f1) {
+                       f1 = NULL;
+               } else if (fence == f2) {
+                       f2 = NULL;
+               } else {
+                       pr_err("Unexpected fence!\n");
+                       err = -EINVAL;
+               }
+       }
+
+       if (f1 || f2) {
+               pr_err("Not all fences seen!\n");
+               err = -EINVAL;
+       }
+
+       dma_fence_signal(f1);
+       dma_fence_signal(f2);
+       dma_fence_put(chain);
+       return 0;
+}
+
+int dma_fence_unwrap(void)
+{
+       static const struct subtest tests[] = {
+               SUBTEST(sanitycheck),
+               SUBTEST(unwrap_array),
+               SUBTEST(unwrap_chain),
+               SUBTEST(unwrap_chain_array),
+       };
+
+       return subtests(tests, NULL);
+}
index 394e6e1..514d213 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2012 Google, Inc.
  */
 
+#include <linux/dma-fence-unwrap.h>
 #include <linux/export.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file,
        return 0;
 }
 
-static struct dma_fence **get_fences(struct sync_file *sync_file,
-                                    int *num_fences)
-{
-       if (dma_fence_is_array(sync_file->fence)) {
-               struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
-
-               *num_fences = array->num_fences;
-               return array->fences;
-       }
-
-       *num_fences = 1;
-       return &sync_file->fence;
-}
-
 static void add_fence(struct dma_fence **fences,
                      int *i, struct dma_fence *fence)
 {
@@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences,
 static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
                                         struct sync_file *b)
 {
+       struct dma_fence *a_fence, *b_fence, **fences;
+       struct dma_fence_unwrap a_iter, b_iter;
+       unsigned int index, num_fences;
        struct sync_file *sync_file;
-       struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
-       int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
 
        sync_file = sync_file_alloc();
        if (!sync_file)
                return NULL;
 
-       a_fences = get_fences(a, &a_num_fences);
-       b_fences = get_fences(b, &b_num_fences);
-       if (a_num_fences > INT_MAX - b_num_fences)
-               goto err;
+       num_fences = 0;
+       dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
+               ++num_fences;
+       dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
+               ++num_fences;
 
-       num_fences = a_num_fences + b_num_fences;
+       if (num_fences > INT_MAX)
+               goto err_free_sync_file;
 
        fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
        if (!fences)
-               goto err;
+               goto err_free_sync_file;
 
        /*
-        * Assume sync_file a and b are both ordered and have no
-        * duplicates with the same context.
+        * We can't guarantee that fences in both a and b are ordered, but it is
+        * still quite likely.
         *
-        * If a sync_file can only be created with sync_file_merge
-        * and sync_file_create, this is a reasonable assumption.
+        * So attempt to order the fences as we pass over them and merge fences
+        * with the same context.
         */
-       for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
-               struct dma_fence *pt_a = a_fences[i_a];
-               struct dma_fence *pt_b = b_fences[i_b];
 
-               if (pt_a->context < pt_b->context) {
-                       add_fence(fences, &i, pt_a);
+       index = 0;
+       for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
+            b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
+            a_fence || b_fence; ) {
+
+               if (!b_fence) {
+                       add_fence(fences, &index, a_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
+
+               } else if (!a_fence) {
+                       add_fence(fences, &index, b_fence);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
+
+               } else if (a_fence->context < b_fence->context) {
+                       add_fence(fences, &index, a_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
 
-                       i_a++;
-               } else if (pt_a->context > pt_b->context) {
-                       add_fence(fences, &i, pt_b);
+               } else if (b_fence->context < a_fence->context) {
+                       add_fence(fences, &index, b_fence);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
+
+               } else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
+                                               a_fence->ops)) {
+                       add_fence(fences, &index, a_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
 
-                       i_b++;
                } else {
-                       if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
-                                                pt_a->ops))
-                               add_fence(fences, &i, pt_a);
-                       else
-                               add_fence(fences, &i, pt_b);
-
-                       i_a++;
-                       i_b++;
+                       add_fence(fences, &index, b_fence);
+                       a_fence = dma_fence_unwrap_next(&a_iter);
+                       b_fence = dma_fence_unwrap_next(&b_iter);
                }
        }
 
-       for (; i_a < a_num_fences; i_a++)
-               add_fence(fences, &i, a_fences[i_a]);
-
-       for (; i_b < b_num_fences; i_b++)
-               add_fence(fences, &i, b_fences[i_b]);
-
-       if (i == 0)
-               fences[i++] = dma_fence_get(a_fences[0]);
+       if (index == 0)
+               fences[index++] = dma_fence_get_stub();
 
-       if (num_fences > i) {
-               nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL);
-               if (!nfences)
-                       goto err;
+       if (num_fences > index) {
+               struct dma_fence **tmp;
 
-               fences = nfences;
+               /* Keep going even when reducing the size failed */
+               tmp = krealloc_array(fences, index, sizeof(*fences),
+                                    GFP_KERNEL);
+               if (tmp)
+                       fences = tmp;
        }
 
-       if (sync_file_set_fence(sync_file, fences, i) < 0)
-               goto err;
+       if (sync_file_set_fence(sync_file, fences, index) < 0)
+               goto err_put_fences;
 
        strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
        return sync_file;
 
-err:
-       while (i)
-               dma_fence_put(fences[--i]);
+err_put_fences:
+       while (index)
+               dma_fence_put(fences[--index]);
        kfree(fences);
+
+err_free_sync_file:
        fput(sync_file->file);
        return NULL;
-
 }
 
 static int sync_file_release(struct inode *inode, struct file *file)
@@ -398,11 +396,13 @@ static int sync_fill_fence_info(struct dma_fence *fence,
 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
                                       unsigned long arg)
 {
-       struct sync_file_info info;
        struct sync_fence_info *fence_info = NULL;
-       struct dma_fence **fences;
+       struct dma_fence_unwrap iter;
+       struct sync_file_info info;
+       unsigned int num_fences;
+       struct dma_fence *fence;
+       int ret;
        __u32 size;
-       int num_fences, ret, i;
 
        if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                return -EFAULT;
@@ -410,7 +410,9 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (info.flags || info.pad)
                return -EINVAL;
 
-       fences = get_fences(sync_file, &num_fences);
+       num_fences = 0;
+       dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
+               ++num_fences;
 
        /*
         * Passing num_fences = 0 means that userspace doesn't want to
@@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
        if (!fence_info)
                return -ENOMEM;
 
-       for (i = 0; i < num_fences; i++) {
-               int status = sync_fill_fence_info(fences[i], &fence_info[i]);
+       num_fences = 0;
+       dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
+               int status;
+
+               status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
                info.status = info.status <= 0 ? info.status : status;
        }
 
index 14f9000..ec731e9 100644 (file)
@@ -398,11 +398,15 @@ static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);
 
-       if (ret.a0 != FFA_SUCCESS)
+       if (ret.a0 == FFA_SUCCESS) {
+               if (handle)
+                       *handle = PACK_HANDLE(ret.a2, ret.a3);
+       } else if (ret.a0 == FFA_MEM_FRAG_RX) {
+               if (handle)
+                       *handle = PACK_HANDLE(ret.a1, ret.a2);
+       } else {
                return -EOPNOTSUPP;
-
-       if (handle)
-               *handle = PACK_HANDLE(ret.a2, ret.a3);
+       }
 
        return frag_len;
 }
@@ -426,10 +430,12 @@ static int ffa_mem_next_frag(u64 handle, u32 frag_len)
        if (ret.a0 == FFA_ERROR)
                return ffa_to_linux_errno((int)ret.a2);
 
-       if (ret.a0 != FFA_MEM_FRAG_RX)
-               return -EOPNOTSUPP;
+       if (ret.a0 == FFA_MEM_FRAG_RX)
+               return ret.a3;
+       else if (ret.a0 == FFA_SUCCESS)
+               return 0;
 
-       return ret.a3;
+       return -EOPNOTSUPP;
 }
 
 static int
@@ -582,7 +588,7 @@ static int ffa_partition_info_get(const char *uuid_str,
                return -ENODEV;
        }
 
-       count = ffa_partition_probe(&uuid_null, &pbuf);
+       count = ffa_partition_probe(&uuid, &pbuf);
        if (count <= 0)
                return -ENOENT;
 
@@ -688,8 +694,6 @@ static void ffa_setup_partitions(void)
                               __func__, tpbuf->id);
                        continue;
                }
-
-               ffa_dev_set_drvdata(ffa_dev, drv_info);
        }
        kfree(pbuf);
 }
index 7794bd4..1e7b7fe 100644 (file)
@@ -59,6 +59,7 @@ config ARM_SCMI_TRANSPORT_OPTEE
        depends on OPTEE=y || OPTEE=ARM_SCMI_PROTOCOL
        select ARM_SCMI_HAVE_TRANSPORT
        select ARM_SCMI_HAVE_SHMEM
+       select ARM_SCMI_HAVE_MSG
        default y
        help
          This enables the OP-TEE service based transport for SCMI.
index f521933..20fba73 100644 (file)
@@ -178,6 +178,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
        __le32 *num_skip, *num_ret;
        u32 tot_num_ret = 0, loop_num_ret;
        struct device *dev = ph->dev;
+       struct scmi_revision_info *rev = ph->get_priv(ph);
 
        ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_LIST_PROTOCOLS,
                                      sizeof(*num_skip), 0, &t);
@@ -189,6 +190,9 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
        list = t->rx.buf + sizeof(*num_ret);
 
        do {
+               size_t real_list_sz;
+               u32 calc_list_sz;
+
                /* Set the number of protocols to be skipped/already read */
                *num_skip = cpu_to_le32(tot_num_ret);
 
@@ -197,8 +201,30 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
                        break;
 
                loop_num_ret = le32_to_cpu(*num_ret);
-               if (tot_num_ret + loop_num_ret > MAX_PROTOCOLS_IMP) {
-                       dev_err(dev, "No. of Protocol > MAX_PROTOCOLS_IMP");
+               if (!loop_num_ret)
+                       break;
+
+               if (loop_num_ret > rev->num_protocols - tot_num_ret) {
+                       dev_err(dev,
+                               "No. Returned protocols > Total protocols.\n");
+                       break;
+               }
+
+               if (t->rx.len < (sizeof(u32) * 2)) {
+                       dev_err(dev, "Truncated reply - rx.len:%zd\n",
+                               t->rx.len);
+                       ret = -EPROTO;
+                       break;
+               }
+
+               real_list_sz = t->rx.len - sizeof(u32);
+               calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) *
+                               sizeof(u32);
+               if (calc_list_sz != real_list_sz) {
+                       dev_err(dev,
+                               "Malformed reply - real_sz:%zd  calc_sz:%u\n",
+                               real_list_sz, calc_list_sz);
+                       ret = -EPROTO;
                        break;
                }
 
@@ -208,7 +234,7 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph,
                tot_num_ret += loop_num_ret;
 
                ph->xops->reset_rx_to_maxsz(ph, t);
-       } while (loop_num_ret);
+       } while (tot_num_ret < rev->num_protocols);
 
        ph->xops->xfer_put(ph, t);
 
@@ -351,15 +377,19 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph)
        if (ret)
                return ret;
 
-       prot_imp = devm_kcalloc(dev, MAX_PROTOCOLS_IMP, sizeof(u8), GFP_KERNEL);
-       if (!prot_imp)
-               return -ENOMEM;
-
        rev->major_ver = PROTOCOL_REV_MAJOR(version),
        rev->minor_ver = PROTOCOL_REV_MINOR(version);
        ph->set_priv(ph, rev);
 
-       scmi_base_attributes_get(ph);
+       ret = scmi_base_attributes_get(ph);
+       if (ret)
+               return ret;
+
+       prot_imp = devm_kcalloc(dev, rev->num_protocols, sizeof(u8),
+                               GFP_KERNEL);
+       if (!prot_imp)
+               return -ENOMEM;
+
        scmi_base_vendor_id_get(ph, false);
        scmi_base_vendor_id_get(ph, true);
        scmi_base_implementation_version_get(ph);
index cf6fed6..4d36a9a 100644 (file)
@@ -2,13 +2,15 @@
 /*
  * System Control and Management Interface (SCMI) Clock Protocol
  *
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
  */
 
 #include <linux/module.h>
+#include <linux/limits.h>
 #include <linux/sort.h>
 
-#include "common.h"
+#include "protocols.h"
+#include "notify.h"
 
 enum scmi_clock_protocol_cmd {
        CLOCK_ATTRIBUTES = 0x3,
@@ -16,6 +18,9 @@ enum scmi_clock_protocol_cmd {
        CLOCK_RATE_SET = 0x5,
        CLOCK_RATE_GET = 0x6,
        CLOCK_CONFIG_SET = 0x7,
+       CLOCK_NAME_GET = 0x8,
+       CLOCK_RATE_NOTIFY = 0x9,
+       CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
 };
 
 struct scmi_msg_resp_clock_protocol_attributes {
@@ -27,7 +32,10 @@ struct scmi_msg_resp_clock_protocol_attributes {
 struct scmi_msg_resp_clock_attributes {
        __le32 attributes;
 #define        CLOCK_ENABLE    BIT(0)
-       u8 name[SCMI_MAX_STR_SIZE];
+#define SUPPORTS_RATE_CHANGED_NOTIF(x)         ((x) & BIT(31))
+#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x)        ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x)             ((x) & BIT(29))
+       u8 name[SCMI_SHORT_NAME_MAX_SIZE];
        __le32 clock_enable_latency;
 };
 
@@ -49,7 +57,7 @@ struct scmi_msg_resp_clock_describe_rates {
        struct {
                __le32 value_low;
                __le32 value_high;
-       } rate[0];
+       } rate[];
 #define RATE_TO_U64(X)         \
 ({                             \
        typeof(X) x = (X);      \
@@ -68,6 +76,24 @@ struct scmi_clock_set_rate {
        __le32 value_high;
 };
 
+struct scmi_msg_resp_set_rate_complete {
+       __le32 id;
+       __le32 rate_low;
+       __le32 rate_high;
+};
+
+struct scmi_msg_clock_rate_notify {
+       __le32 clk_id;
+       __le32 notify_enable;
+};
+
+struct scmi_clock_rate_notify_payld {
+       __le32 agent_id;
+       __le32 clock_id;
+       __le32 rate_low;
+       __le32 rate_high;
+};
+
 struct clock_info {
        u32 version;
        int num_clocks;
@@ -76,6 +102,11 @@ struct clock_info {
        struct scmi_clock_info *clk;
 };
 
+static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
+       CLOCK_RATE_NOTIFY,
+       CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
+};
+
 static int
 scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
                                   struct clock_info *ci)
@@ -102,9 +133,11 @@ scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
 }
 
 static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
-                                    u32 clk_id, struct scmi_clock_info *clk)
+                                    u32 clk_id, struct scmi_clock_info *clk,
+                                    u32 version)
 {
        int ret;
+       u32 attributes;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_attributes *attr;
 
@@ -118,16 +151,33 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
+               u32 latency = 0;
+               attributes = le32_to_cpu(attr->attributes);
                strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE);
-               /* Is optional field clock_enable_latency provided ? */
-               if (t->rx.len == sizeof(*attr))
-                       clk->enable_latency =
-                               le32_to_cpu(attr->clock_enable_latency);
-       } else {
-               clk->name[0] = '\0';
+               /* clock_enable_latency field is present only since SCMI v3.1 */
+               if (PROTOCOL_REV_MAJOR(version) >= 0x2)
+                       latency = le32_to_cpu(attr->clock_enable_latency);
+               clk->enable_latency = latency ? : U32_MAX;
        }
 
        ph->xops->xfer_put(ph, t);
+
+       /*
+        * If supported overwrite short name with the extended one;
+        * on error just carry on and use already provided short name.
+        */
+       if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
+               if (SUPPORTS_EXTENDED_NAMES(attributes))
+                       ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
+                                                   clk->name,
+                                                   SCMI_MAX_STR_SIZE);
+
+               if (SUPPORTS_RATE_CHANGED_NOTIF(attributes))
+                       clk->rate_changed_notifications = true;
+               if (SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
+                       clk->rate_change_requested_notifications = true;
+       }
+
        return ret;
 }
 
@@ -143,80 +193,111 @@ static int rate_cmp_func(const void *_r1, const void *_r2)
                return 1;
 }
 
-static int
-scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
-                             struct scmi_clock_info *clk)
-{
-       u64 *rate = NULL;
-       int ret, cnt;
-       bool rate_discrete = false;
-       u32 tot_rate_cnt = 0, rates_flag;
-       u16 num_returned, num_remaining;
-       struct scmi_xfer *t;
-       struct scmi_msg_clock_describe_rates *clk_desc;
-       struct scmi_msg_resp_clock_describe_rates *rlist;
+struct scmi_clk_ipriv {
+       u32 clk_id;
+       struct scmi_clock_info *clk;
+};
 
-       ret = ph->xops->xfer_get_init(ph, CLOCK_DESCRIBE_RATES,
-                                     sizeof(*clk_desc), 0, &t);
-       if (ret)
-               return ret;
+static void iter_clk_describe_prepare_message(void *message,
+                                             const unsigned int desc_index,
+                                             const void *priv)
+{
+       struct scmi_msg_clock_describe_rates *msg = message;
+       const struct scmi_clk_ipriv *p = priv;
 
-       clk_desc = t->tx.buf;
-       rlist = t->rx.buf;
+       msg->id = cpu_to_le32(p->clk_id);
+       /* Set the number of rates to be skipped/already read */
+       msg->rate_index = cpu_to_le32(desc_index);
+}
 
-       do {
-               clk_desc->id = cpu_to_le32(clk_id);
-               /* Set the number of rates to be skipped/already read */
-               clk_desc->rate_index = cpu_to_le32(tot_rate_cnt);
+static int
+iter_clk_describe_update_state(struct scmi_iterator_state *st,
+                              const void *response, void *priv)
+{
+       u32 flags;
+       struct scmi_clk_ipriv *p = priv;
+       const struct scmi_msg_resp_clock_describe_rates *r = response;
 
-               ret = ph->xops->do_xfer(ph, t);
-               if (ret)
-                       goto err;
+       flags = le32_to_cpu(r->num_rates_flags);
+       st->num_remaining = NUM_REMAINING(flags);
+       st->num_returned = NUM_RETURNED(flags);
+       p->clk->rate_discrete = RATE_DISCRETE(flags);
 
-               rates_flag = le32_to_cpu(rlist->num_rates_flags);
-               num_remaining = NUM_REMAINING(rates_flag);
-               rate_discrete = RATE_DISCRETE(rates_flag);
-               num_returned = NUM_RETURNED(rates_flag);
+       return 0;
+}
 
-               if (tot_rate_cnt + num_returned > SCMI_MAX_NUM_RATES) {
-                       dev_err(ph->dev, "No. of rates > MAX_NUM_RATES");
+static int
+iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
+                                  const void *response,
+                                  struct scmi_iterator_state *st, void *priv)
+{
+       int ret = 0;
+       struct scmi_clk_ipriv *p = priv;
+       const struct scmi_msg_resp_clock_describe_rates *r = response;
+
+       if (!p->clk->rate_discrete) {
+               switch (st->desc_index + st->loop_idx) {
+               case 0:
+                       p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
                        break;
-               }
-
-               if (!rate_discrete) {
-                       clk->range.min_rate = RATE_TO_U64(rlist->rate[0]);
-                       clk->range.max_rate = RATE_TO_U64(rlist->rate[1]);
-                       clk->range.step_size = RATE_TO_U64(rlist->rate[2]);
-                       dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
-                               clk->range.min_rate, clk->range.max_rate,
-                               clk->range.step_size);
+               case 1:
+                       p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
+                       break;
+               case 2:
+                       p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
+                       break;
+               default:
+                       ret = -EINVAL;
                        break;
                }
+       } else {
+               u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];
 
-               rate = &clk->list.rates[tot_rate_cnt];
-               for (cnt = 0; cnt < num_returned; cnt++, rate++) {
-                       *rate = RATE_TO_U64(rlist->rate[cnt]);
-                       dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
-               }
+               *rate = RATE_TO_U64(r->rate[st->loop_idx]);
+               p->clk->list.num_rates++;
+               //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate);
+       }
 
-               tot_rate_cnt += num_returned;
+       return ret;
+}
+
+static int
+scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
+                             struct scmi_clock_info *clk)
+{
+       int ret;
 
-               ph->xops->reset_rx_to_maxsz(ph, t);
-               /*
-                * check for both returned and remaining to avoid infinite
-                * loop due to buggy firmware
-                */
-       } while (num_returned && num_remaining);
+       void *iter;
+       struct scmi_msg_clock_describe_rates *msg;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_clk_describe_prepare_message,
+               .update_state = iter_clk_describe_update_state,
+               .process_response = iter_clk_describe_process_response,
+       };
+       struct scmi_clk_ipriv cpriv = {
+               .clk_id = clk_id,
+               .clk = clk,
+       };
+
+       iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
+                                           CLOCK_DESCRIBE_RATES,
+                                           sizeof(*msg), &cpriv);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       ret = ph->hops->iter_response_run(iter);
+       if (ret)
+               return ret;
 
-       if (rate_discrete && rate) {
-               clk->list.num_rates = tot_rate_cnt;
-               sort(rate, tot_rate_cnt, sizeof(*rate), rate_cmp_func, NULL);
+       if (!clk->rate_discrete) {
+               dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
+                       clk->range.min_rate, clk->range.max_rate,
+                       clk->range.step_size);
+       } else if (clk->list.num_rates) {
+               sort(clk->list.rates, clk->list.num_rates,
+                    sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
        }
 
-       clk->rate_discrete = rate_discrete;
-
-err:
-       ph->xops->xfer_put(ph, t);
        return ret;
 }
 
@@ -265,10 +346,22 @@ static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
        cfg->value_low = cpu_to_le32(rate & 0xffffffff);
        cfg->value_high = cpu_to_le32(rate >> 32);
 
-       if (flags & CLOCK_SET_ASYNC)
+       if (flags & CLOCK_SET_ASYNC) {
                ret = ph->xops->do_xfer_with_response(ph, t);
-       else
+               if (!ret) {
+                       struct scmi_msg_resp_set_rate_complete *resp;
+
+                       resp = t->rx.buf;
+                       if (le32_to_cpu(resp->id) == clk_id)
+                               dev_dbg(ph->dev,
+                                       "Clk ID %d set async to %llu\n", clk_id,
+                                       get_unaligned_le64(&resp->rate_low));
+                       else
+                               ret = -EPROTO;
+               }
+       } else {
                ret = ph->xops->do_xfer(ph, t);
+       }
 
        if (ci->max_async_req)
                atomic_dec(&ci->cur_async_req);
@@ -354,13 +447,111 @@ static const struct scmi_clk_proto_ops clk_proto_ops = {
        .disable_atomic = scmi_clock_disable_atomic,
 };
 
+static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
+                               u32 clk_id, int message_id, bool enable)
+{
+       int ret;
+       struct scmi_xfer *t;
+       struct scmi_msg_clock_rate_notify *notify;
+
+       ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
+       if (ret)
+               return ret;
+
+       notify = t->tx.buf;
+       notify->clk_id = cpu_to_le32(clk_id);
+       notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;
+
+       ret = ph->xops->do_xfer(ph, t);
+
+       ph->xops->xfer_put(ph, t);
+       return ret;
+}
+
+static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
+                                      u8 evt_id, u32 src_id, bool enable)
+{
+       int ret, cmd_id;
+
+       if (evt_id >= ARRAY_SIZE(evt_2_cmd))
+               return -EINVAL;
+
+       cmd_id = evt_2_cmd[evt_id];
+       ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
+       if (ret)
+               pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+                        evt_id, src_id, ret);
+
+       return ret;
+}
+
+static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
+                                        u8 evt_id, ktime_t timestamp,
+                                        const void *payld, size_t payld_sz,
+                                        void *report, u32 *src_id)
+{
+       const struct scmi_clock_rate_notify_payld *p = payld;
+       struct scmi_clock_rate_notif_report *r = report;
+
+       if (sizeof(*p) != payld_sz ||
+           (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
+            evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
+               return NULL;
+
+       r->timestamp = timestamp;
+       r->agent_id = le32_to_cpu(p->agent_id);
+       r->clock_id = le32_to_cpu(p->clock_id);
+       r->rate = get_unaligned_le64(&p->rate_low);
+       *src_id = r->clock_id;
+
+       return r;
+}
+
+static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+       struct clock_info *ci = ph->get_priv(ph);
+
+       if (!ci)
+               return -EINVAL;
+
+       return ci->num_clocks;
+}
+
+static const struct scmi_event clk_events[] = {
+       {
+               .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
+               .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+               .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+       },
+       {
+               .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
+               .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
+               .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
+       },
+};
+
+static const struct scmi_event_ops clk_event_ops = {
+       .get_num_sources = scmi_clk_get_num_sources,
+       .set_notify_enabled = scmi_clk_set_notify_enabled,
+       .fill_custom_report = scmi_clk_fill_custom_report,
+};
+
+static const struct scmi_protocol_events clk_protocol_events = {
+       .queue_sz = SCMI_PROTO_QUEUE_SZ,
+       .ops = &clk_event_ops,
+       .evts = clk_events,
+       .num_events = ARRAY_SIZE(clk_events),
+};
+
 static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
 {
        u32 version;
        int clkid, ret;
        struct clock_info *cinfo;
 
-       ph->xops->version_get(ph, &version);
+       ret = ph->xops->version_get(ph, &version);
+       if (ret)
+               return ret;
 
        dev_dbg(ph->dev, "Clock Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -369,7 +560,9 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
        if (!cinfo)
                return -ENOMEM;
 
-       scmi_clock_protocol_attributes_get(ph, cinfo);
+       ret = scmi_clock_protocol_attributes_get(ph, cinfo);
+       if (ret)
+               return ret;
 
        cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
                                  sizeof(*cinfo->clk), GFP_KERNEL);
@@ -379,7 +572,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
        for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
                struct scmi_clock_info *clk = cinfo->clk + clkid;
 
-               ret = scmi_clock_attributes_get(ph, clkid, clk);
+               ret = scmi_clock_attributes_get(ph, clkid, clk, version);
                if (!ret)
                        scmi_clock_describe_rates_get(ph, clkid, clk);
        }
@@ -393,6 +586,7 @@ static const struct scmi_protocol scmi_clock = {
        .owner = THIS_MODULE,
        .instance_init = &scmi_clock_protocol_init,
        .ops = &clk_proto_ops,
+       .events = &clk_protocol_events,
 };
 
 DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)
index 4fda84b..61aba74 100644 (file)
@@ -4,7 +4,7 @@
  * driver common header file containing some definitions, structures
  * and function prototypes used in all the different SCMI protocols.
  *
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
  */
 #ifndef _SCMI_COMMON_H
 #define _SCMI_COMMON_H
 
 #include <asm/unaligned.h>
 
+#include "protocols.h"
 #include "notify.h"
 
-#define PROTOCOL_REV_MINOR_MASK        GENMASK(15, 0)
-#define PROTOCOL_REV_MAJOR_MASK        GENMASK(31, 16)
-#define PROTOCOL_REV_MAJOR(x)  (u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x)))
-#define PROTOCOL_REV_MINOR(x)  (u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x)))
-#define MAX_PROTOCOLS_IMP      16
-#define MAX_OPPS               16
-
-enum scmi_common_cmd {
-       PROTOCOL_VERSION = 0x0,
-       PROTOCOL_ATTRIBUTES = 0x1,
-       PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
-};
-
-/**
- * struct scmi_msg_resp_prot_version - Response for a message
- *
- * @minor_version: Minor version of the ABI that firmware supports
- * @major_version: Major version of the ABI that firmware supports
- *
- * In general, ABI version changes follow the rule that minor version increments
- * are backward compatible. Major revision changes in ABI may not be
- * backward compatible.
- *
- * Response to a generic message with message type SCMI_MSG_VERSION
- */
-struct scmi_msg_resp_prot_version {
-       __le16 minor_version;
-       __le16 major_version;
-};
-
 #define MSG_ID_MASK            GENMASK(7, 0)
 #define MSG_XTRACT_ID(hdr)     FIELD_GET(MSG_ID_MASK, (hdr))
 #define MSG_TYPE_MASK          GENMASK(9, 8)
@@ -80,28 +51,6 @@ struct scmi_msg_resp_prot_version {
 #define SCMI_PENDING_XFERS_HT_ORDER_SZ         9
 
 /**
- * struct scmi_msg_hdr - Message(Tx/Rx) header
- *
- * @id: The identifier of the message being sent
- * @protocol_id: The identifier of the protocol used to send @id message
- * @type: The SCMI type for this message
- * @seq: The token to identify the message. When a message returns, the
- *     platform returns the whole message header unmodified including the
- *     token
- * @status: Status of the transfer once it's complete
- * @poll_completion: Indicate if the transfer needs to be polled for
- *     completion or interrupt mode is used
- */
-struct scmi_msg_hdr {
-       u8 id;
-       u8 protocol_id;
-       u8 type;
-       u16 seq;
-       u32 status;
-       bool poll_completion;
-};
-
-/**
  * pack_scmi_header() - packs and returns 32-bit header
  *
  * @hdr: pointer to header containing all the information on message id,
@@ -130,72 +79,6 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
        hdr->type = MSG_XTRACT_TYPE(msg_hdr);
 }
 
-/**
- * struct scmi_msg - Message(Tx/Rx) structure
- *
- * @buf: Buffer pointer
- * @len: Length of data in the Buffer
- */
-struct scmi_msg {
-       void *buf;
-       size_t len;
-};
-
-/**
- * struct scmi_xfer - Structure representing a message flow
- *
- * @transfer_id: Unique ID for debug & profiling purpose
- * @hdr: Transmit message header
- * @tx: Transmit message
- * @rx: Receive message, the buffer should be pre-allocated to store
- *     message. If request-ACK protocol is used, we can reuse the same
- *     buffer for the rx path as we use for the tx path.
- * @done: command message transmit completion event
- * @async_done: pointer to delayed response message received event completion
- * @pending: True for xfers added to @pending_xfers hashtable
- * @node: An hlist_node reference used to store this xfer, alternatively, on
- *       the free list @free_xfers or in the @pending_xfers hashtable
- * @users: A refcount to track the active users for this xfer.
- *        This is meant to protect against the possibility that, when a command
- *        transaction times out concurrently with the reception of a valid
- *        response message, the xfer could be finally put on the TX path, and
- *        so vanish, while on the RX path scmi_rx_callback() is still
- *        processing it: in such a case this refcounting will ensure that, even
- *        though the timed-out transaction will anyway cause the command
- *        request to be reported as failed by time-out, the underlying xfer
- *        cannot be discarded and possibly reused until the last one user on
- *        the RX path has released it.
- * @busy: An atomic flag to ensure exclusive write access to this xfer
- * @state: The current state of this transfer, with states transitions deemed
- *        valid being:
- *         - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
- *         - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
- *           (Missing synchronous response is assumed OK and ignored)
- * @lock: A spinlock to protect state and busy fields.
- * @priv: A pointer for transport private usage.
- */
-struct scmi_xfer {
-       int transfer_id;
-       struct scmi_msg_hdr hdr;
-       struct scmi_msg tx;
-       struct scmi_msg rx;
-       struct completion done;
-       struct completion *async_done;
-       bool pending;
-       struct hlist_node node;
-       refcount_t users;
-#define SCMI_XFER_FREE         0
-#define SCMI_XFER_BUSY         1
-       atomic_t busy;
-#define SCMI_XFER_SENT_OK      0
-#define SCMI_XFER_RESP_OK      1
-#define SCMI_XFER_DRESP_OK     2
-       int state;
-       /* A lock to protect state and busy fields */
-       spinlock_t lock;
-       void *priv;
-};
-
 /*
  * An helper macro to lookup an xfer from the @pending_xfers hashtable
  * using the message sequence number token as a key.
@@ -211,64 +94,6 @@ struct scmi_xfer {
        xfer_;                                                  \
 })
 
-struct scmi_xfer_ops;
-
-/**
- * struct scmi_protocol_handle  - Reference to an initialized protocol instance
- *
- * @dev: A reference to the associated SCMI instance device (handle->dev).
- * @xops: A reference to a struct holding refs to the core xfer operations that
- *       can be used by the protocol implementation to generate SCMI messages.
- * @set_priv: A method to set protocol private data for this instance.
- * @get_priv: A method to get protocol private data previously set.
- *
- * This structure represents a protocol initialized against specific SCMI
- * instance and it will be used as follows:
- * - as a parameter fed from the core to the protocol initialization code so
- *   that it can access the core xfer operations to build and generate SCMI
- *   messages exclusively for the specific underlying protocol instance.
- * - as an opaque handle fed by an SCMI driver user when it tries to access
- *   this protocol through its own protocol operations.
- *   In this case this handle will be returned as an opaque object together
- *   with the related protocol operations when the SCMI driver tries to access
- *   the protocol.
- */
-struct scmi_protocol_handle {
-       struct device *dev;
-       const struct scmi_xfer_ops *xops;
-       int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
-       void *(*get_priv)(const struct scmi_protocol_handle *ph);
-};
-
-/**
- * struct scmi_xfer_ops  - References to the core SCMI xfer operations.
- * @version_get: Get this version protocol.
- * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
- * @reset_rx_to_maxsz: Reset rx size to max transport size.
- * @do_xfer: Do the SCMI transfer.
- * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
- * @xfer_put: Free the xfer slot.
- *
- * Note that all this operations expect a protocol handle as first parameter;
- * they then internally use it to infer the underlying protocol number: this
- * way is not possible for a protocol implementation to forge messages for
- * another protocol.
- */
-struct scmi_xfer_ops {
-       int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
-       int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
-                            size_t tx_size, size_t rx_size,
-                            struct scmi_xfer **p);
-       void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
-                                 struct scmi_xfer *xfer);
-       int (*do_xfer)(const struct scmi_protocol_handle *ph,
-                      struct scmi_xfer *xfer);
-       int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
-                                    struct scmi_xfer *xfer);
-       void (*xfer_put)(const struct scmi_protocol_handle *ph,
-                        struct scmi_xfer *xfer);
-};
-
 struct scmi_revision_info *
 scmi_revision_area_get(const struct scmi_protocol_handle *ph);
 int scmi_handle_put(const struct scmi_handle *handle);
@@ -277,55 +102,9 @@ void scmi_set_handle(struct scmi_device *scmi_dev);
 void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph,
                                     u8 *prot_imp);
 
-typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
-
-/**
- * struct scmi_protocol  - Protocol descriptor
- * @id: Protocol ID.
- * @owner: Module reference if any.
- * @instance_init: Mandatory protocol initialization function.
- * @instance_deinit: Optional protocol de-initialization function.
- * @ops: Optional reference to the operations provided by the protocol and
- *      exposed in scmi_protocol.h.
- * @events: An optional reference to the events supported by this protocol.
- */
-struct scmi_protocol {
-       const u8                                id;
-       struct module                           *owner;
-       const scmi_prot_init_ph_fn_t            instance_init;
-       const scmi_prot_init_ph_fn_t            instance_deinit;
-       const void                              *ops;
-       const struct scmi_protocol_events       *events;
-};
-
 int __init scmi_bus_init(void);
 void __exit scmi_bus_exit(void);
 
-#define DECLARE_SCMI_REGISTER_UNREGISTER(func)         \
-       int __init scmi_##func##_register(void);        \
-       void __exit scmi_##func##_unregister(void)
-DECLARE_SCMI_REGISTER_UNREGISTER(base);
-DECLARE_SCMI_REGISTER_UNREGISTER(clock);
-DECLARE_SCMI_REGISTER_UNREGISTER(perf);
-DECLARE_SCMI_REGISTER_UNREGISTER(power);
-DECLARE_SCMI_REGISTER_UNREGISTER(reset);
-DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
-DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
-DECLARE_SCMI_REGISTER_UNREGISTER(system);
-
-#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto)  \
-static const struct scmi_protocol *__this_proto = &(proto);    \
-                                                               \
-int __init scmi_##name##_register(void)                                \
-{                                                              \
-       return scmi_protocol_register(__this_proto);            \
-}                                                              \
-                                                               \
-void __exit scmi_##name##_unregister(void)                     \
-{                                                              \
-       scmi_protocol_unregister(__this_proto);                 \
-}
-
 const struct scmi_protocol *scmi_protocol_get(int protocol_id);
 void scmi_protocol_put(int protocol_id);
 
index 4611830..c1922bd 100644 (file)
@@ -128,7 +128,8 @@ struct scmi_protocol_instance {
  *            usage.
  * @protocols_mtx: A mutex to protect protocols instances initialization.
  * @protocols_imp: List of protocols implemented, currently maximum of
- *     MAX_PROTOCOLS_IMP elements allocated by the base protocol
+ *                scmi_revision_info.num_protocols elements allocated by the
+ *                base protocol
  * @active_protocols: IDR storing device_nodes for protocols actually defined
  *                   in the DT and confirmed as implemented by fw.
  * @atomic_threshold: Optional system wide DT-configured threshold, expressed
@@ -679,7 +680,8 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
 
        xfer = scmi_xfer_command_acquire(cinfo, msg_hdr);
        if (IS_ERR(xfer)) {
-               scmi_clear_channel(info, cinfo);
+               if (MSG_XTRACT_TYPE(msg_hdr) == MSG_TYPE_DELAYED_RESP)
+                       scmi_clear_channel(info, cinfo);
                return;
        }
 
@@ -1101,6 +1103,167 @@ static const struct scmi_xfer_ops xfer_ops = {
        .xfer_put = xfer_put,
 };
 
+struct scmi_msg_resp_domain_name_get {
+       __le32 flags;
+       u8 name[SCMI_MAX_STR_SIZE];
+};
+
+/**
+ * scmi_common_extended_name_get  - Common helper to get extended resources name
+ * @ph: A protocol handle reference.
+ * @cmd_id: The specific command ID to use.
+ * @res_id: The specific resource ID to use.
+ * @name: A pointer to the preallocated area where the retrieved name will be
+ *       stored as a NULL terminated string.
+ * @len: The len in bytes of the @name char array.
+ *
+ * Return: 0 on Success
+ */
+static int scmi_common_extended_name_get(const struct scmi_protocol_handle *ph,
+                                        u8 cmd_id, u32 res_id, char *name,
+                                        size_t len)
+{
+       int ret;
+       struct scmi_xfer *t;
+       struct scmi_msg_resp_domain_name_get *resp;
+
+       ret = ph->xops->xfer_get_init(ph, cmd_id, sizeof(res_id),
+                                     sizeof(*resp), &t);
+       if (ret)
+               goto out;
+
+       put_unaligned_le32(res_id, t->tx.buf);
+       resp = t->rx.buf;
+
+       ret = ph->xops->do_xfer(ph, t);
+       if (!ret)
+               strscpy(name, resp->name, len);
+
+       ph->xops->xfer_put(ph, t);
+out:
+       if (ret)
+               dev_warn(ph->dev,
+                        "Failed to get extended name - id:%u (ret:%d). Using %s\n",
+                        res_id, ret, name);
+       return ret;
+}
+
+/**
+ * struct scmi_iterator  - Iterator descriptor
+ * @msg: A reference to the message TX buffer; filled by @prepare_message with
+ *      a proper custom command payload for each multi-part command request.
+ * @resp: A reference to the response RX buffer; used by @update_state and
+ *       @process_response to parse the multi-part replies.
+ * @t: A reference to the underlying xfer initialized and used transparently by
+ *     the iterator internal routines.
+ * @ph: A reference to the associated protocol handle to be used.
+ * @ops: A reference to the custom provided iterator operations.
+ * @state: The current iterator state; used and updated in turn by the iterators
+ *        internal routines and by the caller-provided @scmi_iterator_ops.
+ * @priv: A reference to optional private data as provided by the caller and
+ *       passed back to the @scmi_iterator_ops.
+ */
+struct scmi_iterator {
+       void *msg;
+       void *resp;
+       struct scmi_xfer *t;
+       const struct scmi_protocol_handle *ph;
+       struct scmi_iterator_ops *ops;
+       struct scmi_iterator_state state;
+       void *priv;
+};
+
+static void *scmi_iterator_init(const struct scmi_protocol_handle *ph,
+                               struct scmi_iterator_ops *ops,
+                               unsigned int max_resources, u8 msg_id,
+                               size_t tx_size, void *priv)
+{
+       int ret;
+       struct scmi_iterator *i;
+
+       i = devm_kzalloc(ph->dev, sizeof(*i), GFP_KERNEL);
+       if (!i)
+               return ERR_PTR(-ENOMEM);
+
+       i->ph = ph;
+       i->ops = ops;
+       i->priv = priv;
+
+       ret = ph->xops->xfer_get_init(ph, msg_id, tx_size, 0, &i->t);
+       if (ret) {
+               devm_kfree(ph->dev, i);
+               return ERR_PTR(ret);
+       }
+
+       i->state.max_resources = max_resources;
+       i->msg = i->t->tx.buf;
+       i->resp = i->t->rx.buf;
+
+       return i;
+}
+
+static int scmi_iterator_run(void *iter)
+{
+       int ret = -EINVAL;
+       struct scmi_iterator_ops *iops;
+       const struct scmi_protocol_handle *ph;
+       struct scmi_iterator_state *st;
+       struct scmi_iterator *i = iter;
+
+       if (!i || !i->ops || !i->ph)
+               return ret;
+
+       iops = i->ops;
+       ph = i->ph;
+       st = &i->state;
+
+       do {
+               iops->prepare_message(i->msg, st->desc_index, i->priv);
+               ret = ph->xops->do_xfer(ph, i->t);
+               if (ret)
+                       break;
+
+               ret = iops->update_state(st, i->resp, i->priv);
+               if (ret)
+                       break;
+
+               if (st->num_returned > st->max_resources - st->desc_index) {
+                       dev_err(ph->dev,
+                               "No. of resources can't exceed %d\n",
+                               st->max_resources);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               for (st->loop_idx = 0; st->loop_idx < st->num_returned;
+                    st->loop_idx++) {
+                       ret = iops->process_response(ph, i->resp, st, i->priv);
+                       if (ret)
+                               goto out;
+               }
+
+               st->desc_index += st->num_returned;
+               ph->xops->reset_rx_to_maxsz(ph, i->t);
+               /*
+                * check for both returned and remaining to avoid infinite
+                * loop due to buggy firmware
+                */
+       } while (st->num_returned && st->num_remaining);
+
+out:
+       /* Finalize and destroy iterator */
+       ph->xops->xfer_put(ph, i->t);
+       devm_kfree(ph->dev, i);
+
+       return ret;
+}
+
+static const struct scmi_proto_helpers_ops helpers_ops = {
+       .extended_name_get = scmi_common_extended_name_get,
+       .iter_response_init = scmi_iterator_init,
+       .iter_response_run = scmi_iterator_run,
+};
+
 /**
  * scmi_revision_area_get  - Retrieve version memory area.
  *
@@ -1161,6 +1324,7 @@ scmi_alloc_init_protocol_instance(struct scmi_info *info,
        pi->handle = handle;
        pi->ph.dev = handle->dev;
        pi->ph.xops = &xfer_ops;
+       pi->ph.hops = &helpers_ops;
        pi->ph.set_priv = scmi_set_protocol_priv;
        pi->ph.get_priv = scmi_get_protocol_priv;
        refcount_set(&pi->users, 1);
@@ -1309,11 +1473,12 @@ scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
 {
        int i;
        struct scmi_info *info = handle_to_scmi_info(handle);
+       struct scmi_revision_info *rev = handle->version;
 
        if (!info->protocols_imp)
                return false;
 
-       for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
+       for (i = 0; i < rev->num_protocols; i++)
                if (info->protocols_imp[i] == prot_id)
                        return true;
        return false;
index 734f1ee..b503c22 100644 (file)
@@ -64,6 +64,22 @@ enum scmi_optee_pta_cmd {
         * [in]     value[0].b: Requested capabilities mask (enum pta_scmi_caps)
         */
        PTA_SCMI_CMD_GET_CHANNEL = 3,
+
+       /*
+        * PTA_SCMI_CMD_PROCESS_MSG_CHANNEL - Process SCMI message in a MSG
+        * buffer pointed by memref parameters
+        *
+        * [in]     value[0].a: Channel handle
+        * [in]     memref[1]: Message buffer (MSG and SCMI payload)
+        * [out]    memref[2]: Response buffer (MSG and SCMI payload)
+        *
+        * Shared memories used for SCMI message/response are MSG buffers
+        * referenced by param[1] and param[2]. MSG transport protocol
+        * uses a 32bit header to carry SCMI meta-data (protocol ID and
+        * protocol message ID) followed by the effective SCMI message
+        * payload.
+        */
+       PTA_SCMI_CMD_PROCESS_MSG_CHANNEL = 4,
 };
 
 /*
@@ -72,9 +88,17 @@ enum scmi_optee_pta_cmd {
  * PTA_SCMI_CAPS_SMT_HEADER
  * When set, OP-TEE supports command using SMT header protocol (SCMI shmem) in
  * shared memory buffers to carry SCMI protocol synchronisation information.
+ *
+ * PTA_SCMI_CAPS_MSG_HEADER
+ * When set, OP-TEE supports command using MSG header protocol in an OP-TEE
+ * shared memory to carry SCMI protocol synchronisation information and SCMI
+ * message payload.
  */
 #define PTA_SCMI_CAPS_NONE             0
 #define PTA_SCMI_CAPS_SMT_HEADER       BIT(0)
+#define PTA_SCMI_CAPS_MSG_HEADER       BIT(1)
+#define PTA_SCMI_CAPS_MASK             (PTA_SCMI_CAPS_SMT_HEADER | \
+                                        PTA_SCMI_CAPS_MSG_HEADER)
 
 /**
  * struct scmi_optee_channel - Description of an OP-TEE SCMI channel
@@ -85,7 +109,8 @@ enum scmi_optee_pta_cmd {
  * @mu: Mutex protection on channel access
  * @cinfo: SCMI channel information
  * @shmem: Virtual base address of the shared memory
- * @tee_shm: Reference to TEE shared memory or NULL if using static shmem
+ * @req: Shared memory protocol handle for SCMI request and synchronous response
+ * @tee_shm: TEE shared memory handle @req or NULL if using IOMEM shmem
  * @link: Reference in agent's channel list
  */
 struct scmi_optee_channel {
@@ -94,7 +119,10 @@ struct scmi_optee_channel {
        u32 caps;
        struct mutex mu;
        struct scmi_chan_info *cinfo;
-       struct scmi_shared_mem __iomem *shmem;
+       union {
+               struct scmi_shared_mem __iomem *shmem;
+               struct scmi_msg_payld *msg;
+       } req;
        struct tee_shm *tee_shm;
        struct list_head link;
 };
@@ -178,8 +206,8 @@ static int get_capabilities(struct scmi_optee_agent *agent)
 
        caps = param[0].u.value.a;
 
-       if (!(caps & PTA_SCMI_CAPS_SMT_HEADER)) {
-               dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT\n");
+       if (!(caps & (PTA_SCMI_CAPS_SMT_HEADER | PTA_SCMI_CAPS_MSG_HEADER))) {
+               dev_err(agent->dev, "OP-TEE SCMI PTA doesn't support SMT and MSG\n");
                return -EOPNOTSUPP;
        }
 
@@ -193,9 +221,14 @@ static int get_channel(struct scmi_optee_channel *channel)
        struct device *dev = scmi_optee_private->dev;
        struct tee_ioctl_invoke_arg arg = { };
        struct tee_param param[1] = { };
-       unsigned int caps = PTA_SCMI_CAPS_SMT_HEADER;
+       unsigned int caps = 0;
        int ret;
 
+       if (channel->tee_shm)
+               caps = PTA_SCMI_CAPS_MSG_HEADER;
+       else
+               caps = PTA_SCMI_CAPS_SMT_HEADER;
+
        arg.func = PTA_SCMI_CMD_GET_CHANNEL;
        arg.session = channel->tee_session;
        arg.num_params = 1;
@@ -220,25 +253,48 @@ static int get_channel(struct scmi_optee_channel *channel)
 
 static int invoke_process_smt_channel(struct scmi_optee_channel *channel)
 {
-       struct tee_ioctl_invoke_arg arg = { };
-       struct tee_param param[2] = { };
+       struct tee_ioctl_invoke_arg arg = {
+               .func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL,
+               .session = channel->tee_session,
+               .num_params = 1,
+       };
+       struct tee_param param[1] = { };
        int ret;
 
-       arg.session = channel->tee_session;
        param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
        param[0].u.value.a = channel->channel_id;
 
-       if (channel->tee_shm) {
-               param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
-               param[1].u.memref.shm = channel->tee_shm;
-               param[1].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
-               arg.num_params = 2;
-               arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL_MESSAGE;
-       } else {
-               arg.num_params = 1;
-               arg.func = PTA_SCMI_CMD_PROCESS_SMT_CHANNEL;
+       ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
+       if (ret < 0 || arg.ret) {
+               dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
+                       channel->channel_id, ret, arg.ret);
+               return -EIO;
        }
 
+       return 0;
+}
+
+static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t msg_size)
+{
+       struct tee_ioctl_invoke_arg arg = {
+               .func = PTA_SCMI_CMD_PROCESS_MSG_CHANNEL,
+               .session = channel->tee_session,
+               .num_params = 3,
+       };
+       struct tee_param param[3] = { };
+       int ret;
+
+       param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+       param[0].u.value.a = channel->channel_id;
+
+       param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+       param[1].u.memref.shm = channel->tee_shm;
+       param[1].u.memref.size = msg_size;
+
+       param[2].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+       param[2].u.memref.shm = channel->tee_shm;
+       param[2].u.memref.size = SCMI_OPTEE_MAX_MSG_SIZE;
+
        ret = tee_client_invoke_func(scmi_optee_private->tee_ctx, &arg, param);
        if (ret < 0 || arg.ret) {
                dev_err(scmi_optee_private->dev, "Can't invoke channel %u: %d / %#x\n",
@@ -279,7 +335,26 @@ static void scmi_optee_clear_channel(struct scmi_chan_info *cinfo)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
 
-       shmem_clear_channel(channel->shmem);
+       if (!channel->tee_shm)
+               shmem_clear_channel(channel->req.shmem);
+}
+
+static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *channel)
+{
+       const size_t msg_size = SCMI_OPTEE_MAX_MSG_SIZE;
+       void *shbuf;
+
+       channel->tee_shm = tee_shm_alloc_kernel_buf(scmi_optee_private->tee_ctx, msg_size);
+       if (IS_ERR(channel->tee_shm)) {
+               dev_err(channel->cinfo->dev, "shmem allocation failed\n");
+               return -ENOMEM;
+       }
+
+       shbuf = tee_shm_get_va(channel->tee_shm, 0);
+       memset(shbuf, 0, msg_size);
+       channel->req.msg = shbuf;
+
+       return 0;
 }
 
 static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
@@ -304,8 +379,8 @@ static int setup_static_shmem(struct device *dev, struct scmi_chan_info *cinfo,
 
        size = resource_size(&res);
 
-       channel->shmem = devm_ioremap(dev, res.start, size);
-       if (!channel->shmem) {
+       channel->req.shmem = devm_ioremap(dev, res.start, size);
+       if (!channel->req.shmem) {
                dev_err(dev, "Failed to ioremap SCMI Tx shared memory\n");
                ret = -EADDRNOTAVAIL;
                goto out;
@@ -325,7 +400,7 @@ static int setup_shmem(struct device *dev, struct scmi_chan_info *cinfo,
        if (of_find_property(cinfo->dev->of_node, "shmem", NULL))
                return setup_static_shmem(dev, cinfo, channel);
        else
-               return -ENOMEM;
+               return setup_dynamic_shmem(dev, channel);
 }
 
 static int scmi_optee_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx)
@@ -405,27 +480,22 @@ static int scmi_optee_chan_free(int id, void *p, void *data)
        return 0;
 }
 
-static struct scmi_shared_mem *get_channel_shm(struct scmi_optee_channel *chan,
-                                              struct scmi_xfer *xfer)
-{
-       if (!chan)
-               return NULL;
-
-       return chan->shmem;
-}
-
-
 static int scmi_optee_send_message(struct scmi_chan_info *cinfo,
                                   struct scmi_xfer *xfer)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
-       struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
        int ret;
 
        mutex_lock(&channel->mu);
-       shmem_tx_prepare(shmem, xfer);
 
-       ret = invoke_process_smt_channel(channel);
+       if (channel->tee_shm) {
+               msg_tx_prepare(channel->req.msg, xfer);
+               ret = invoke_process_msg_channel(channel, msg_command_size(xfer));
+       } else {
+               shmem_tx_prepare(channel->req.shmem, xfer);
+               ret = invoke_process_smt_channel(channel);
+       }
+
        if (ret)
                mutex_unlock(&channel->mu);
 
@@ -436,9 +506,11 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo,
                                      struct scmi_xfer *xfer)
 {
        struct scmi_optee_channel *channel = cinfo->transport_info;
-       struct scmi_shared_mem *shmem = get_channel_shm(channel, xfer);
 
-       shmem_fetch_response(shmem, xfer);
+       if (channel->tee_shm)
+               msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer);
+       else
+               shmem_fetch_response(channel->req.shmem, xfer);
 }
 
 static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
index f4cd519..8f4051a 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * System Control and Management Interface (SCMI) Performance Protocol
  *
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
  */
 
 #define pr_fmt(fmt) "SCMI Notifications PERF - " fmt
 #include <linux/scmi_protocol.h>
 #include <linux/sort.h>
 
-#include "common.h"
+#include "protocols.h"
 #include "notify.h"
 
+#define MAX_OPPS               16
+
 enum scmi_performance_protocol_cmd {
        PERF_DOMAIN_ATTRIBUTES = 0x3,
        PERF_DESCRIBE_LEVELS = 0x4,
@@ -30,6 +32,7 @@ enum scmi_performance_protocol_cmd {
        PERF_NOTIFY_LIMITS = 0x9,
        PERF_NOTIFY_LEVEL = 0xa,
        PERF_DESCRIBE_FASTCHANNEL = 0xb,
+       PERF_DOMAIN_NAME_GET = 0xc,
 };
 
 struct scmi_opp {
@@ -42,6 +45,7 @@ struct scmi_msg_resp_perf_attributes {
        __le16 num_domains;
        __le16 flags;
 #define POWER_SCALE_IN_MILLIWATT(x)    ((x) & BIT(0))
+#define POWER_SCALE_IN_MICROWATT(x)    ((x) & BIT(1))
        __le32 stats_addr_low;
        __le32 stats_addr_high;
        __le32 stats_size;
@@ -54,10 +58,11 @@ struct scmi_msg_resp_perf_domain_attributes {
 #define SUPPORTS_PERF_LIMIT_NOTIFY(x)  ((x) & BIT(29))
 #define SUPPORTS_PERF_LEVEL_NOTIFY(x)  ((x) & BIT(28))
 #define SUPPORTS_PERF_FASTCHANNELS(x)  ((x) & BIT(27))
+#define SUPPORTS_EXTENDED_NAMES(x)     ((x) & BIT(26))
        __le32 rate_limit_us;
        __le32 sustained_freq_khz;
        __le32 sustained_perf_level;
-           u8 name[SCMI_MAX_STR_SIZE];
+           u8 name[SCMI_SHORT_NAME_MAX_SIZE];
 };
 
 struct scmi_msg_perf_describe_levels {
@@ -166,6 +171,7 @@ struct scmi_perf_info {
        u32 version;
        int num_domains;
        bool power_scale_mw;
+       bool power_scale_uw;
        u64 stats_addr;
        u32 stats_size;
        struct perf_dom_info *dom_info;
@@ -196,6 +202,8 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
 
                pi->num_domains = le16_to_cpu(attr->num_domains);
                pi->power_scale_mw = POWER_SCALE_IN_MILLIWATT(flags);
+               if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3)
+                       pi->power_scale_uw = POWER_SCALE_IN_MICROWATT(flags);
                pi->stats_addr = le32_to_cpu(attr->stats_addr_low) |
                                (u64)le32_to_cpu(attr->stats_addr_high) << 32;
                pi->stats_size = le32_to_cpu(attr->stats_size);
@@ -207,9 +215,11 @@ static int scmi_perf_attributes_get(const struct scmi_protocol_handle *ph,
 
 static int
 scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
-                               u32 domain, struct perf_dom_info *dom_info)
+                               u32 domain, struct perf_dom_info *dom_info,
+                               u32 version)
 {
        int ret;
+       u32 flags;
        struct scmi_xfer *t;
        struct scmi_msg_resp_perf_domain_attributes *attr;
 
@@ -223,7 +233,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               u32 flags = le32_to_cpu(attr->flags);
+               flags = le32_to_cpu(attr->flags);
 
                dom_info->set_limits = SUPPORTS_SET_LIMITS(flags);
                dom_info->set_perf = SUPPORTS_SET_PERF_LVL(flags);
@@ -246,6 +256,16 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph,
        }
 
        ph->xops->xfer_put(ph, t);
+
+       /*
+        * If supported overwrite short name with the extended one;
+        * on error just carry on and use already provided short name.
+        */
+       if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+           SUPPORTS_EXTENDED_NAMES(flags))
+               ph->hops->extended_name_get(ph, PERF_DOMAIN_NAME_GET, domain,
+                                           dom_info->name, SCMI_MAX_STR_SIZE);
+
        return ret;
 }
 
@@ -256,66 +276,87 @@ static int opp_cmp_func(const void *opp1, const void *opp2)
        return t1->perf - t2->perf;
 }
 
-static int
-scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
-                             struct perf_dom_info *perf_dom)
+struct scmi_perf_ipriv {
+       u32 domain;
+       struct perf_dom_info *perf_dom;
+};
+
+static void iter_perf_levels_prepare_message(void *message,
+                                            unsigned int desc_index,
+                                            const void *priv)
 {
-       int ret, cnt;
-       u32 tot_opp_cnt = 0;
-       u16 num_returned, num_remaining;
-       struct scmi_xfer *t;
-       struct scmi_opp *opp;
-       struct scmi_msg_perf_describe_levels *dom_info;
-       struct scmi_msg_resp_perf_describe_levels *level_info;
+       struct scmi_msg_perf_describe_levels *msg = message;
+       const struct scmi_perf_ipriv *p = priv;
 
-       ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_LEVELS,
-                                     sizeof(*dom_info), 0, &t);
-       if (ret)
-               return ret;
+       msg->domain = cpu_to_le32(p->domain);
+       /* Set the number of OPPs to be skipped/already read */
+       msg->level_index = cpu_to_le32(desc_index);
+}
 
-       dom_info = t->tx.buf;
-       level_info = t->rx.buf;
+static int iter_perf_levels_update_state(struct scmi_iterator_state *st,
+                                        const void *response, void *priv)
+{
+       const struct scmi_msg_resp_perf_describe_levels *r = response;
 
-       do {
-               dom_info->domain = cpu_to_le32(domain);
-               /* Set the number of OPPs to be skipped/already read */
-               dom_info->level_index = cpu_to_le32(tot_opp_cnt);
+       st->num_returned = le16_to_cpu(r->num_returned);
+       st->num_remaining = le16_to_cpu(r->num_remaining);
 
-               ret = ph->xops->do_xfer(ph, t);
-               if (ret)
-                       break;
+       return 0;
+}
 
-               num_returned = le16_to_cpu(level_info->num_returned);
-               num_remaining = le16_to_cpu(level_info->num_remaining);
-               if (tot_opp_cnt + num_returned > MAX_OPPS) {
-                       dev_err(ph->dev, "No. of OPPs exceeded MAX_OPPS");
-                       break;
-               }
+static int
+iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
+                                 const void *response,
+                                 struct scmi_iterator_state *st, void *priv)
+{
+       struct scmi_opp *opp;
+       const struct scmi_msg_resp_perf_describe_levels *r = response;
+       struct scmi_perf_ipriv *p = priv;
 
-               opp = &perf_dom->opp[tot_opp_cnt];
-               for (cnt = 0; cnt < num_returned; cnt++, opp++) {
-                       opp->perf = le32_to_cpu(level_info->opp[cnt].perf_val);
-                       opp->power = le32_to_cpu(level_info->opp[cnt].power);
-                       opp->trans_latency_us = le16_to_cpu
-                               (level_info->opp[cnt].transition_latency_us);
+       opp = &p->perf_dom->opp[st->desc_index + st->loop_idx];
+       opp->perf = le32_to_cpu(r->opp[st->loop_idx].perf_val);
+       opp->power = le32_to_cpu(r->opp[st->loop_idx].power);
+       opp->trans_latency_us =
+               le16_to_cpu(r->opp[st->loop_idx].transition_latency_us);
+       p->perf_dom->opp_count++;
 
-                       dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
-                               opp->perf, opp->power, opp->trans_latency_us);
-               }
+       dev_dbg(ph->dev, "Level %d Power %d Latency %dus\n",
+               opp->perf, opp->power, opp->trans_latency_us);
 
-               tot_opp_cnt += num_returned;
+       return 0;
+}
 
-               ph->xops->reset_rx_to_maxsz(ph, t);
-               /*
-                * check for both returned and remaining to avoid infinite
-                * loop due to buggy firmware
-                */
-       } while (num_returned && num_remaining);
+static int
+scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain,
+                             struct perf_dom_info *perf_dom)
+{
+       int ret;
+       void *iter;
+       struct scmi_msg_perf_describe_levels *msg;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_perf_levels_prepare_message,
+               .update_state = iter_perf_levels_update_state,
+               .process_response = iter_perf_levels_process_response,
+       };
+       struct scmi_perf_ipriv ppriv = {
+               .domain = domain,
+               .perf_dom = perf_dom,
+       };
+
+       iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS,
+                                           PERF_DESCRIBE_LEVELS,
+                                           sizeof(*msg), &ppriv);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       ret = ph->hops->iter_response_run(iter);
+       if (ret)
+               return ret;
 
-       perf_dom->opp_count = tot_opp_cnt;
-       ph->xops->xfer_put(ph, t);
+       if (perf_dom->opp_count)
+               sort(perf_dom->opp, perf_dom->opp_count,
+                    sizeof(struct scmi_opp), opp_cmp_func, NULL);
 
-       sort(perf_dom->opp, tot_opp_cnt, sizeof(*opp), opp_cmp_func, NULL);
        return ret;
 }
 
@@ -382,6 +423,9 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph,
        struct scmi_perf_info *pi = ph->get_priv(ph);
        struct perf_dom_info *dom = pi->dom_info + domain;
 
+       if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf)
+               return -EINVAL;
+
        if (dom->fc_info && dom->fc_info->limit_set_addr) {
                iowrite32(max_perf, dom->fc_info->limit_set_addr);
                iowrite32(min_perf, dom->fc_info->limit_set_addr + 4);
@@ -873,11 +917,13 @@ static const struct scmi_protocol_events perf_protocol_events = {
 
 static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
 {
-       int domain;
+       int domain, ret;
        u32 version;
        struct scmi_perf_info *pinfo;
 
-       ph->xops->version_get(ph, &version);
+       ret = ph->xops->version_get(ph, &version);
+       if (ret)
+               return ret;
 
        dev_dbg(ph->dev, "Performance Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -886,7 +932,9 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
        if (!pinfo)
                return -ENOMEM;
 
-       scmi_perf_attributes_get(ph, pinfo);
+       ret = scmi_perf_attributes_get(ph, pinfo);
+       if (ret)
+               return ret;
 
        pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
                                       sizeof(*pinfo->dom_info), GFP_KERNEL);
@@ -896,7 +944,7 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
        for (domain = 0; domain < pinfo->num_domains; domain++) {
                struct perf_dom_info *dom = pinfo->dom_info + domain;
 
-               scmi_perf_domain_attributes_get(ph, domain, dom);
+               scmi_perf_domain_attributes_get(ph, domain, dom, version);
                scmi_perf_describe_levels_get(ph, domain, dom);
 
                if (dom->perf_fastchannels)
index ad2ab08..964882c 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * System Control and Management Interface (SCMI) Power Protocol
  *
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
  */
 
 #define pr_fmt(fmt) "SCMI Notifications POWER - " fmt
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/scmi_protocol.h>
 
-#include "common.h"
+#include "protocols.h"
 #include "notify.h"
 
 enum scmi_power_protocol_cmd {
@@ -18,6 +18,7 @@ enum scmi_power_protocol_cmd {
        POWER_STATE_SET = 0x4,
        POWER_STATE_GET = 0x5,
        POWER_STATE_NOTIFY = 0x6,
+       POWER_DOMAIN_NAME_GET = 0x8,
 };
 
 struct scmi_msg_resp_power_attributes {
@@ -33,7 +34,8 @@ struct scmi_msg_resp_power_domain_attributes {
 #define SUPPORTS_STATE_SET_NOTIFY(x)   ((x) & BIT(31))
 #define SUPPORTS_STATE_SET_ASYNC(x)    ((x) & BIT(30))
 #define SUPPORTS_STATE_SET_SYNC(x)     ((x) & BIT(29))
-           u8 name[SCMI_MAX_STR_SIZE];
+#define SUPPORTS_EXTENDED_NAMES(x)     ((x) & BIT(27))
+           u8 name[SCMI_SHORT_NAME_MAX_SIZE];
 };
 
 struct scmi_power_set_state {
@@ -97,9 +99,11 @@ static int scmi_power_attributes_get(const struct scmi_protocol_handle *ph,
 
 static int
 scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
-                                u32 domain, struct power_dom_info *dom_info)
+                                u32 domain, struct power_dom_info *dom_info,
+                                u32 version)
 {
        int ret;
+       u32 flags;
        struct scmi_xfer *t;
        struct scmi_msg_resp_power_domain_attributes *attr;
 
@@ -113,15 +117,26 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph,
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               u32 flags = le32_to_cpu(attr->flags);
+               flags = le32_to_cpu(attr->flags);
 
                dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags);
                dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags);
                dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags);
                strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
        }
-
        ph->xops->xfer_put(ph, t);
+
+       /*
+        * If supported overwrite short name with the extended one;
+        * on error just carry on and use already provided short name.
+        */
+       if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+           SUPPORTS_EXTENDED_NAMES(flags)) {
+               ph->hops->extended_name_get(ph, POWER_DOMAIN_NAME_GET,
+                                           domain, dom_info->name,
+                                           SCMI_MAX_STR_SIZE);
+       }
+
        return ret;
 }
 
@@ -174,8 +189,9 @@ static int scmi_power_num_domains_get(const struct scmi_protocol_handle *ph)
        return pi->num_domains;
 }
 
-static char *scmi_power_name_get(const struct scmi_protocol_handle *ph,
-                                u32 domain)
+static const char *
+scmi_power_name_get(const struct scmi_protocol_handle *ph,
+                   u32 domain)
 {
        struct scmi_power_info *pi = ph->get_priv(ph);
        struct power_dom_info *dom = pi->dom_info + domain;
@@ -280,11 +296,13 @@ static const struct scmi_protocol_events power_protocol_events = {
 
 static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
 {
-       int domain;
+       int domain, ret;
        u32 version;
        struct scmi_power_info *pinfo;
 
-       ph->xops->version_get(ph, &version);
+       ret = ph->xops->version_get(ph, &version);
+       if (ret)
+               return ret;
 
        dev_dbg(ph->dev, "Power Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -293,7 +311,9 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
        if (!pinfo)
                return -ENOMEM;
 
-       scmi_power_attributes_get(ph, pinfo);
+       ret = scmi_power_attributes_get(ph, pinfo);
+       if (ret)
+               return ret;
 
        pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
                                       sizeof(*pinfo->dom_info), GFP_KERNEL);
@@ -303,7 +323,7 @@ static int scmi_power_protocol_init(const struct scmi_protocol_handle *ph)
        for (domain = 0; domain < pinfo->num_domains; domain++) {
                struct power_dom_info *dom = pinfo->dom_info + domain;
 
-               scmi_power_domain_attributes_get(ph, domain, dom);
+               scmi_power_domain_attributes_get(ph, domain, dom, version);
        }
 
        pinfo->version = version;
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
new file mode 100644 (file)
index 0000000..73304af
--- /dev/null
@@ -0,0 +1,318 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * System Control and Management Interface (SCMI) Message Protocol
+ * protocols common header file containing some definitions, structures
+ * and function prototypes used in all the different SCMI protocols.
+ *
+ * Copyright (C) 2022 ARM Ltd.
+ */
+#ifndef _SCMI_PROTOCOLS_H
+#define _SCMI_PROTOCOLS_H
+
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/refcount.h>
+#include <linux/scmi_protocol.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/unaligned.h>
+
+#define SCMI_SHORT_NAME_MAX_SIZE       16
+
+#define PROTOCOL_REV_MINOR_MASK        GENMASK(15, 0)
+#define PROTOCOL_REV_MAJOR_MASK        GENMASK(31, 16)
+#define PROTOCOL_REV_MAJOR(x)  ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
+#define PROTOCOL_REV_MINOR(x)  ((u16)(FIELD_GET(PROTOCOL_REV_MINOR_MASK, (x))))
+
+enum scmi_common_cmd {
+       PROTOCOL_VERSION = 0x0,
+       PROTOCOL_ATTRIBUTES = 0x1,
+       PROTOCOL_MESSAGE_ATTRIBUTES = 0x2,
+};
+
+/**
+ * struct scmi_msg_resp_prot_version - Response for a message
+ *
+ * @minor_version: Minor version of the ABI that firmware supports
+ * @major_version: Major version of the ABI that firmware supports
+ *
+ * In general, ABI version changes follow the rule that minor version increments
+ * are backward compatible. Major revision changes in ABI may not be
+ * backward compatible.
+ *
+ * Response to a generic message with message type SCMI_MSG_VERSION
+ */
+struct scmi_msg_resp_prot_version {
+       __le16 minor_version;
+       __le16 major_version;
+};
+
+/**
+ * struct scmi_msg - Message(Tx/Rx) structure
+ *
+ * @buf: Buffer pointer
+ * @len: Length of data in the Buffer
+ */
+struct scmi_msg {
+       void *buf;
+       size_t len;
+};
+
+/**
+ * struct scmi_msg_hdr - Message(Tx/Rx) header
+ *
+ * @id: The identifier of the message being sent
+ * @protocol_id: The identifier of the protocol used to send @id message
+ * @type: The SCMI type for this message
+ * @seq: The token to identify the message. When a message returns, the
+ *     platform returns the whole message header unmodified including the
+ *     token
+ * @status: Status of the transfer once it's complete
+ * @poll_completion: Indicate if the transfer needs to be polled for
+ *     completion or interrupt mode is used
+ */
+struct scmi_msg_hdr {
+       u8 id;
+       u8 protocol_id;
+       u8 type;
+       u16 seq;
+       u32 status;
+       bool poll_completion;
+};
+
+/**
+ * struct scmi_xfer - Structure representing a message flow
+ *
+ * @transfer_id: Unique ID for debug & profiling purpose
+ * @hdr: Transmit message header
+ * @tx: Transmit message
+ * @rx: Receive message, the buffer should be pre-allocated to store
+ *     message. If request-ACK protocol is used, we can reuse the same
+ *     buffer for the rx path as we use for the tx path.
+ * @done: command message transmit completion event
+ * @async_done: pointer to delayed response message received event completion
+ * @pending: True for xfers added to @pending_xfers hashtable
+ * @node: An hlist_node reference used to store this xfer, alternatively, on
+ *       the free list @free_xfers or in the @pending_xfers hashtable
+ * @users: A refcount to track the active users for this xfer.
+ *        This is meant to protect against the possibility that, when a command
+ *        transaction times out concurrently with the reception of a valid
+ *        response message, the xfer could be finally put on the TX path, and
+ *        so vanish, while on the RX path scmi_rx_callback() is still
+ *        processing it: in such a case this refcounting will ensure that, even
+ *        though the timed-out transaction will anyway cause the command
+ *        request to be reported as failed by time-out, the underlying xfer
+ *        cannot be discarded and possibly reused until the last user on
+ *        the RX path has released it.
+ * @busy: An atomic flag to ensure exclusive write access to this xfer
+ * @state: The current state of this transfer, with states transitions deemed
+ *        valid being:
+ *         - SCMI_XFER_SENT_OK -> SCMI_XFER_RESP_OK [ -> SCMI_XFER_DRESP_OK ]
+ *         - SCMI_XFER_SENT_OK -> SCMI_XFER_DRESP_OK
+ *           (Missing synchronous response is assumed OK and ignored)
+ * @lock: A spinlock to protect state and busy fields.
+ * @priv: A pointer for transport private usage.
+ */
+struct scmi_xfer {
+       int transfer_id;
+       struct scmi_msg_hdr hdr;
+       struct scmi_msg tx;
+       struct scmi_msg rx;
+       struct completion done;
+       struct completion *async_done;
+       bool pending;
+       struct hlist_node node;
+       refcount_t users;
+#define SCMI_XFER_FREE         0
+#define SCMI_XFER_BUSY         1
+       atomic_t busy;
+#define SCMI_XFER_SENT_OK      0
+#define SCMI_XFER_RESP_OK      1
+#define SCMI_XFER_DRESP_OK     2
+       int state;
+       /* A lock to protect state and busy fields */
+       spinlock_t lock;
+       void *priv;
+};
+
+struct scmi_xfer_ops;
+struct scmi_proto_helpers_ops;
+
+/**
+ * struct scmi_protocol_handle  - Reference to an initialized protocol instance
+ *
+ * @dev: A reference to the associated SCMI instance device (handle->dev).
+ * @xops: A reference to a struct holding refs to the core xfer operations that
+ *       can be used by the protocol implementation to generate SCMI messages.
+ * @set_priv: A method to set protocol private data for this instance.
+ * @get_priv: A method to get protocol private data previously set.
+ *
+ * This structure represents a protocol initialized against specific SCMI
+ * instance and it will be used as follows:
+ * - as a parameter fed from the core to the protocol initialization code so
+ *   that it can access the core xfer operations to build and generate SCMI
+ *   messages exclusively for the specific underlying protocol instance.
+ * - as an opaque handle fed by an SCMI driver user when it tries to access
+ *   this protocol through its own protocol operations.
+ *   In this case this handle will be returned as an opaque object together
+ *   with the related protocol operations when the SCMI driver tries to access
+ *   the protocol.
+ */
+struct scmi_protocol_handle {
+       struct device *dev;
+       const struct scmi_xfer_ops *xops;
+       const struct scmi_proto_helpers_ops *hops;
+       int (*set_priv)(const struct scmi_protocol_handle *ph, void *priv);
+       void *(*get_priv)(const struct scmi_protocol_handle *ph);
+};
+
+/**
+ * struct scmi_iterator_state  - Iterator current state descriptor
+ * @desc_index: Starting index for the current multi-part request.
+ * @num_returned: Number of returned items in the last multi-part reply.
+ * @num_remaining: Number of remaining items in the multi-part message.
+ * @max_resources: Maximum acceptable number of items, configured by the caller
+ *                depending on the underlying resources that it is querying.
+ * @loop_idx: The iterator loop index in the current multi-part reply.
+ * @priv: Optional pointer to some additional state-related private data setup
+ *       by the caller during the iterations.
+ */
+struct scmi_iterator_state {
+       unsigned int desc_index;
+       unsigned int num_returned;
+       unsigned int num_remaining;
+       unsigned int max_resources;
+       unsigned int loop_idx;
+       void *priv;
+};
+
+/**
+ * struct scmi_iterator_ops  - Custom iterator operations
+ * @prepare_message: An operation to provide the custom logic to fill in the
+ *                  SCMI command request pointed by @message. @desc_index is
+ *                  a reference to the next index to use in the multi-part
+ *                  request.
+ * @update_state: An operation to provide the custom logic to update the
+ *               iterator state from the actual message response.
+ * @process_response: An operation to provide the custom logic needed to process
+ *                   each chunk of the multi-part message.
+ */
+struct scmi_iterator_ops {
+       void (*prepare_message)(void *message, unsigned int desc_index,
+                               const void *priv);
+       int (*update_state)(struct scmi_iterator_state *st,
+                           const void *response, void *priv);
+       int (*process_response)(const struct scmi_protocol_handle *ph,
+                               const void *response,
+                               struct scmi_iterator_state *st, void *priv);
+};
+
+/**
+ * struct scmi_proto_helpers_ops  - References to common protocol helpers
+ * @extended_name_get: A common helper function to retrieve extended naming
+ *                    for the specified resource using the specified command.
+ *                    Result is returned as a NULL terminated string in the
+ *                    pre-allocated area pointed to by @name with maximum
+ *                    capacity of @len bytes.
+ * @iter_response_init: A common helper to initialize a generic iterator to
+ *                     parse multi-message responses: when run the iterator
+ *                     will take care to send the initial command request as
+ *                     specified by @msg_id and @tx_size and then to parse the
+ *                     multi-part responses using the custom operations
+ *                     provided in @ops.
+ * @iter_response_run: A common helper to trigger the run of a previously
+ *                    initialized iterator.
+ */
+struct scmi_proto_helpers_ops {
+       int (*extended_name_get)(const struct scmi_protocol_handle *ph,
+                                u8 cmd_id, u32 res_id, char *name, size_t len);
+       void *(*iter_response_init)(const struct scmi_protocol_handle *ph,
+                                   struct scmi_iterator_ops *ops,
+                                   unsigned int max_resources, u8 msg_id,
+                                   size_t tx_size, void *priv);
+       int (*iter_response_run)(void *iter);
+};
+
+/**
+ * struct scmi_xfer_ops  - References to the core SCMI xfer operations.
+ * @version_get: Get the version of this protocol.
+ * @xfer_get_init: Initialize one struct xfer if any xfer slot is free.
+ * @reset_rx_to_maxsz: Reset rx size to max transport size.
+ * @do_xfer: Do the SCMI transfer.
+ * @do_xfer_with_response: Do the SCMI transfer waiting for a response.
+ * @xfer_put: Free the xfer slot.
+ *
+ * Note that all these operations expect a protocol handle as first parameter;
+ * they then internally use it to infer the underlying protocol number: this
+ * way it is not possible for a protocol implementation to forge messages for
+ * another protocol.
+ */
+struct scmi_xfer_ops {
+       int (*version_get)(const struct scmi_protocol_handle *ph, u32 *version);
+       int (*xfer_get_init)(const struct scmi_protocol_handle *ph, u8 msg_id,
+                            size_t tx_size, size_t rx_size,
+                            struct scmi_xfer **p);
+       void (*reset_rx_to_maxsz)(const struct scmi_protocol_handle *ph,
+                                 struct scmi_xfer *xfer);
+       int (*do_xfer)(const struct scmi_protocol_handle *ph,
+                      struct scmi_xfer *xfer);
+       int (*do_xfer_with_response)(const struct scmi_protocol_handle *ph,
+                                    struct scmi_xfer *xfer);
+       void (*xfer_put)(const struct scmi_protocol_handle *ph,
+                        struct scmi_xfer *xfer);
+};
+
+typedef int (*scmi_prot_init_ph_fn_t)(const struct scmi_protocol_handle *);
+
+/**
+ * struct scmi_protocol  - Protocol descriptor
+ * @id: Protocol ID.
+ * @owner: Module reference if any.
+ * @instance_init: Mandatory protocol initialization function.
+ * @instance_deinit: Optional protocol de-initialization function.
+ * @ops: Optional reference to the operations provided by the protocol and
+ *      exposed in scmi_protocol.h.
+ * @events: An optional reference to the events supported by this protocol.
+ */
+struct scmi_protocol {
+       const u8                                id;
+       struct module                           *owner;
+       const scmi_prot_init_ph_fn_t            instance_init;
+       const scmi_prot_init_ph_fn_t            instance_deinit;
+       const void                              *ops;
+       const struct scmi_protocol_events       *events;
+};
+
+#define DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(name, proto)  \
+static const struct scmi_protocol *__this_proto = &(proto);    \
+                                                               \
+int __init scmi_##name##_register(void)                                \
+{                                                              \
+       return scmi_protocol_register(__this_proto);            \
+}                                                              \
+                                                               \
+void __exit scmi_##name##_unregister(void)                     \
+{                                                              \
+       scmi_protocol_unregister(__this_proto);                 \
+}
+
+#define DECLARE_SCMI_REGISTER_UNREGISTER(func)         \
+       int __init scmi_##func##_register(void);        \
+       void __exit scmi_##func##_unregister(void)
+DECLARE_SCMI_REGISTER_UNREGISTER(base);
+DECLARE_SCMI_REGISTER_UNREGISTER(clock);
+DECLARE_SCMI_REGISTER_UNREGISTER(perf);
+DECLARE_SCMI_REGISTER_UNREGISTER(power);
+DECLARE_SCMI_REGISTER_UNREGISTER(reset);
+DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
+DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
+DECLARE_SCMI_REGISTER_UNREGISTER(system);
+
+#endif /* _SCMI_PROTOCOLS_H */
index 9bf2478..a420a91 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * System Control and Management Interface (SCMI) Reset Protocol
  *
- * Copyright (C) 2019-2021 ARM Ltd.
+ * Copyright (C) 2019-2022 ARM Ltd.
  */
 
 #define pr_fmt(fmt) "SCMI Notifications RESET - " fmt
 #include <linux/module.h>
 #include <linux/scmi_protocol.h>
 
-#include "common.h"
+#include "protocols.h"
 #include "notify.h"
 
 enum scmi_reset_protocol_cmd {
        RESET_DOMAIN_ATTRIBUTES = 0x3,
        RESET = 0x4,
        RESET_NOTIFY = 0x5,
+       RESET_DOMAIN_NAME_GET = 0x6,
 };
 
 #define NUM_RESET_DOMAIN_MASK  0xffff
@@ -26,8 +27,9 @@ struct scmi_msg_resp_reset_domain_attributes {
        __le32 attributes;
 #define SUPPORTS_ASYNC_RESET(x)                ((x) & BIT(31))
 #define SUPPORTS_NOTIFY_RESET(x)       ((x) & BIT(30))
+#define SUPPORTS_EXTENDED_NAMES(x)     ((x) & BIT(29))
        __le32 latency;
-           u8 name[SCMI_MAX_STR_SIZE];
+       u8 name[SCMI_SHORT_NAME_MAX_SIZE];
 };
 
 struct scmi_msg_reset_domain_reset {
@@ -89,9 +91,11 @@ static int scmi_reset_attributes_get(const struct scmi_protocol_handle *ph,
 
 static int
 scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
-                                u32 domain, struct reset_dom_info *dom_info)
+                                u32 domain, struct reset_dom_info *dom_info,
+                                u32 version)
 {
        int ret;
+       u32 attributes;
        struct scmi_xfer *t;
        struct scmi_msg_resp_reset_domain_attributes *attr;
 
@@ -105,7 +109,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
 
        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
-               u32 attributes = le32_to_cpu(attr->attributes);
+               attributes = le32_to_cpu(attr->attributes);
 
                dom_info->async_reset = SUPPORTS_ASYNC_RESET(attributes);
                dom_info->reset_notify = SUPPORTS_NOTIFY_RESET(attributes);
@@ -116,6 +120,16 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
        }
 
        ph->xops->xfer_put(ph, t);
+
+       /*
+        * If supported overwrite short name with the extended one;
+        * on error just carry on and use already provided short name.
+        */
+       if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x3 &&
+           SUPPORTS_EXTENDED_NAMES(attributes))
+               ph->hops->extended_name_get(ph, RESET_DOMAIN_NAME_GET, domain,
+                                           dom_info->name, SCMI_MAX_STR_SIZE);
+
        return ret;
 }
 
@@ -126,8 +140,8 @@ static int scmi_reset_num_domains_get(const struct scmi_protocol_handle *ph)
        return pi->num_domains;
 }
 
-static char *scmi_reset_name_get(const struct scmi_protocol_handle *ph,
-                                u32 domain)
+static const char *
+scmi_reset_name_get(const struct scmi_protocol_handle *ph, u32 domain)
 {
        struct scmi_reset_info *pi = ph->get_priv(ph);
 
@@ -293,11 +307,13 @@ static const struct scmi_protocol_events reset_protocol_events = {
 
 static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
 {
-       int domain;
+       int domain, ret;
        u32 version;
        struct scmi_reset_info *pinfo;
 
-       ph->xops->version_get(ph, &version);
+       ret = ph->xops->version_get(ph, &version);
+       if (ret)
+               return ret;
 
        dev_dbg(ph->dev, "Reset Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
@@ -306,7 +322,9 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
        if (!pinfo)
                return -ENOMEM;
 
-       scmi_reset_attributes_get(ph, pinfo);
+       ret = scmi_reset_attributes_get(ph, pinfo);
+       if (ret)
+               return ret;
 
        pinfo->dom_info = devm_kcalloc(ph->dev, pinfo->num_domains,
                                       sizeof(*pinfo->dom_info), GFP_KERNEL);
@@ -316,7 +334,7 @@ static int scmi_reset_protocol_init(const struct scmi_protocol_handle *ph)
        for (domain = 0; domain < pinfo->num_domains; domain++) {
                struct reset_dom_info *dom = pinfo->dom_info + domain;
 
-               scmi_reset_domain_attributes_get(ph, domain, dom);
+               scmi_reset_domain_attributes_get(ph, domain, dom, version);
        }
 
        pinfo->version = version;
index cdbb287..21e0ce8 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * System Control and Management Interface (SCMI) Sensor Protocol
  *
- * Copyright (C) 2018-2021 ARM Ltd.
+ * Copyright (C) 2018-2022 ARM Ltd.
  */
 
 #define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
@@ -11,7 +11,7 @@
 #include <linux/module.h>
 #include <linux/scmi_protocol.h>
 
-#include "common.h"
+#include "protocols.h"
 #include "notify.h"
 
 #define SCMI_MAX_NUM_SENSOR_AXIS       63
@@ -27,6 +27,8 @@ enum scmi_sensor_protocol_cmd {
        SENSOR_CONFIG_GET = 0x9,
        SENSOR_CONFIG_SET = 0xA,
        SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
+       SENSOR_NAME_GET = 0xC,
+       SENSOR_AXIS_NAME_GET = 0xD,
 };
 
 struct scmi_msg_resp_sensor_attributes {
@@ -63,6 +65,10 @@ struct scmi_msg_resp_attrs {
        __le32 max_range_high;
 };
 
+struct scmi_msg_sensor_description {
+       __le32 desc_index;
+};
+
 struct scmi_msg_resp_sensor_description {
        __le16 num_returned;
        __le16 num_remaining;
@@ -71,6 +77,7 @@ struct scmi_msg_resp_sensor_description {
                __le32 attributes_low;
 /* Common attributes_low macros */
 #define SUPPORTS_ASYNC_READ(x)         FIELD_GET(BIT(31), (x))
+#define SUPPORTS_EXTENDED_NAMES(x)     FIELD_GET(BIT(29), (x))
 #define NUM_TRIP_POINTS(x)             FIELD_GET(GENMASK(7, 0), (x))
                __le32 attributes_high;
 /* Common attributes_high macros */
@@ -78,7 +85,7 @@ struct scmi_msg_resp_sensor_description {
 #define SENSOR_SCALE_SIGN              BIT(4)
 #define SENSOR_SCALE_EXTEND            GENMASK(31, 5)
 #define SENSOR_TYPE(x)                 FIELD_GET(GENMASK(7, 0), (x))
-               u8 name[SCMI_MAX_STR_SIZE];
+               u8 name[SCMI_SHORT_NAME_MAX_SIZE];
                /* only for version > 2.0 */
                __le32 power;
                __le32 resolution;
@@ -111,13 +118,22 @@ struct scmi_msg_resp_sensor_axis_description {
        struct scmi_axis_descriptor {
                __le32 id;
                __le32 attributes_low;
+#define SUPPORTS_EXTENDED_AXIS_NAMES(x)        FIELD_GET(BIT(9), (x))
                __le32 attributes_high;
-               u8 name[SCMI_MAX_STR_SIZE];
+               u8 name[SCMI_SHORT_NAME_MAX_SIZE];
                __le32 resolution;
                struct scmi_msg_resp_attrs attrs;
        } desc[];
 };
 
+struct scmi_msg_resp_sensor_axis_names_description {
+       __le32 num_axis_flags;
+       struct scmi_sensor_axis_name_descriptor {
+               __le32 axis_id;
+               u8 name[SCMI_MAX_STR_SIZE];
+       } desc[];
+};
+
 /* Base scmi_axis_descriptor size excluding extended attrs after name */
 #define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ       28
 
@@ -231,335 +247,414 @@ static int scmi_sensor_attributes_get(const struct scmi_protocol_handle *ph,
 }
 
 static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
-                                         struct scmi_msg_resp_attrs *in)
+                                         const struct scmi_msg_resp_attrs *in)
 {
        out->min_range = get_unaligned_le64((void *)&in->min_range_low);
        out->max_range = get_unaligned_le64((void *)&in->max_range_low);
 }
 
-static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
-                                       struct scmi_sensor_info *s)
-{
-       int ret, cnt;
-       u32 desc_index = 0;
-       u16 num_returned, num_remaining;
-       struct scmi_xfer *ti;
-       struct scmi_msg_resp_sensor_list_update_intervals *buf;
-       struct scmi_msg_sensor_list_update_intervals *msg;
-
-       ret = ph->xops->xfer_get_init(ph, SENSOR_LIST_UPDATE_INTERVALS,
-                                     sizeof(*msg), 0, &ti);
-       if (ret)
-               return ret;
-
-       buf = ti->rx.buf;
-       do {
-               u32 flags;
-
-               msg = ti->tx.buf;
-               /* Set the number of sensors to be skipped/already read */
-               msg->id = cpu_to_le32(s->id);
-               msg->index = cpu_to_le32(desc_index);
+struct scmi_sens_ipriv {
+       void *priv;
+       struct device *dev;
+};
 
-               ret = ph->xops->do_xfer(ph, ti);
-               if (ret)
-                       break;
+static void iter_intervals_prepare_message(void *message,
+                                          unsigned int desc_index,
+                                          const void *p)
+{
+       struct scmi_msg_sensor_list_update_intervals *msg = message;
+       const struct scmi_sensor_info *s;
 
-               flags = le32_to_cpu(buf->num_intervals_flags);
-               num_returned = NUM_INTERVALS_RETURNED(flags);
-               num_remaining = NUM_INTERVALS_REMAINING(flags);
+       s = ((const struct scmi_sens_ipriv *)p)->priv;
+       /* Set the number of sensors to be skipped/already read */
+       msg->id = cpu_to_le32(s->id);
+       msg->index = cpu_to_le32(desc_index);
+}
 
-               /*
-                * Max intervals is not declared previously anywhere so we
-                * assume it's returned+remaining.
-                */
-               if (!s->intervals.count) {
-                       s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
-                       s->intervals.count = num_returned + num_remaining;
-                       /* segmented intervals are reported in one triplet */
-                       if (s->intervals.segmented &&
-                           (num_remaining || num_returned != 3)) {
-                               dev_err(ph->dev,
-                                       "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
-                                       s->id, s->intervals.count);
+static int iter_intervals_update_state(struct scmi_iterator_state *st,
+                                      const void *response, void *p)
+{
+       u32 flags;
+       struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
+       struct device *dev = ((struct scmi_sens_ipriv *)p)->dev;
+       const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
+
+       flags = le32_to_cpu(r->num_intervals_flags);
+       st->num_returned = NUM_INTERVALS_RETURNED(flags);
+       st->num_remaining = NUM_INTERVALS_REMAINING(flags);
+
+       /*
+        * Max intervals is not declared previously anywhere so we
+        * assume it's returned+remaining on first call.
+        */
+       if (!st->max_resources) {
+               s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
+               s->intervals.count = st->num_returned + st->num_remaining;
+               /* segmented intervals are reported in one triplet */
+               if (s->intervals.segmented &&
+                   (st->num_remaining || st->num_returned != 3)) {
+                       dev_err(dev,
+                               "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
+                               s->id, s->intervals.count);
+                       s->intervals.segmented = false;
+                       s->intervals.count = 0;
+                       return -EINVAL;
+               }
+               /* Direct allocation when exceeding pre-allocated */
+               if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
+                       s->intervals.desc =
+                               devm_kcalloc(dev,
+                                            s->intervals.count,
+                                            sizeof(*s->intervals.desc),
+                                            GFP_KERNEL);
+                       if (!s->intervals.desc) {
                                s->intervals.segmented = false;
                                s->intervals.count = 0;
-                               ret = -EINVAL;
-                               break;
-                       }
-                       /* Direct allocation when exceeding pre-allocated */
-                       if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
-                               s->intervals.desc =
-                                       devm_kcalloc(ph->dev,
-                                                    s->intervals.count,
-                                                    sizeof(*s->intervals.desc),
-                                                    GFP_KERNEL);
-                               if (!s->intervals.desc) {
-                                       s->intervals.segmented = false;
-                                       s->intervals.count = 0;
-                                       ret = -ENOMEM;
-                                       break;
-                               }
+                               return -ENOMEM;
                        }
-               } else if (desc_index + num_returned > s->intervals.count) {
-                       dev_err(ph->dev,
-                               "No. of update intervals can't exceed %d\n",
-                               s->intervals.count);
-                       ret = -EINVAL;
-                       break;
                }
 
-               for (cnt = 0; cnt < num_returned; cnt++)
-                       s->intervals.desc[desc_index + cnt] =
-                                       le32_to_cpu(buf->intervals[cnt]);
+               st->max_resources = s->intervals.count;
+       }
+
+       return 0;
+}
 
-               desc_index += num_returned;
+static int
+iter_intervals_process_response(const struct scmi_protocol_handle *ph,
+                               const void *response,
+                               struct scmi_iterator_state *st, void *p)
+{
+       const struct scmi_msg_resp_sensor_list_update_intervals *r = response;
+       struct scmi_sensor_info *s = ((struct scmi_sens_ipriv *)p)->priv;
 
-               ph->xops->reset_rx_to_maxsz(ph, ti);
-               /*
-                * check for both returned and remaining to avoid infinite
-                * loop due to buggy firmware
-                */
-       } while (num_returned && num_remaining);
+       s->intervals.desc[st->desc_index + st->loop_idx] =
+               le32_to_cpu(r->intervals[st->loop_idx]);
 
-       ph->xops->xfer_put(ph, ti);
-       return ret;
+       return 0;
 }
 
-static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
+static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph,
                                        struct scmi_sensor_info *s)
 {
-       int ret, cnt;
-       u32 desc_index = 0;
-       u16 num_returned, num_remaining;
-       struct scmi_xfer *te;
-       struct scmi_msg_resp_sensor_axis_description *buf;
-       struct scmi_msg_sensor_axis_description_get *msg;
+       void *iter;
+       struct scmi_msg_sensor_list_update_intervals *msg;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_intervals_prepare_message,
+               .update_state = iter_intervals_update_state,
+               .process_response = iter_intervals_process_response,
+       };
+       struct scmi_sens_ipriv upriv = {
+               .priv = s,
+               .dev = ph->dev,
+       };
+
+       iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count,
+                                           SENSOR_LIST_UPDATE_INTERVALS,
+                                           sizeof(*msg), &upriv);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       return ph->hops->iter_response_run(iter);
+}
 
-       s->axis = devm_kcalloc(ph->dev, s->num_axis,
-                              sizeof(*s->axis), GFP_KERNEL);
-       if (!s->axis)
-               return -ENOMEM;
+static void iter_axes_desc_prepare_message(void *message,
+                                          const unsigned int desc_index,
+                                          const void *priv)
+{
+       struct scmi_msg_sensor_axis_description_get *msg = message;
+       const struct scmi_sensor_info *s = priv;
 
-       ret = ph->xops->xfer_get_init(ph, SENSOR_AXIS_DESCRIPTION_GET,
-                                     sizeof(*msg), 0, &te);
-       if (ret)
-               return ret;
+       /* Set the number of sensors to be skipped/already read */
+       msg->id = cpu_to_le32(s->id);
+       msg->axis_desc_index = cpu_to_le32(desc_index);
+}
 
-       buf = te->rx.buf;
-       do {
-               u32 flags;
-               struct scmi_axis_descriptor *adesc;
+static int
+iter_axes_desc_update_state(struct scmi_iterator_state *st,
+                           const void *response, void *priv)
+{
+       u32 flags;
+       const struct scmi_msg_resp_sensor_axis_description *r = response;
 
-               msg = te->tx.buf;
-               /* Set the number of sensors to be skipped/already read */
-               msg->id = cpu_to_le32(s->id);
-               msg->axis_desc_index = cpu_to_le32(desc_index);
+       flags = le32_to_cpu(r->num_axis_flags);
+       st->num_returned = NUM_AXIS_RETURNED(flags);
+       st->num_remaining = NUM_AXIS_REMAINING(flags);
+       st->priv = (void *)&r->desc[0];
 
-               ret = ph->xops->do_xfer(ph, te);
-               if (ret)
-                       break;
+       return 0;
+}
 
-               flags = le32_to_cpu(buf->num_axis_flags);
-               num_returned = NUM_AXIS_RETURNED(flags);
-               num_remaining = NUM_AXIS_REMAINING(flags);
+static int
+iter_axes_desc_process_response(const struct scmi_protocol_handle *ph,
+                               const void *response,
+                               struct scmi_iterator_state *st, void *priv)
+{
+       u32 attrh, attrl;
+       struct scmi_sensor_axis_info *a;
+       size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+       struct scmi_sensor_info *s = priv;
+       const struct scmi_axis_descriptor *adesc = st->priv;
 
-               if (desc_index + num_returned > s->num_axis) {
-                       dev_err(ph->dev, "No. of axis can't exceed %d\n",
-                               s->num_axis);
-                       break;
-               }
+       attrl = le32_to_cpu(adesc->attributes_low);
 
-               adesc = &buf->desc[0];
-               for (cnt = 0; cnt < num_returned; cnt++) {
-                       u32 attrh, attrl;
-                       struct scmi_sensor_axis_info *a;
-                       size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+       a = &s->axis[st->desc_index + st->loop_idx];
+       a->id = le32_to_cpu(adesc->id);
+       a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
 
-                       attrl = le32_to_cpu(adesc->attributes_low);
+       attrh = le32_to_cpu(adesc->attributes_high);
+       a->scale = S32_EXT(SENSOR_SCALE(attrh));
+       a->type = SENSOR_TYPE(attrh);
+       strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
 
-                       a = &s->axis[desc_index + cnt];
+       if (a->extended_attrs) {
+               unsigned int ares = le32_to_cpu(adesc->resolution);
 
-                       a->id = le32_to_cpu(adesc->id);
-                       a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+               a->resolution = SENSOR_RES(ares);
+               a->exponent = S32_EXT(SENSOR_RES_EXP(ares));
+               dsize += sizeof(adesc->resolution);
 
-                       attrh = le32_to_cpu(adesc->attributes_high);
-                       a->scale = S32_EXT(SENSOR_SCALE(attrh));
-                       a->type = SENSOR_TYPE(attrh);
-                       strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+               scmi_parse_range_attrs(&a->attrs, &adesc->attrs);
+               dsize += sizeof(adesc->attrs);
+       }
+       st->priv = ((u8 *)adesc + dsize);
+
+       return 0;
+}
 
-                       if (a->extended_attrs) {
-                               unsigned int ares =
-                                       le32_to_cpu(adesc->resolution);
+static int
+iter_axes_extended_name_update_state(struct scmi_iterator_state *st,
+                                    const void *response, void *priv)
+{
+       u32 flags;
+       const struct scmi_msg_resp_sensor_axis_names_description *r = response;
 
-                               a->resolution = SENSOR_RES(ares);
-                               a->exponent =
-                                       S32_EXT(SENSOR_RES_EXP(ares));
-                               dsize += sizeof(adesc->resolution);
+       flags = le32_to_cpu(r->num_axis_flags);
+       st->num_returned = NUM_AXIS_RETURNED(flags);
+       st->num_remaining = NUM_AXIS_REMAINING(flags);
+       st->priv = (void *)&r->desc[0];
 
-                               scmi_parse_range_attrs(&a->attrs,
-                                                      &adesc->attrs);
-                               dsize += sizeof(adesc->attrs);
-                       }
+       return 0;
+}
 
-                       adesc = (typeof(adesc))((u8 *)adesc + dsize);
-               }
+static int
+iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph,
+                                        const void *response,
+                                        struct scmi_iterator_state *st,
+                                        void *priv)
+{
+       struct scmi_sensor_axis_info *a;
+       const struct scmi_sensor_info *s = priv;
+       struct scmi_sensor_axis_name_descriptor *adesc = st->priv;
 
-               desc_index += num_returned;
+       a = &s->axis[st->desc_index + st->loop_idx];
+       strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+       st->priv = ++adesc;
 
-               ph->xops->reset_rx_to_maxsz(ph, te);
-               /*
-                * check for both returned and remaining to avoid infinite
-                * loop due to buggy firmware
-                */
-       } while (num_returned && num_remaining);
+       return 0;
+}
 
-       ph->xops->xfer_put(ph, te);
-       return ret;
+static int
+scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph,
+                                   struct scmi_sensor_info *s)
+{
+       void *iter;
+       struct scmi_msg_sensor_axis_description_get *msg;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_axes_desc_prepare_message,
+               .update_state = iter_axes_extended_name_update_state,
+               .process_response = iter_axes_extended_name_process_response,
+       };
+
+       iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
+                                           SENSOR_AXIS_NAME_GET,
+                                           sizeof(*msg), s);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       return ph->hops->iter_response_run(iter);
 }
 
-static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
-                                      struct sensors_info *si)
+static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph,
+                                       struct scmi_sensor_info *s,
+                                       u32 version)
 {
-       int ret, cnt;
-       u32 desc_index = 0;
-       u16 num_returned, num_remaining;
-       struct scmi_xfer *t;
-       struct scmi_msg_resp_sensor_description *buf;
+       int ret;
+       void *iter;
+       struct scmi_msg_sensor_axis_description_get *msg;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_axes_desc_prepare_message,
+               .update_state = iter_axes_desc_update_state,
+               .process_response = iter_axes_desc_process_response,
+       };
 
-       ret = ph->xops->xfer_get_init(ph, SENSOR_DESCRIPTION_GET,
-                                     sizeof(__le32), 0, &t);
+       s->axis = devm_kcalloc(ph->dev, s->num_axis,
+                              sizeof(*s->axis), GFP_KERNEL);
+       if (!s->axis)
+               return -ENOMEM;
+
+       iter = ph->hops->iter_response_init(ph, &ops, s->num_axis,
+                                           SENSOR_AXIS_DESCRIPTION_GET,
+                                           sizeof(*msg), s);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       ret = ph->hops->iter_response_run(iter);
        if (ret)
                return ret;
 
-       buf = t->rx.buf;
-
-       do {
-               struct scmi_sensor_descriptor *sdesc;
+       if (PROTOCOL_REV_MAJOR(version) >= 0x3)
+               ret = scmi_sensor_axis_extended_names_get(ph, s);
 
-               /* Set the number of sensors to be skipped/already read */
-               put_unaligned_le32(desc_index, t->tx.buf);
+       return ret;
+}
 
-               ret = ph->xops->do_xfer(ph, t);
-               if (ret)
-                       break;
+static void iter_sens_descr_prepare_message(void *message,
+                                           unsigned int desc_index,
+                                           const void *priv)
+{
+       struct scmi_msg_sensor_description *msg = message;
 
-               num_returned = le16_to_cpu(buf->num_returned);
-               num_remaining = le16_to_cpu(buf->num_remaining);
+       msg->desc_index = cpu_to_le32(desc_index);
+}
 
-               if (desc_index + num_returned > si->num_sensors) {
-                       dev_err(ph->dev, "No. of sensors can't exceed %d",
-                               si->num_sensors);
-                       break;
-               }
+static int iter_sens_descr_update_state(struct scmi_iterator_state *st,
+                                       const void *response, void *priv)
+{
+       const struct scmi_msg_resp_sensor_description *r = response;
 
-               sdesc = &buf->desc[0];
-               for (cnt = 0; cnt < num_returned; cnt++) {
-                       u32 attrh, attrl;
-                       struct scmi_sensor_info *s;
-                       size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
-
-                       s = &si->sensors[desc_index + cnt];
-                       s->id = le32_to_cpu(sdesc->id);
-
-                       attrl = le32_to_cpu(sdesc->attributes_low);
-                       /* common bitfields parsing */
-                       s->async = SUPPORTS_ASYNC_READ(attrl);
-                       s->num_trip_points = NUM_TRIP_POINTS(attrl);
-                       /**
-                        * only SCMIv3.0 specific bitfield below.
-                        * Such bitfields are assumed to be zeroed on non
-                        * relevant fw versions...assuming fw not buggy !
-                        */
-                       s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
-                       s->timestamped = SUPPORTS_TIMESTAMP(attrl);
-                       if (s->timestamped)
-                               s->tstamp_scale =
-                                       S32_EXT(SENSOR_TSTAMP_EXP(attrl));
-                       s->extended_scalar_attrs =
-                               SUPPORTS_EXTEND_ATTRS(attrl);
-
-                       attrh = le32_to_cpu(sdesc->attributes_high);
-                       /* common bitfields parsing */
-                       s->scale = S32_EXT(SENSOR_SCALE(attrh));
-                       s->type = SENSOR_TYPE(attrh);
-                       /* Use pre-allocated pool wherever possible */
-                       s->intervals.desc = s->intervals.prealloc_pool;
-                       if (si->version == SCMIv2_SENSOR_PROTOCOL) {
-                               s->intervals.segmented = false;
-                               s->intervals.count = 1;
-                               /*
-                                * Convert SCMIv2.0 update interval format to
-                                * SCMIv3.0 to be used as the common exposed
-                                * descriptor, accessible via common macros.
-                                */
-                               s->intervals.desc[0] =
-                                       (SENSOR_UPDATE_BASE(attrh) << 5) |
-                                        SENSOR_UPDATE_SCALE(attrh);
-                       } else {
-                               /*
-                                * From SCMIv3.0 update intervals are retrieved
-                                * via a dedicated (optional) command.
-                                * Since the command is optional, on error carry
-                                * on without any update interval.
-                                */
-                               if (scmi_sensor_update_intervals(ph, s))
-                                       dev_dbg(ph->dev,
-                                               "Update Intervals not available for sensor ID:%d\n",
-                                               s->id);
-                       }
-                       /**
-                        * only > SCMIv2.0 specific bitfield below.
-                        * Such bitfields are assumed to be zeroed on non
-                        * relevant fw versions...assuming fw not buggy !
-                        */
-                       s->num_axis = min_t(unsigned int,
-                                           SUPPORTS_AXIS(attrh) ?
-                                           SENSOR_AXIS_NUMBER(attrh) : 0,
-                                           SCMI_MAX_NUM_SENSOR_AXIS);
-                       strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
-
-                       if (s->extended_scalar_attrs) {
-                               s->sensor_power = le32_to_cpu(sdesc->power);
-                               dsize += sizeof(sdesc->power);
-                               /* Only for sensors reporting scalar values */
-                               if (s->num_axis == 0) {
-                                       unsigned int sres =
-                                               le32_to_cpu(sdesc->resolution);
-
-                                       s->resolution = SENSOR_RES(sres);
-                                       s->exponent =
-                                               S32_EXT(SENSOR_RES_EXP(sres));
-                                       dsize += sizeof(sdesc->resolution);
-
-                                       scmi_parse_range_attrs(&s->scalar_attrs,
-                                                              &sdesc->scalar_attrs);
-                                       dsize += sizeof(sdesc->scalar_attrs);
-                               }
-                       }
-                       if (s->num_axis > 0) {
-                               ret = scmi_sensor_axis_description(ph, s);
-                               if (ret)
-                                       goto out;
-                       }
+       st->num_returned = le16_to_cpu(r->num_returned);
+       st->num_remaining = le16_to_cpu(r->num_remaining);
+       st->priv = (void *)&r->desc[0];
 
-                       sdesc = (typeof(sdesc))((u8 *)sdesc + dsize);
-               }
+       return 0;
+}
 
-               desc_index += num_returned;
+static int
+iter_sens_descr_process_response(const struct scmi_protocol_handle *ph,
+                                const void *response,
+                                struct scmi_iterator_state *st, void *priv)
 
-               ph->xops->reset_rx_to_maxsz(ph, t);
+{
+       int ret = 0;
+       u32 attrh, attrl;
+       size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
+       struct scmi_sensor_info *s;
+       struct sensors_info *si = priv;
+       const struct scmi_sensor_descriptor *sdesc = st->priv;
+
+       s = &si->sensors[st->desc_index + st->loop_idx];
+       s->id = le32_to_cpu(sdesc->id);
+
+       attrl = le32_to_cpu(sdesc->attributes_low);
+       /* common bitfields parsing */
+       s->async = SUPPORTS_ASYNC_READ(attrl);
+       s->num_trip_points = NUM_TRIP_POINTS(attrl);
+       /**
+        * only SCMIv3.0 specific bitfield below.
+        * Such bitfields are assumed to be zeroed on non
+        * relevant fw versions...assuming fw not buggy !
+        */
+       s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+       s->timestamped = SUPPORTS_TIMESTAMP(attrl);
+       if (s->timestamped)
+               s->tstamp_scale = S32_EXT(SENSOR_TSTAMP_EXP(attrl));
+       s->extended_scalar_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+       attrh = le32_to_cpu(sdesc->attributes_high);
+       /* common bitfields parsing */
+       s->scale = S32_EXT(SENSOR_SCALE(attrh));
+       s->type = SENSOR_TYPE(attrh);
+       /* Use pre-allocated pool wherever possible */
+       s->intervals.desc = s->intervals.prealloc_pool;
+       if (si->version == SCMIv2_SENSOR_PROTOCOL) {
+               s->intervals.segmented = false;
+               s->intervals.count = 1;
+               /*
+                * Convert SCMIv2.0 update interval format to
+                * SCMIv3.0 to be used as the common exposed
+                * descriptor, accessible via common macros.
+                */
+               s->intervals.desc[0] = (SENSOR_UPDATE_BASE(attrh) << 5) |
+                                       SENSOR_UPDATE_SCALE(attrh);
+       } else {
                /*
-                * check for both returned and remaining to avoid infinite
-                * loop due to buggy firmware
+                * From SCMIv3.0 update intervals are retrieved
+                * via a dedicated (optional) command.
+                * Since the command is optional, on error carry
+                * on without any update interval.
                 */
-       } while (num_returned && num_remaining);
+               if (scmi_sensor_update_intervals(ph, s))
+                       dev_dbg(ph->dev,
+                               "Update Intervals not available for sensor ID:%d\n",
+                               s->id);
+       }
+       /**
+        * only > SCMIv2.0 specific bitfield below.
+        * Such bitfields are assumed to be zeroed on non
+        * relevant fw versions...assuming fw not buggy !
+        */
+       s->num_axis = min_t(unsigned int,
+                           SUPPORTS_AXIS(attrh) ?
+                           SENSOR_AXIS_NUMBER(attrh) : 0,
+                           SCMI_MAX_NUM_SENSOR_AXIS);
+       strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+
+       /*
+        * If supported overwrite short name with the extended
+        * one; on error just carry on and use already provided
+        * short name.
+        */
+       if (PROTOCOL_REV_MAJOR(si->version) >= 0x3 &&
+           SUPPORTS_EXTENDED_NAMES(attrl))
+               ph->hops->extended_name_get(ph, SENSOR_NAME_GET, s->id,
+                                           s->name, SCMI_MAX_STR_SIZE);
+
+       if (s->extended_scalar_attrs) {
+               s->sensor_power = le32_to_cpu(sdesc->power);
+               dsize += sizeof(sdesc->power);
+
+               /* Only for sensors reporting scalar values */
+               if (s->num_axis == 0) {
+                       unsigned int sres = le32_to_cpu(sdesc->resolution);
+
+                       s->resolution = SENSOR_RES(sres);
+                       s->exponent = S32_EXT(SENSOR_RES_EXP(sres));
+                       dsize += sizeof(sdesc->resolution);
+
+                       scmi_parse_range_attrs(&s->scalar_attrs,
+                                              &sdesc->scalar_attrs);
+                       dsize += sizeof(sdesc->scalar_attrs);
+               }
+       }
+
+       if (s->num_axis > 0)
+               ret = scmi_sensor_axis_description(ph, s, si->version);
+
+       st->priv = ((u8 *)sdesc + dsize);
 
-out:
-       ph->xops->xfer_put(ph, t);
        return ret;
 }
 
+static int scmi_sensor_description_get(const struct scmi_protocol_handle *ph,
+                                      struct sensors_info *si)
+{
+       void *iter;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_sens_descr_prepare_message,
+               .update_state = iter_sens_descr_update_state,
+               .process_response = iter_sens_descr_process_response,
+       };
+
+       iter = ph->hops->iter_response_init(ph, &ops, si->num_sensors,
+                                           SENSOR_DESCRIPTION_GET,
+                                           sizeof(__le32), si);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       return ph->hops->iter_response_run(iter);
+}
+
 static inline int
 scmi_sensor_request_notify(const struct scmi_protocol_handle *ph, u32 sensor_id,
                           u8 message_id, bool enable)
@@ -966,7 +1061,9 @@ static int scmi_sensors_protocol_init(const struct scmi_protocol_handle *ph)
        int ret;
        struct sensors_info *sinfo;
 
-       ph->xops->version_get(ph, &version);
+       ret = ph->xops->version_get(ph, &version);
+       if (ret)
+               return ret;
 
        dev_dbg(ph->dev, "Sensor Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
index e5175ef..220e399 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * System Control and Management Interface (SCMI) System Power Protocol
  *
- * Copyright (C) 2020-2021 ARM Ltd.
+ * Copyright (C) 2020-2022 ARM Ltd.
  */
 
 #define pr_fmt(fmt) "SCMI Notifications SYSTEM - " fmt
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/scmi_protocol.h>
 
-#include "common.h"
+#include "protocols.h"
 #include "notify.h"
 
 #define SCMI_SYSTEM_NUM_SOURCES                1
@@ -113,10 +113,13 @@ static const struct scmi_protocol_events system_protocol_events = {
 
 static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph)
 {
+       int ret;
        u32 version;
        struct scmi_system_info *pinfo;
 
-       ph->xops->version_get(ph, &version);
+       ret = ph->xops->version_get(ph, &version);
+       if (ret)
+               return ret;
 
        dev_dbg(ph->dev, "System Power Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
index ac08e81..9d195d8 100644 (file)
@@ -2,13 +2,13 @@
 /*
  * System Control and Management Interface (SCMI) Voltage Protocol
  *
- * Copyright (C) 2020-2021 ARM Ltd.
+ * Copyright (C) 2020-2022 ARM Ltd.
  */
 
 #include <linux/module.h>
 #include <linux/scmi_protocol.h>
 
-#include "common.h"
+#include "protocols.h"
 
 #define VOLTAGE_DOMS_NUM_MASK          GENMASK(15, 0)
 #define REMAINING_LEVELS_MASK          GENMASK(31, 16)
@@ -21,13 +21,16 @@ enum scmi_voltage_protocol_cmd {
        VOLTAGE_CONFIG_GET = 0x6,
        VOLTAGE_LEVEL_SET = 0x7,
        VOLTAGE_LEVEL_GET = 0x8,
+       VOLTAGE_DOMAIN_NAME_GET = 0x09,
 };
 
 #define NUM_VOLTAGE_DOMAINS(x) ((u16)(FIELD_GET(VOLTAGE_DOMS_NUM_MASK, (x))))
 
 struct scmi_msg_resp_domain_attributes {
        __le32 attr;
-       u8 name[SCMI_MAX_STR_SIZE];
+#define SUPPORTS_ASYNC_LEVEL_SET(x)    ((x) & BIT(31))
+#define SUPPORTS_EXTENDED_NAMES(x)     ((x) & BIT(30))
+       u8 name[SCMI_SHORT_NAME_MAX_SIZE];
 };
 
 struct scmi_msg_cmd_describe_levels {
@@ -54,6 +57,11 @@ struct scmi_msg_cmd_level_set {
        __le32 voltage_level;
 };
 
+struct scmi_resp_voltage_level_set_complete {
+       __le32 domain_id;
+       __le32 voltage_level;
+};
+
 struct voltage_info {
        unsigned int version;
        unsigned int num_domains;
@@ -110,14 +118,100 @@ static int scmi_init_voltage_levels(struct device *dev,
        return 0;
 }
 
+struct scmi_volt_ipriv {
+       struct device *dev;
+       struct scmi_voltage_info *v;
+};
+
+static void iter_volt_levels_prepare_message(void *message,
+                                            unsigned int desc_index,
+                                            const void *priv)
+{
+       struct scmi_msg_cmd_describe_levels *msg = message;
+       const struct scmi_volt_ipriv *p = priv;
+
+       msg->domain_id = cpu_to_le32(p->v->id);
+       msg->level_index = cpu_to_le32(desc_index);
+}
+
+static int iter_volt_levels_update_state(struct scmi_iterator_state *st,
+                                        const void *response, void *priv)
+{
+       int ret = 0;
+       u32 flags;
+       const struct scmi_msg_resp_describe_levels *r = response;
+       struct scmi_volt_ipriv *p = priv;
+
+       flags = le32_to_cpu(r->flags);
+       st->num_returned = NUM_RETURNED_LEVELS(flags);
+       st->num_remaining = NUM_REMAINING_LEVELS(flags);
+
+       /* Allocate space for num_levels if not already done */
+       if (!p->v->num_levels) {
+               ret = scmi_init_voltage_levels(p->dev, p->v, st->num_returned,
+                                              st->num_remaining,
+                                             SUPPORTS_SEGMENTED_LEVELS(flags));
+               if (!ret)
+                       st->max_resources = p->v->num_levels;
+       }
+
+       return ret;
+}
+
+static int
+iter_volt_levels_process_response(const struct scmi_protocol_handle *ph,
+                                 const void *response,
+                                 struct scmi_iterator_state *st, void *priv)
+{
+       s32 val;
+       const struct scmi_msg_resp_describe_levels *r = response;
+       struct scmi_volt_ipriv *p = priv;
+
+       val = (s32)le32_to_cpu(r->voltage[st->loop_idx]);
+       p->v->levels_uv[st->desc_index + st->loop_idx] = val;
+       if (val < 0)
+               p->v->negative_volts_allowed = true;
+
+       return 0;
+}
+
+static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph,
+                                  struct scmi_voltage_info *v)
+{
+       int ret;
+       void *iter;
+       struct scmi_msg_cmd_describe_levels *msg;
+       struct scmi_iterator_ops ops = {
+               .prepare_message = iter_volt_levels_prepare_message,
+               .update_state = iter_volt_levels_update_state,
+               .process_response = iter_volt_levels_process_response,
+       };
+       struct scmi_volt_ipriv vpriv = {
+               .dev = ph->dev,
+               .v = v,
+       };
+
+       iter = ph->hops->iter_response_init(ph, &ops, v->num_levels,
+                                           VOLTAGE_DESCRIBE_LEVELS,
+                                           sizeof(*msg), &vpriv);
+       if (IS_ERR(iter))
+               return PTR_ERR(iter);
+
+       ret = ph->hops->iter_response_run(iter);
+       if (ret) {
+               v->num_levels = 0;
+               devm_kfree(ph->dev, v->levels_uv);
+       }
+
+       return ret;
+}
+
 static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
                                        struct voltage_info *vinfo)
 {
        int ret, dom;
-       struct scmi_xfer *td, *tl;
-       struct device *dev = ph->dev;
+       struct scmi_xfer *td;
        struct scmi_msg_resp_domain_attributes *resp_dom;
-       struct scmi_msg_resp_describe_levels *resp_levels;
 
        ret = ph->xops->xfer_get_init(ph, VOLTAGE_DOMAIN_ATTRIBUTES,
                                      sizeof(__le32), sizeof(*resp_dom), &td);
@@ -125,16 +219,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
                return ret;
        resp_dom = td->rx.buf;
 
-       ret = ph->xops->xfer_get_init(ph, VOLTAGE_DESCRIBE_LEVELS,
-                                     sizeof(__le64), 0, &tl);
-       if (ret)
-               goto outd;
-       resp_levels = tl->rx.buf;
-
        for (dom = 0; dom < vinfo->num_domains; dom++) {
-               u32 desc_index = 0;
-               u16 num_returned = 0, num_remaining = 0;
-               struct scmi_msg_cmd_describe_levels *cmd;
+               u32 attributes;
                struct scmi_voltage_info *v;
 
                /* Retrieve domain attributes at first ... */
@@ -146,69 +232,31 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
 
                v = vinfo->domains + dom;
                v->id = dom;
-               v->attributes = le32_to_cpu(resp_dom->attr);
+               attributes = le32_to_cpu(resp_dom->attr);
                strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE);
 
-               cmd = tl->tx.buf;
-               /* ...then retrieve domain levels descriptions */
-               do {
-                       u32 flags;
-                       int cnt;
-
-                       cmd->domain_id = cpu_to_le32(v->id);
-                       cmd->level_index = cpu_to_le32(desc_index);
-                       ret = ph->xops->do_xfer(ph, tl);
-                       if (ret)
-                               break;
-
-                       flags = le32_to_cpu(resp_levels->flags);
-                       num_returned = NUM_RETURNED_LEVELS(flags);
-                       num_remaining = NUM_REMAINING_LEVELS(flags);
-
-                       /* Allocate space for num_levels if not already done */
-                       if (!v->num_levels) {
-                               ret = scmi_init_voltage_levels(dev, v,
-                                                              num_returned,
-                                                              num_remaining,
-                                             SUPPORTS_SEGMENTED_LEVELS(flags));
-                               if (ret)
-                                       break;
-                       }
-
-                       if (desc_index + num_returned > v->num_levels) {
-                               dev_err(ph->dev,
-                                       "No. of voltage levels can't exceed %d\n",
-                                       v->num_levels);
-                               ret = -EINVAL;
-                               break;
-                       }
-
-                       for (cnt = 0; cnt < num_returned; cnt++) {
-                               s32 val;
-
-                               val =
-                                   (s32)le32_to_cpu(resp_levels->voltage[cnt]);
-                               v->levels_uv[desc_index + cnt] = val;
-                               if (val < 0)
-                                       v->negative_volts_allowed = true;
-                       }
-
-                       desc_index += num_returned;
-
-                       ph->xops->reset_rx_to_maxsz(ph, tl);
-                       /* check both to avoid infinite loop due to buggy fw */
-               } while (num_returned && num_remaining);
-
-               if (ret) {
-                       v->num_levels = 0;
-                       devm_kfree(dev, v->levels_uv);
+               /*
+                * If supported overwrite short name with the extended one;
+                * on error just carry on and use already provided short name.
+                */
+               if (PROTOCOL_REV_MAJOR(vinfo->version) >= 0x2) {
+                       if (SUPPORTS_EXTENDED_NAMES(attributes))
+                               ph->hops->extended_name_get(ph,
+                                                       VOLTAGE_DOMAIN_NAME_GET,
+                                                       v->id, v->name,
+                                                       SCMI_MAX_STR_SIZE);
+                       if (SUPPORTS_ASYNC_LEVEL_SET(attributes))
+                               v->async_level_set = true;
                }
 
+               ret = scmi_voltage_levels_get(ph, v);
+               /* Skip invalid voltage descriptors */
+               if (ret)
+                       continue;
+
                ph->xops->reset_rx_to_maxsz(ph, td);
        }
 
-       ph->xops->xfer_put(ph, tl);
-outd:
        ph->xops->xfer_put(ph, td);
 
        return ret;
@@ -271,12 +319,15 @@ static int scmi_voltage_config_get(const struct scmi_protocol_handle *ph,
 }
 
 static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
-                                 u32 domain_id, u32 flags, s32 volt_uV)
+                                 u32 domain_id,
+                                 enum scmi_voltage_level_mode mode,
+                                 s32 volt_uV)
 {
        int ret;
        struct scmi_xfer *t;
        struct voltage_info *vinfo = ph->get_priv(ph);
        struct scmi_msg_cmd_level_set *cmd;
+       struct scmi_voltage_info *v;
 
        if (domain_id >= vinfo->num_domains)
                return -EINVAL;
@@ -286,12 +337,31 @@ static int scmi_voltage_level_set(const struct scmi_protocol_handle *ph,
        if (ret)
                return ret;
 
+       v = vinfo->domains + domain_id;
+
        cmd = t->tx.buf;
        cmd->domain_id = cpu_to_le32(domain_id);
-       cmd->flags = cpu_to_le32(flags);
        cmd->voltage_level = cpu_to_le32(volt_uV);
 
-       ret = ph->xops->do_xfer(ph, t);
+       if (!v->async_level_set || mode != SCMI_VOLTAGE_LEVEL_SET_AUTO) {
+               cmd->flags = cpu_to_le32(0x0);
+               ret = ph->xops->do_xfer(ph, t);
+       } else {
+               cmd->flags = cpu_to_le32(0x1);
+               ret = ph->xops->do_xfer_with_response(ph, t);
+               if (!ret) {
+                       struct scmi_resp_voltage_level_set_complete *resp;
+
+                       resp = t->rx.buf;
+                       if (le32_to_cpu(resp->domain_id) == domain_id)
+                               dev_dbg(ph->dev,
+                                       "Voltage domain %d set async to %d\n",
+                                       v->id,
+                                       le32_to_cpu(resp->voltage_level));
+                       else
+                               ret = -EPROTO;
+               }
+       }
 
        ph->xops->xfer_put(ph, t);
        return ret;
index 8e5d879..41c31b1 100644 (file)
@@ -134,7 +134,7 @@ static int gpio_sim_get_multiple(struct gpio_chip *gc,
        struct gpio_sim_chip *chip = gpiochip_get_data(gc);
 
        mutex_lock(&chip->lock);
-       bitmap_copy(bits, chip->value_map, gc->ngpio);
+       bitmap_replace(bits, bits, chip->value_map, mask, gc->ngpio);
        mutex_unlock(&chip->lock);
 
        return 0;
@@ -146,7 +146,7 @@ static void gpio_sim_set_multiple(struct gpio_chip *gc,
        struct gpio_sim_chip *chip = gpiochip_get_data(gc);
 
        mutex_lock(&chip->lock);
-       bitmap_copy(chip->value_map, bits, gc->ngpio);
+       bitmap_replace(chip->value_map, chip->value_map, bits, mask, gc->ngpio);
        mutex_unlock(&chip->lock);
 }
 
index a5495ad..c2523ac 100644 (file)
@@ -108,7 +108,7 @@ static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
  * controller does not have GPIO chip registered at the moment. This is to
  * support probe deferral.
  */
-static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
+static struct gpio_desc *acpi_get_gpiod(char *path, unsigned int pin)
 {
        struct gpio_chip *chip;
        acpi_handle handle;
@@ -136,7 +136,7 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
  * as it is intended for use outside of the GPIO layer (in a similar fashion to
  * gpiod_get_index() for example) it also holds a reference to the GPIO device.
  */
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label)
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label)
 {
        struct gpio_desc *gpio;
        int ret;
@@ -317,11 +317,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
        return desc;
 }
 
-static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
+static bool acpi_gpio_in_ignore_list(const char *controller_in, unsigned int pin_in)
 {
        const char *controller, *pin_str;
-       int len, pin;
+       unsigned int pin;
        char *endp;
+       int len;
 
        controller = ignore_wake;
        while (controller) {
@@ -354,13 +355,13 @@ err:
 static bool acpi_gpio_irq_is_wake(struct device *parent,
                                  struct acpi_resource_gpio *agpio)
 {
-       int pin = agpio->pin_table[0];
+       unsigned int pin = agpio->pin_table[0];
 
        if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
                return false;
 
        if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
-               dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
+               dev_info(parent, "Ignoring wakeup on pin %u\n", pin);
                return false;
        }
 
@@ -378,7 +379,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        struct acpi_gpio_event *event;
        irq_handler_t handler = NULL;
        struct gpio_desc *desc;
-       int ret, pin, irq;
+       unsigned int pin;
+       int ret, irq;
 
        if (!acpi_gpio_get_irq_resource(ares, &agpio))
                return AE_OK;
@@ -387,8 +389,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
        pin = agpio->pin_table[0];
 
        if (pin <= 255) {
-               char ev_name[5];
-               sprintf(ev_name, "_%c%02hhX",
+               char ev_name[8];
+               sprintf(ev_name, "_%c%02X",
                        agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L',
                        pin);
                if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
@@ -1098,7 +1100,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 
        length = min_t(u16, agpio->pin_table_length, pin_index + bits);
        for (i = pin_index; i < length; ++i) {
-               int pin = agpio->pin_table[i];
+               unsigned int pin = agpio->pin_table[i];
                struct acpi_gpio_connection *conn;
                struct gpio_desc *desc;
                bool found;
index e59884c..085348e 100644 (file)
@@ -1404,6 +1404,16 @@ static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
 {
        struct irq_domain *domain = gc->irq.domain;
 
+#ifdef CONFIG_GPIOLIB_IRQCHIP
+       /*
+        * Avoid race condition with other code, which tries to lookup
+        * an IRQ before the irqchip has been properly registered,
+        * i.e. while gpiochip is still being brought up.
+        */
+       if (!gc->irq.initialized)
+               return -EPROBE_DEFER;
+#endif
+
        if (!gpiochip_irqchip_irq_valid(gc, offset))
                return -ENXIO;
 
@@ -1593,6 +1603,15 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
        acpi_gpiochip_request_interrupts(gc);
 
+       /*
+        * Using barrier() here to prevent compiler from reordering
+        * gc->irq.initialized before initialization of above
+        * GPIO chip irq members.
+        */
+       barrier();
+
+       gc->irq.initialized = true;
+
        return 0;
 }
 
index 5b39362..a0f0a17 100644 (file)
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
 #define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
+#define CONNECTOR_OBJECT_ID_USBC                  0x17
 
 /* deleted */
 
index 3987ecb..49f7341 100644 (file)
@@ -5733,7 +5733,7 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
 {
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU)
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
                return;
 #endif
        if (adev->gmc.xgmi.connected_to_cpu)
@@ -5749,7 +5749,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
                struct amdgpu_ring *ring)
 {
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU)
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
                return;
 #endif
        if (adev->gmc.xgmi.connected_to_cpu)
index bb1c025..29e9419 100644 (file)
@@ -680,7 +680,7 @@ MODULE_PARM_DESC(sched_policy,
  * Maximum number of processes that HWS can schedule concurrently. The maximum is the
  * number of VMIDs assigned to the HWS, which is also the default.
  */
-int hws_max_conc_proc = 8;
+int hws_max_conc_proc = -1;
 module_param(hws_max_conc_proc, int, 0444);
 MODULE_PARM_DESC(hws_max_conc_proc,
        "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
@@ -2323,18 +2323,23 @@ static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
-       int r;
 
        if (amdgpu_acpi_is_s0ix_active(adev))
                adev->in_s0ix = true;
        else
                adev->in_s3 = true;
-       r = amdgpu_device_suspend(drm_dev, true);
-       if (r)
-               return r;
+       return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+       struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
        if (!adev->in_s0ix)
-               r = amdgpu_asic_reset(adev);
-       return r;
+               return amdgpu_asic_reset(adev);
+
+       return 0;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
@@ -2575,6 +2580,7 @@ static const struct dev_pm_ops amdgpu_pm_ops = {
        .prepare = amdgpu_pmops_prepare,
        .complete = amdgpu_pmops_complete,
        .suspend = amdgpu_pmops_suspend,
+       .suspend_noirq = amdgpu_pmops_suspend_noirq,
        .resume = amdgpu_pmops_resume,
        .freeze = amdgpu_pmops_freeze,
        .thaw = amdgpu_pmops_thaw,
index 8fe9399..28a736c 100644 (file)
@@ -266,7 +266,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
                    * adev->gfx.mec.num_pipe_per_mec
                    * adev->gfx.mec.num_queue_per_pipe;
 
-       while (queue_bit-- >= 0) {
+       while (--queue_bit >= 0) {
                if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
                        continue;
 
index ca2cfb6..a66a088 100644 (file)
@@ -561,9 +561,15 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
 
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(9, 0, 1):
+       case IP_VERSION(9, 3, 0):
        case IP_VERSION(9, 4, 0):
        case IP_VERSION(9, 4, 1):
        case IP_VERSION(9, 4, 2):
+       case IP_VERSION(10, 3, 3):
+       case IP_VERSION(10, 3, 4):
+       case IP_VERSION(10, 3, 5):
+       case IP_VERSION(10, 3, 6):
+       case IP_VERSION(10, 3, 7):
                /*
                 * noretry = 0 will cause kfd page fault tests fail
                 * for some ASICs, so set default to 1 for these ASICs.
index 2573171..9407524 100644 (file)
@@ -1284,6 +1284,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
  */
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 {
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
        struct dma_fence *fence = NULL;
        struct amdgpu_bo *abo;
        int r;
@@ -1303,7 +1304,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
                amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
        if (bo->resource->mem_type != TTM_PL_VRAM ||
-           !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+           !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+           adev->in_suspend || adev->shutdown)
                return;
 
        if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
index 5320bb0..317d802 100644 (file)
@@ -300,8 +300,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
-                    unsigned int ring_size, struct amdgpu_irq_src *irq_src,
-                    unsigned int irq_type, unsigned int prio,
+                    unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+                    unsigned int irq_type, unsigned int hw_prio,
                     atomic_t *sched_score);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
 void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
index f99093f..a0ee828 100644 (file)
@@ -52,7 +52,7 @@
 #define FIRMWARE_ALDEBARAN     "amdgpu/aldebaran_vcn.bin"
 #define FIRMWARE_BEIGE_GOBY    "amdgpu/beige_goby_vcn.bin"
 #define FIRMWARE_YELLOW_CARP   "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2     "amdgpu/vcn_3_1_2_vcn.bin"
+#define FIRMWARE_VCN_3_1_2     "amdgpu/vcn_3_1_2.bin"
 
 MODULE_FIRMWARE(FIRMWARE_RAVEN);
 MODULE_FIRMWARE(FIRMWARE_PICASSO);
index e2fde88..f06fb7f 100644 (file)
 #define AMDGPU_VCN_MULTI_QUEUE_FLAG    (1 << 8)
 #define AMDGPU_VCN_SW_RING_FLAG                (1 << 9)
 #define AMDGPU_VCN_FW_LOGGING_FLAG     (1 << 10)
+#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
 
 #define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER       0x00000001
 #define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER         0x00000001
@@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging {
        uint32_t size;
 };
 
+struct amdgpu_fw_shared_smu_interface_info {
+       uint8_t smu_interface_type;
+       uint8_t padding[3];
+};
+
 struct amdgpu_fw_shared {
        uint32_t present_flag_0;
        uint8_t pad[44];
@@ -287,6 +293,7 @@ struct amdgpu_fw_shared {
        struct amdgpu_fw_shared_multi_queue multi_queue;
        struct amdgpu_fw_shared_sw_ring sw_ring;
        struct amdgpu_fw_shared_fw_logging fw_log;
+       struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
 };
 
 struct amdgpu_vcn_fwlog {
index f4c6acc..9426e25 100644 (file)
@@ -3293,7 +3293,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3429,7 +3429,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3454,7 +3454,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -7689,6 +7689,7 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(10, 3, 1):
        case IP_VERSION(10, 3, 3):
+       case IP_VERSION(10, 3, 7):
                preempt_disable();
                clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
                clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
index 46d4bf2..b8cfcc6 100644 (file)
@@ -1205,6 +1205,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
        /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+       /* Apple MacBook Pro (15-inch, 2019) Radeon Pro Vega 20 4 GB */
+       { 0x1002, 0x69af, 0x106b, 0x019a, 0xc0 },
        { 0, 0, 0, 0, 0 },
 };
 
index 3c1d440..5228421 100644 (file)
@@ -814,7 +814,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU) {
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
index 344d819..979da6f 100644 (file)
@@ -381,8 +381,9 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU &&
-           adev->gmc.real_vram_size > adev->gmc.aper_size) {
+       if ((adev->flags & AMD_IS_APU) &&
+           adev->gmc.real_vram_size > adev->gmc.aper_size &&
+           !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
index ca9841d..1932a3e 100644 (file)
@@ -581,7 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
-       if (adev->flags & AMD_IS_APU) {
+       if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) {
                adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
index 431742e..6009fbf 100644 (file)
@@ -1456,7 +1456,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
         */
 
        /* check whether both host-gpu and gpu-gpu xgmi links exist */
-       if ((adev->flags & AMD_IS_APU) ||
+       if (((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
            (adev->gmc.xgmi.supported &&
             adev->gmc.xgmi.connected_to_cpu)) {
                adev->gmc.aper_base =
@@ -1721,7 +1721,7 @@ static int gmc_v9_0_sw_fini(void *handle)
        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        amdgpu_gart_table_vram_free(adev);
-       amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+       amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
        amdgpu_bo_fini(adev);
 
        return 0;
index dff5419..f0fbcda 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/firmware.h>
 
 #include "amdgpu.h"
+#include "amdgpu_cs.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_pm.h"
 #include "soc15.h"
@@ -1900,6 +1901,75 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .set_powergating_state = vcn_v1_0_set_powergating_state,
 };
 
+/*
+ * It is a hardware issue that VCN can't handle a GTT TMZ buffer on
+ * CHIP_RAVEN series ASIC. Move such a GTT TMZ buffer to VRAM domain
+ * before command submission as a workaround.
+ */
+static int vcn_v1_0_validate_bo(struct amdgpu_cs_parser *parser,
+                               struct amdgpu_job *job,
+                               uint64_t addr)
+{
+       struct ttm_operation_ctx ctx = { false, false };
+       struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+       struct amdgpu_vm *vm = &fpriv->vm;
+       struct amdgpu_bo_va_mapping *mapping;
+       struct amdgpu_bo *bo;
+       int r;
+
+       addr &= AMDGPU_GMC_HOLE_MASK;
+       if (addr & 0x7) {
+               DRM_ERROR("VCN messages must be 8 byte aligned!\n");
+               return -EINVAL;
+       }
+
+       mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
+       if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+               return -EINVAL;
+
+       bo = mapping->bo_va->base.bo;
+       if (!(bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED))
+               return 0;
+
+       amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+       r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       if (r) {
+               DRM_ERROR("Failed to validate the VCN message BO (%d)!\n", r);
+               return r;
+       }
+
+       return r;
+}
+
+static int vcn_v1_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+                                          struct amdgpu_job *job,
+                                          struct amdgpu_ib *ib)
+{
+       uint32_t msg_lo = 0, msg_hi = 0;
+       int i, r;
+
+       if (!(ib->flags & AMDGPU_IB_FLAGS_SECURE))
+               return 0;
+
+       for (i = 0; i < ib->length_dw; i += 2) {
+               uint32_t reg = amdgpu_ib_get_value(ib, i);
+               uint32_t val = amdgpu_ib_get_value(ib, i + 1);
+
+               if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
+                       msg_lo = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.data1, 0)) {
+                       msg_hi = val;
+               } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0)) {
+                       r = vcn_v1_0_validate_bo(p, job,
+                                                ((u64)msg_hi) << 32 | msg_lo);
+                       if (r)
+                               return r;
+               }
+       }
+
+       return 0;
+}
+
 static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .type = AMDGPU_RING_TYPE_VCN_DEC,
        .align_mask = 0xf,
@@ -1910,6 +1980,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .get_rptr = vcn_v1_0_dec_ring_get_rptr,
        .get_wptr = vcn_v1_0_dec_ring_get_wptr,
        .set_wptr = vcn_v1_0_dec_ring_set_wptr,
+       .patch_cs_in_place = vcn_v1_0_ring_patch_cs_in_place,
        .emit_frame_size =
                6 + 6 + /* hdp invalidate / flush */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
index c87263e..cb5f0a1 100644 (file)
@@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle)
                                             cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
                                             cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
                fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
+               fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
+               if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
+                       fw_shared->smu_interface_info.smu_interface_type = 2;
+               else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
+                       fw_shared->smu_interface_info.smu_interface_type = 1;
 
                if (amdgpu_vcnfw_log)
                        amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -575,8 +580,8 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
                        AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
 
        /* VCN global tiling registers */
-       WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
-               UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+       WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
+               UVD, inst_idx, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
 }
 
 static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
@@ -1480,8 +1485,11 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
 
 static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 {
+       struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
        uint32_t tmp;
 
+       vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
+
        /* Wait for power status to be 1 */
        SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
                UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
index 339e12c..62aa6c9 100644 (file)
@@ -483,15 +483,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        }
 
        /* Verify module parameters regarding mapped process number*/
-       if ((hws_max_conc_proc < 0)
-                       || (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
-               dev_err(kfd_device,
-                       "hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
-                       hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
-                       kfd->vm_info.vmid_num_kfd);
+       if (hws_max_conc_proc >= 0)
+               kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
+       else
                kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
-       } else
-               kfd->max_proc_per_quantum = hws_max_conc_proc;
 
        /* calculate max size of mqds needed for queues */
        size = max_num_of_queues_per_device *
@@ -536,7 +531,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                goto kfd_doorbell_error;
        }
 
-       kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
+       if (amdgpu_use_xgmi_p2p)
+               kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;
 
        kfd->noretry = kfd->adev->gmc.noretry;
 
index deeccce..64f4a51 100644 (file)
@@ -749,6 +749,8 @@ static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
        event_waiters = kmalloc_array(num_events,
                                        sizeof(struct kfd_event_waiter),
                                        GFP_KERNEL);
+       if (!event_waiters)
+               return NULL;
 
        for (i = 0; (event_waiters) && (i < num_events) ; i++) {
                init_wait(&event_waiters[i].wait);
index e4beebb..f2e1d50 100644 (file)
@@ -247,15 +247,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
                return ret;
        }
 
-       ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
-                              O_RDWR);
-       if (ret < 0) {
-               kfifo_free(&client->fifo);
-               kfree(client);
-               return ret;
-       }
-       *fd = ret;
-
        init_waitqueue_head(&client->wait_queue);
        spin_lock_init(&client->lock);
        client->events = 0;
@@ -265,5 +256,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
        list_add_rcu(&client->list, &dev->smi_clients);
        spin_unlock(&dev->smi_lock);
 
+       ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
+                              O_RDWR);
+       if (ret < 0) {
+               spin_lock(&dev->smi_lock);
+               list_del_rcu(&client->list);
+               spin_unlock(&dev->smi_lock);
+
+               synchronize_rcu();
+
+               kfifo_free(&client->fifo);
+               kfree(client);
+               return ret;
+       }
+       *fd = ret;
+
        return 0;
 }
index b306569..62139ff 100644 (file)
@@ -2714,7 +2714,8 @@ static int dm_resume(void *handle)
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->mst_port)
+               if (aconnector->dc_link &&
+                   aconnector->dc_link->type == dc_connection_mst_branch)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
@@ -3972,7 +3973,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
                                 max - min);
 }
 
-static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
                                         int bl_idx,
                                         u32 user_brightness)
 {
@@ -4003,7 +4004,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
                        DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
        }
 
-       return rc ? 0 : 1;
+       if (rc)
+               dm->actual_brightness[bl_idx] = user_brightness;
 }
 
 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -9947,7 +9949,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        /* restore the backlight level */
        for (i = 0; i < dm->num_of_edps; i++) {
                if (dm->backlight_dev[i] &&
-                   (amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
+                   (dm->actual_brightness[i] != dm->brightness[i]))
                        amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
        }
 #endif
index 6a908d7..7e44b04 100644 (file)
@@ -540,6 +540,12 @@ struct amdgpu_display_manager {
         * cached backlight values.
         */
        u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+       /**
+        * @actual_brightness:
+        *
+        * last successfully applied backlight values.
+        */
+       u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
 };
 
 enum dsc_clock_force_state {
index dfba613..26feefb 100644 (file)
@@ -374,7 +374,7 @@ void dce_clock_read_ss_info(struct clk_mgr_internal *clk_mgr_dce)
                                clk_mgr_dce->dprefclk_ss_percentage =
                                                info.spread_spectrum_percentage;
                        }
-                       if (clk_mgr_dce->base.ctx->dc->debug.ignore_dpref_ss)
+                       if (clk_mgr_dce->base.ctx->dc->config.ignore_dpref_ss)
                                clk_mgr_dce->dprefclk_ss_percentage = 0;
                }
        }
index edda572..8be4c19 100644 (file)
@@ -436,57 +436,84 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
                struct integrated_info *bios_info,
                const DpmClocks_315_t *clock_table)
 {
-       int i, j;
+       int i;
        struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
-       uint32_t max_dispclk = 0, max_dppclk = 0;
-
-       j = -1;
-
-       ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
-
-       /* Find lowest DPM, FCLK is filled in reverse order*/
-
-       for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
-               if (clock_table->DfPstateTable[i].FClk != 0) {
-                       j = i;
-                       break;
+       uint32_t max_dispclk, max_dppclk, max_pstate, max_socclk, max_fclk = 0, min_pstate = 0;
+       struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+
+       max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+       max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+       max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled);
+
+       /* Find highest fclk pstate */
+       for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
+               if (clock_table->DfPstateTable[i].FClk > max_fclk) {
+                       max_fclk = clock_table->DfPstateTable[i].FClk;
+                       max_pstate = i;
                }
        }
 
-       if (j == -1) {
-               /* clock table is all 0s, just use our own hardcode */
-               ASSERT(0);
-               return;
-       }
-
-       bw_params->clk_table.num_entries = j + 1;
-
-       /* dispclk and dppclk can be max at any voltage, same number of levels for both */
-       if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
-           clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
-               max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
-               max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
-       } else {
-               ASSERT(0);
-       }
+       /* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */
+       for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
+               int j;
+               uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
 
-       for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
-               int temp;
+               for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
+                       if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]
+                                       && clock_table->DfPstateTable[j].FClk < min_fclk) {
+                               min_fclk = clock_table->DfPstateTable[j].FClk;
+                               min_pstate = j;
+                       }
+               }
 
-               bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
-               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
-               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
+               bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
+               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
+               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
+               bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+               bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+               bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+               bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
                bw_params->clk_table.entries[i].wck_ratio = 1;
-               temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
-               if (temp)
-                       bw_params->clk_table.entries[i].dcfclk_mhz = temp;
-               temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
-               if (temp)
-                       bw_params->clk_table.entries[i].socclk_mhz = temp;
+       };
+
+       /* Make sure to include at least one entry and highest pstate */
+       if (max_pstate != min_pstate) {
+               bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+               bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
+               bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
+               bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(
+                               clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage);
+               bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(
+                               clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage);
                bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
                bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+               bw_params->clk_table.entries[i].wck_ratio = 1;
+               i++;
        }
+       bw_params->clk_table.num_entries = i;
+
+       /* Include highest socclk */
+       if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk)
+               bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk;
 
+       /* Set any 0 clocks to max default setting. Not an issue for
+        * power since we aren't doing switching in such case anyway
+        */
+       for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+               if (!bw_params->clk_table.entries[i].fclk_mhz) {
+                       bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+                       bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+                       bw_params->clk_table.entries[i].voltage = def_max.voltage;
+               }
+               if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+                       bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+               if (!bw_params->clk_table.entries[i].socclk_mhz)
+                       bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+               if (!bw_params->clk_table.entries[i].dispclk_mhz)
+                       bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+               if (!bw_params->clk_table.entries[i].dppclk_mhz)
+                       bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+       }
        bw_params->vram_type = bios_info->memory_type;
        bw_params->num_channels = bios_info->ma_channel_number;
 
index 880ffea..2600313 100644 (file)
@@ -80,8 +80,8 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D
 #define VBIOSSMC_MSG_SetDppclkFreq                0x06 ///< Set DPP clock frequency in MHZ
 #define VBIOSSMC_MSG_SetHardMinDcfclkByFreq       0x07 ///< Set DCF clock frequency hard min in MHZ
 #define VBIOSSMC_MSG_SetMinDeepSleepDcfclk        0x08 ///< Set DCF clock minimum frequency in deep sleep in MHZ
-#define VBIOSSMC_MSG_SetPhyclkVoltageByFreq       0x09 ///< Set display phy clock frequency in MHZ in case VMIN does not support phy frequency
-#define VBIOSSMC_MSG_GetFclkFrequency             0x0A ///< Get FCLK frequency, return frequemcy in MHZ
+#define VBIOSSMC_MSG_GetDtbclkFreq                0x09 ///< Get display dtb clock frequency in MHZ in case VMIN does not support phy frequency
+#define VBIOSSMC_MSG_SetDtbClk                    0x0A ///< Set dtb clock frequency, return frequemcy in MHZ
 #define VBIOSSMC_MSG_SetDisplayCount              0x0B ///< Inform PMFW of number of display connected
 #define VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x0C ///< To ask PMFW turn off TMDP 48MHz refclk during display off to save power
 #define VBIOSSMC_MSG_UpdatePmeRestore             0x0D ///< To ask PMFW to write into Azalia for PME wake up event
@@ -324,15 +324,26 @@ int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr)
        return (dprefclk_get_mhz * 1000);
 }
 
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr)
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
 {
        int fclk_get_mhz = -1;
 
        if (clk_mgr->smu_present) {
                fclk_get_mhz = dcn315_smu_send_msg_with_param(
                        clk_mgr,
-                       VBIOSSMC_MSG_GetFclkFrequency,
+                       VBIOSSMC_MSG_GetDtbclkFreq,
                        0);
        }
        return (fclk_get_mhz * 1000);
 }
+
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
+{
+       if (!clk_mgr->smu_present)
+               return;
+
+       dcn315_smu_send_msg_with_param(
+                       clk_mgr,
+                       VBIOSSMC_MSG_SetDtbClk,
+                       enable);
+}
index 66fa42f..5aa3275 100644 (file)
@@ -37,6 +37,7 @@
 #define NUM_SOC_VOLTAGE_LEVELS  4
 #define NUM_DF_PSTATE_LEVELS    4
 
+
 typedef struct {
   uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
   uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
@@ -124,5 +125,6 @@ void dcn315_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
 void dcn315_smu_request_voltage_via_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz);
 void dcn315_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr);
 int dcn315_smu_get_dpref_clk(struct clk_mgr_internal *clk_mgr);
-int dcn315_smu_get_smu_fclk(struct clk_mgr_internal *clk_mgr);
+int dcn315_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr);
+void dcn315_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable);
 #endif /* DAL_DC_315_SMU_H_ */
index 702d00c..3121dd2 100644 (file)
@@ -686,8 +686,8 @@ void dcn316_clk_mgr_construct(
        clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base);
        clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz;
        dce_clock_read_ss_info(&clk_mgr->base);
-       clk_mgr->base.dccg->ref_dtbclk_khz =
-       dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);
+       /*clk_mgr->base.dccg->ref_dtbclk_khz =
+       dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/
 
        clk_mgr->base.base.bw_params = &dcn316_bw_params;
 
index f6e19ef..c436db4 100644 (file)
@@ -2389,6 +2389,8 @@ static enum surface_update_type check_update_surfaces_for_stream(
 
                if (stream_update->mst_bw_update)
                        su_flags->bits.mst_bw = 1;
+               if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
+                       su_flags->bits.crtc_timing_adjust = 1;
 
                if (su_flags->raw != 0)
                        overall_type = UPDATE_TYPE_FULL;
@@ -2650,6 +2652,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
        if (update->vrr_infopacket)
                stream->vrr_infopacket = *update->vrr_infopacket;
 
+       if (update->crtc_timing_adjust)
+               stream->adjust = *update->crtc_timing_adjust;
+
        if (update->dpms_off)
                stream->dpms_off = *update->dpms_off;
 
@@ -4051,3 +4056,17 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
        if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
                pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
 }
+/*
+ * dc_extended_blank_supported: Decide whether extended blank is supported
+ *
+ * Extended blank is a freesync optimization feature to be enabled in the future.
+ * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
+ *
+ * @param [in] dc: Current DC state
+ * @return: Indicate whether extended blank is supported (true or false)
+ */
+bool dc_extended_blank_supported(struct dc *dc)
+{
+       return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
+               && dc->caps.zstate_support && dc->caps.is_apu;
+}
index cb87dd6..bbaa5ab 100644 (file)
@@ -983,8 +983,7 @@ static bool should_verify_link_capability_destructively(struct dc_link *link,
                                destrictive = false;
                        }
                }
-       } else if (dc_is_hdmi_signal(link->local_sink->sink_signal))
-               destrictive = true;
+       }
 
        return destrictive;
 }
index 351081f..22dabe5 100644 (file)
@@ -5216,6 +5216,62 @@ static void retrieve_cable_id(struct dc_link *link)
                                &link->dpcd_caps.cable_id, &usbc_cable_id);
 }
 
+/* DPRX may take some time to respond to AUX messages after HPD asserted.
+ * If AUX read unsuccessful, try to wake unresponsive DPRX by toggling DPCD SET_POWER (0x600).
+ */
+static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms)
+{
+       enum dc_status status = DC_ERROR_UNEXPECTED;
+       uint8_t dpcd_data = 0;
+       uint64_t start_ts = 0;
+       uint64_t current_ts = 0;
+       uint64_t time_taken_ms = 0;
+       enum dc_connection_type type = dc_connection_none;
+
+       status = core_link_read_dpcd(
+                       link,
+                       DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+                       &dpcd_data,
+                       sizeof(dpcd_data));
+
+       if (status != DC_OK) {
+               DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
+                               __func__,
+                               timeout_ms);
+               start_ts = dm_get_timestamp(link->ctx);
+
+               do {
+                       if (!dc_link_detect_sink(link, &type) || type == dc_connection_none)
+                               break;
+
+                       dpcd_data = DP_SET_POWER_D3;
+                       status = core_link_write_dpcd(
+                                       link,
+                                       DP_SET_POWER,
+                                       &dpcd_data,
+                                       sizeof(dpcd_data));
+
+                       dpcd_data = DP_SET_POWER_D0;
+                       status = core_link_write_dpcd(
+                                       link,
+                                       DP_SET_POWER,
+                                       &dpcd_data,
+                                       sizeof(dpcd_data));
+
+                       current_ts = dm_get_timestamp(link->ctx);
+                       time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000);
+               } while (status != DC_OK && time_taken_ms < timeout_ms);
+
+               DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s",
+                               __func__,
+                               (status == DC_OK) ? "succeeded" : "failed",
+                               time_taken_ms,
+                               (type == dc_connection_none) ? ". Unplugged." : ".");
+       }
+
+       return status;
+}
+
 static bool retrieve_link_cap(struct dc_link *link)
 {
        /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5251,6 +5307,15 @@ static bool retrieve_link_cap(struct dc_link *link)
        dc_link_aux_try_to_configure_timeout(link->ddc,
                        LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
 
+       /* Try to ensure AUX channel active before proceeding. */
+       if (link->dc->debug.aux_wake_wa.bits.enable_wa) {
+               uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms;
+
+               if (link->dc->debug.aux_wake_wa.bits.use_default_timeout)
+                       timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS;
+               status = wa_try_to_wake_dprx(link, timeout_ms);
+       }
+
        is_lttpr_present = dp_retrieve_lttpr_cap(link);
        /* Read DP tunneling information. */
        status = dpcd_get_tunneling_device_data(link);
index 7af1534..d251c3f 100644 (file)
@@ -1685,8 +1685,8 @@ bool dc_is_stream_unchanged(
        if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
                return false;
 
-       // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
-       if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+       /*compare audio info*/
+       if (memcmp(&old_stream->audio_info, &stream->audio_info, sizeof(stream->audio_info)) != 0)
                return false;
 
        return true;
index 4ffab7b..9e79f60 100644 (file)
@@ -188,6 +188,7 @@ struct dc_caps {
        bool psp_setup_panel_mode;
        bool extended_aux_timeout_support;
        bool dmcub_support;
+       bool zstate_support;
        uint32_t num_of_internal_disp;
        enum dp_protocol_version max_dp_protocol_version;
        unsigned int mall_size_per_mem_channel;
@@ -339,6 +340,7 @@ struct dc_config {
        bool is_asymmetric_memory;
        bool is_single_rank_dimm;
        bool use_pipe_ctx_sync_logic;
+       bool ignore_dpref_ss;
 };
 
 enum visual_confirm {
@@ -525,6 +527,22 @@ union dpia_debug_options {
        uint32_t raw;
 };
 
+/* AUX wake work around options
+ * 0: enable/disable work around
+ * 1: use default timeout LINK_AUX_WAKE_TIMEOUT_MS
+ * 15-2: reserved
+ * 31-16: timeout in ms
+ */
+union aux_wake_wa_options {
+       struct {
+               uint32_t enable_wa : 1;
+               uint32_t use_default_timeout : 1;
+               uint32_t rsvd: 14;
+               uint32_t timeout_ms : 16;
+       } bits;
+       uint32_t raw;
+};
+
 struct dc_debug_data {
        uint32_t ltFailCount;
        uint32_t i2cErrorCount;
@@ -703,14 +721,15 @@ struct dc_debug_options {
        bool enable_driver_sequence_debug;
        enum det_size crb_alloc_policy;
        int crb_alloc_policy_min_disp_count;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool disable_z10;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
        bool enable_z9_disable_interface;
        bool enable_sw_cntl_psr;
        union dpia_debug_options dpia_debug;
 #endif
        bool apply_vendor_specific_lttpr_wa;
-       bool ignore_dpref_ss;
+       bool extended_blank_optimization;
+       union aux_wake_wa_options aux_wake_wa;
        uint8_t psr_power_use_phy_fsm;
 };
 
@@ -1369,6 +1388,8 @@ struct dc_sink_init_data {
        bool converter_disable_audio;
 };
 
+bool dc_extended_blank_supported(struct dc *dc);
+
 struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
 
 /* Newer interfaces  */
index 99a750f..c4168c1 100644 (file)
@@ -131,6 +131,7 @@ union stream_update_flags {
                uint32_t wb_update:1;
                uint32_t dsc_changed : 1;
                uint32_t mst_bw : 1;
+               uint32_t crtc_timing_adjust : 1;
        } bits;
 
        uint32_t raw;
@@ -289,6 +290,7 @@ struct dc_stream_update {
        struct dc_3dlut *lut3d_func;
 
        struct test_pattern *pending_test_pattern;
+       struct dc_crtc_timing_adjust *crtc_timing_adjust;
 };
 
 bool dc_is_stream_unchanged(
index c3e141c..83fbea2 100644 (file)
@@ -1497,16 +1497,12 @@ void dcn10_init_hw(struct dc *dc)
                        link->link_status.link_active = true;
        }
 
-       /* Power gate DSCs */
-       if (!is_optimized_init_done) {
-               for (i = 0; i < res_pool->res_cap->num_dsc; i++)
-                       if (hws->funcs.dsc_pg_control != NULL)
-                               hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
-       }
-
        /* we want to turn off all dp displays before doing detection */
        dc_link_blank_all_dp_displays(dc);
 
+       if (hws->funcs.enable_power_gating_plane)
+               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
         * pipes we want to use.
@@ -1559,8 +1555,6 @@ void dcn10_init_hw(struct dc *dc)
 
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
-       if (hws->funcs.enable_power_gating_plane)
-               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
        if (dc->clk_mgr->funcs->notify_wm_ranges)
                dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -2056,7 +2050,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
 {
        struct dc_context *dc_ctx = dc->ctx;
        int i, master = -1, embedded = -1;
-       struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
+       struct dc_crtc_timing *hw_crtc_timing;
        uint64_t phase[MAX_PIPES];
        uint64_t modulo[MAX_PIPES];
        unsigned int pclk;
@@ -2067,6 +2061,10 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
        uint32_t dp_ref_clk_100hz =
                dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
 
+       hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
+       if (!hw_crtc_timing)
+               return master;
+
        if (dc->config.vblank_alignment_dto_params &&
                dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
                embedded_h_total =
@@ -2130,6 +2128,8 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
                }
 
        }
+
+       kfree(hw_crtc_timing);
        return master;
 }
 
@@ -2522,14 +2522,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-       if (per_pixel_alpha)
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-       else
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
+       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+       } else if (per_pixel_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       } else {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+       }
+
        if (pipe_ctx->plane_state->global_alpha)
                blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
        else
index ab910de..b627c41 100644 (file)
@@ -1857,6 +1857,7 @@ void dcn20_optimize_bandwidth(
                struct dc_state *context)
 {
        struct hubbub *hubbub = dc->res_pool->hubbub;
+       int i;
 
        /* program dchubbub watermarks */
        hubbub->funcs->program_watermarks(hubbub,
@@ -1873,6 +1874,17 @@ void dcn20_optimize_bandwidth(
                        dc->clk_mgr,
                        context,
                        true);
+       if (dc_extended_blank_supported(dc) && context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+               for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+                       struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+                       if (pipe_ctx->stream && pipe_ctx->plane_res.hubp->funcs->program_extended_blank
+                               && pipe_ctx->stream->adjust.v_total_min == pipe_ctx->stream->adjust.v_total_max
+                               && pipe_ctx->stream->adjust.v_total_max > pipe_ctx->stream->timing.v_total)
+                                       pipe_ctx->plane_res.hubp->funcs->program_extended_blank(pipe_ctx->plane_res.hubp,
+                                               pipe_ctx->dlg_regs.optimized_min_dst_y_next_start);
+               }
+       }
        /* increase compbuf size */
        if (hubbub->funcs->program_compbuf_size)
                hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
@@ -2332,14 +2344,18 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
-       if (per_pixel_alpha)
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
-       else
-               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
-
        blnd_cfg.overlap_only = false;
        blnd_cfg.global_gain = 0xff;
 
+       if (per_pixel_alpha && pipe_ctx->plane_state->global_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+               blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+       } else if (per_pixel_alpha) {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+       } else {
+               blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+       }
+
        if (pipe_ctx->plane_state->global_alpha)
                blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
        else
index d473708..7802d60 100644 (file)
@@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags(
                                /*If need split for odm but 4 way split already*/
                                if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
                                                || !pipe->next_odm_pipe)) {
-                                       ASSERT(0); /* NOT expected yet */
                                        merge[i] = true; /* 4 -> 2 ODM */
                                } else if (split[i] == 0 && pipe->prev_odm_pipe) {
                                        ASSERT(0); /* NOT expected yet */
index 6127326..3fe4bfb 100644 (file)
@@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                .clock_trace = true,
                .disable_pplib_clock_request = true,
                .min_disp_clk_khz = 100000,
-               .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+               .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
                .force_single_disp_pipe_split = false,
                .disable_dcc = DCC_ENABLE,
                .vsr_support = true,
index ed0a0e5..f61ec87 100644 (file)
@@ -547,6 +547,9 @@ void dcn30_init_hw(struct dc *dc)
        /* we want to turn off all dp displays before doing detection */
        dc_link_blank_all_dp_displays(dc);
 
+       if (hws->funcs.enable_power_gating_plane)
+               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
         * pipes we want to use.
@@ -624,8 +627,6 @@ void dcn30_init_hw(struct dc *dc)
 
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
-       if (hws->funcs.enable_power_gating_plane)
-               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
        if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
                dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
index 3e6d6eb..51c5f36 100644 (file)
@@ -1042,5 +1042,7 @@ void hubbub31_construct(struct dcn20_hubbub *hubbub31,
        hubbub31->detile_buf_size = det_size_kb * 1024;
        hubbub31->pixel_chunk_size = pixel_chunk_size_kb * 1024;
        hubbub31->crb_size_segs = config_return_buffer_size_kb / DCN31_CRB_SEGMENT_SIZE_KB;
+
+       hubbub31->debug_test_index_pstate = 0x6;
 }
 
index 53b792b..8ae6117 100644 (file)
@@ -54,6 +54,13 @@ void hubp31_soft_reset(struct hubp *hubp, bool reset)
        REG_UPDATE(DCHUBP_CNTL, HUBP_SOFT_RESET, reset);
 }
 
+void hubp31_program_extended_blank(struct hubp *hubp, unsigned int min_dst_y_next_start_optimized)
+{
+       struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+       REG_SET(BLANK_OFFSET_1, 0, MIN_DST_Y_NEXT_START, min_dst_y_next_start_optimized);
+}
+
 static struct hubp_funcs dcn31_hubp_funcs = {
        .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
        .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -80,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
        .set_unbounded_requesting = hubp31_set_unbounded_requesting,
        .hubp_soft_reset = hubp31_soft_reset,
        .hubp_in_blank = hubp1_in_blank,
+       .program_extended_blank = hubp31_program_extended_blank,
 };
 
 bool hubp31_construct(
index 4be2286..631d8ac 100644 (file)
@@ -199,6 +199,9 @@ void dcn31_init_hw(struct dc *dc)
        /* we want to turn off all dp displays before doing detection */
        dc_link_blank_all_dp_displays(dc);
 
+       if (hws->funcs.enable_power_gating_plane)
+               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+
        /* If taking control over from VBIOS, we may want to optimize our first
         * mode set, so we need to skip powering down pipes until we know which
         * pipes we want to use.
@@ -248,8 +251,6 @@ void dcn31_init_hw(struct dc *dc)
 
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
-       if (hws->funcs.enable_power_gating_plane)
-               hws->funcs.enable_power_gating_plane(dc->hwseq, true);
 
        if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
                dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -338,20 +339,20 @@ void dcn31_enable_power_gating_plane(
        bool enable)
 {
        bool force_on = true; /* disable power gating */
+       uint32_t org_ip_request_cntl = 0;
 
        if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
                force_on = false;
 
+       REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+       if (org_ip_request_cntl == 0)
+               REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
        /* DCHUBP0/1/2/3/4/5 */
        REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        /* DPP0/1/2/3/4/5 */
        REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
 
        force_on = true; /* disable power gating */
        if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
@@ -359,11 +360,11 @@ void dcn31_enable_power_gating_plane(
 
        /* DCS0/1/2/3/4/5 */
        REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
        REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
-       REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
+
+       if (org_ip_request_cntl == 0)
+               REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
 }
 
 void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
index 8afe213..e05527a 100644 (file)
@@ -124,7 +124,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
 static bool optc31_disable_crtc(struct timing_generator *optc)
 {
        struct optc *optc1 = DCN10TG_FROM_TG(optc);
-
        /* disable otg request until end of the first line
         * in the vertical blank region
         */
@@ -138,6 +137,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
        REG_WAIT(OTG_CLOCK_CONTROL,
                        OTG_BUSY, 0,
                        1, 100000);
+       optc1_clear_optc_underflow(optc);
 
        return true;
 }
@@ -158,6 +158,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
                        OTG_BUSY, 0,
                        1, 100000);
 
+       /* clear the false state */
+       optc1_clear_optc_underflow(optc);
+
        return true;
 }
 
index 89b7b6b..63934ec 100644 (file)
@@ -2032,7 +2032,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
 
        BW_VAL_TRACE_COUNT();
 
+       DC_FP_START();
        out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+       DC_FP_END();
 
        // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
        if (pipe_cnt == 0)
@@ -2232,6 +2234,7 @@ static bool dcn31_resource_construct(
        dc->caps.extended_aux_timeout_support = true;
        dc->caps.dmcub_support = true;
        dc->caps.is_apu = true;
+       dc->caps.zstate_support = true;
 
        /* Color pipeline capabilities */
        dc->caps.color.dpp.dcn_arch = 1;
index 2f61221..f93af45 100644 (file)
@@ -722,8 +722,10 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
 {
        int plane_count;
        int i;
+       unsigned int optimized_min_dst_y_next_start_us;
 
        plane_count = 0;
+       optimized_min_dst_y_next_start_us = 0;
        for (i = 0; i < dc->res_pool->pipe_count; i++) {
                if (context->res_ctx.pipe_ctx[i].plane_state)
                        plane_count++;
@@ -744,11 +746,22 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc
                struct dc_link *link = context->streams[0]->sink->link;
                struct dc_stream_status *stream_status = &context->stream_status[0];
 
+               if (dc_extended_blank_supported(dc)) {
+                       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                               if (context->res_ctx.pipe_ctx[i].stream == context->streams[0]
+                                       && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min == context->res_ctx.pipe_ctx[i].stream->adjust.v_total_max
+                                       && context->res_ctx.pipe_ctx[i].stream->adjust.v_total_min > context->res_ctx.pipe_ctx[i].stream->timing.v_total) {
+                                               optimized_min_dst_y_next_start_us =
+                                                       context->res_ctx.pipe_ctx[i].dlg_regs.optimized_min_dst_y_next_start_us;
+                                               break;
+                               }
+                       }
+               }
                /* zstate only supported on PWRSEQ0  and when there's <2 planes*/
                if (link->link_index != 0 || stream_status->plane_count > 1)
                        return DCN_ZSTATE_SUPPORT_DISALLOW;
 
-               if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
+               if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
                        return DCN_ZSTATE_SUPPORT_ALLOW;
                else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr)
                        return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
@@ -786,8 +799,6 @@ void dcn20_calculate_dlg_params(
                                                        != dm_dram_clock_change_unsupported;
        context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
 
-       context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
-
        context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
 
        if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
@@ -843,6 +854,7 @@ void dcn20_calculate_dlg_params(
                                &pipes[pipe_idx].pipe);
                pipe_idx++;
        }
+       context->bw_ctx.bw.dcn.clk.zstate_support = decide_zstate_support(dc, context);
 }
 
 static void swizzle_to_dml_params(
index e0fecf1..53d760e 100644 (file)
@@ -1055,6 +1055,7 @@ static void dml_rq_dlg_get_dlg_params(
 
        float vba__refcyc_per_req_delivery_pre_l = get_refcyc_per_req_delivery_pre_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  // From VBA
        float vba__refcyc_per_req_delivery_l = get_refcyc_per_req_delivery_l_in_us(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;  // From VBA
+       int blank_lines;
 
        memset(disp_dlg_regs, 0, sizeof(*disp_dlg_regs));
        memset(disp_ttu_regs, 0, sizeof(*disp_ttu_regs));
@@ -1080,6 +1081,18 @@ static void dml_rq_dlg_get_dlg_params(
        dlg_vblank_start = interlaced ? (vblank_start / 2) : vblank_start;
 
        disp_dlg_regs->min_dst_y_next_start = (unsigned int) (((double) dlg_vblank_start) * dml_pow(2, 2));
+       blank_lines = (dst->vblank_end + dst->vtotal_min - dst->vblank_start - dst->vstartup_start - 1);
+       if (blank_lines < 0)
+               blank_lines = 0;
+       if (blank_lines != 0) {
+               disp_dlg_regs->optimized_min_dst_y_next_start_us =
+                       ((unsigned int) blank_lines * dst->hactive) / (unsigned int) dst->pixel_rate_mhz;
+               disp_dlg_regs->optimized_min_dst_y_next_start =
+                       (unsigned int)(((double) (dlg_vblank_start + blank_lines)) * dml_pow(2, 2));
+       } else {
+               // use unoptimized value
+               disp_dlg_regs->optimized_min_dst_y_next_start = disp_dlg_regs->min_dst_y_next_start;
+       }
        ASSERT(disp_dlg_regs->min_dst_y_next_start < (unsigned int)dml_pow(2, 18));
 
        dml_print("DML_DLG: %s: min_ttu_vblank (us)         = %3.2f\n", __func__, min_ttu_vblank);
index 59f0a61..2df660c 100644 (file)
@@ -446,6 +446,8 @@ struct _vcs_dpi_display_dlg_regs_st {
        unsigned int refcyc_h_blank_end;
        unsigned int dlg_vblank_end;
        unsigned int min_dst_y_next_start;
+       unsigned int optimized_min_dst_y_next_start;
+       unsigned int optimized_min_dst_y_next_start_us;
        unsigned int refcyc_per_htotal;
        unsigned int refcyc_x_after_scaler;
        unsigned int dst_y_after_scaler;
index efc2339..4385d19 100644 (file)
@@ -864,11 +864,11 @@ static bool setup_dsc_config(
                min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
        }
 
+       is_dsc_possible = (min_slices_h <= max_slices_h);
+
        if (pic_width % min_slices_h != 0)
                min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
 
-       is_dsc_possible = (min_slices_h <= max_slices_h);
-
        if (min_slices_h == 0 && max_slices_h == 0)
                is_dsc_possible = false;
 
index ab9939d..44f167d 100644 (file)
@@ -33,6 +33,7 @@
 #define MAX_MTP_SLOT_COUNT 64
 #define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
 #define TRAINING_AUX_RD_INTERVAL 100 //us
+#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
 
 struct dc_link;
 struct dc_stream_state;
index e45b799..ad69d78 100644 (file)
@@ -195,6 +195,9 @@ struct hubp_funcs {
 
        void (*hubp_set_flip_int)(struct hubp *hubp);
 
+       void (*program_extended_blank)(struct hubp *hubp,
+                       unsigned int min_dst_y_next_start_optimized);
+
        void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
 };
 
index b691aa4..79bc207 100644 (file)
@@ -100,7 +100,8 @@ enum vsc_packet_revision {
 //PB7 = MD0
 #define MASK_VTEM_MD0__VRR_EN         0x01
 #define MASK_VTEM_MD0__M_CONST        0x02
-#define MASK_VTEM_MD0__RESERVED2      0x0C
+#define MASK_VTEM_MD0__QMS_EN         0x04
+#define MASK_VTEM_MD0__RESERVED2      0x08
 #define MASK_VTEM_MD0__FVA_FACTOR_M1  0xF0
 
 //MD1
@@ -109,7 +110,7 @@ enum vsc_packet_revision {
 //MD2
 #define MASK_VTEM_MD2__BASE_REFRESH_RATE_98  0x03
 #define MASK_VTEM_MD2__RB                    0x04
-#define MASK_VTEM_MD2__RESERVED3             0xF8
+#define MASK_VTEM_MD2__NEXT_TFR              0xF8
 
 //MD3
 #define MASK_VTEM_MD3__BASE_REFRESH_RATE_07  0xFF
index 89fbee5..5504d81 100644 (file)
@@ -173,6 +173,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
 
        if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
                return false;
+       /* Don't use baco for reset in S3.
+        * This is a workaround for some platforms
+        * where entering BACO during suspend
+        * seems to cause reboots or hangs.
+        * This might be related to the fact that BACO controls
+        * power to the whole GPU including devices like audio and USB.
+        * Powering down/up everything may adversely affect these other
+        * devices.  Needs more investigation.
+        */
+       if (adev->in_s3)
+               return false;
 
        mutex_lock(&adev->pm.mutex);
 
@@ -500,6 +511,9 @@ int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_pages_num(smu, size);
        mutex_unlock(&adev->pm.mutex);
@@ -512,6 +526,9 @@ int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t si
        struct smu_context *smu = adev->powerplay.pp_handle;
        int ret = 0;
 
+       if (!is_support_sw_smu(adev))
+               return -EOPNOTSUPP;
+
        mutex_lock(&adev->pm.mutex);
        ret = smu_send_hbm_bad_channel_flag(smu, size);
        mutex_unlock(&adev->pm.mutex);
index 9ddd849..ede71de 100644 (file)
@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                hwmgr->display_config->num_display > 3 ?
-                                               data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
+                                               (data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
                                                min_mclk,
                                                NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
+                                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
index 7bfac02..b81711c 100644 (file)
@@ -991,7 +991,7 @@ static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
                return -EINVAL;
        }
 
-       if (sclk_min && sclk_max) {
+       if (sclk_min && sclk_max && smu_v13_0_5_clk_dpm_is_enabled(smu, SMU_SCLK)) {
                ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
                                                            SMU_SCLK,
                                                            sclk_min,
index 026e4e2..f4df344 100644 (file)
@@ -214,6 +214,29 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
 
+static int find_panel_or_bridge(struct device_node *node,
+                               struct drm_panel **panel,
+                               struct drm_bridge **bridge)
+{
+       if (panel) {
+               *panel = of_drm_find_panel(node);
+               if (!IS_ERR(*panel))
+                       return 0;
+
+               /* Clear the panel pointer in case of error. */
+               *panel = NULL;
+       }
+
+       /* No panel found yet, check for a bridge next. */
+       if (bridge) {
+               *bridge = of_drm_find_bridge(node);
+               if (*bridge)
+                       return 0;
+       }
+
+       return -EPROBE_DEFER;
+}
+
 /**
  * drm_of_find_panel_or_bridge - return connected panel or bridge device
  * @np: device tree node containing encoder output ports
@@ -236,66 +259,44 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
                                struct drm_panel **panel,
                                struct drm_bridge **bridge)
 {
-       int ret = -EPROBE_DEFER;
-       struct device_node *remote;
+       struct device_node *node;
+       int ret;
 
        if (!panel && !bridge)
                return -EINVAL;
+
        if (panel)
                *panel = NULL;
-
-       /**
-        * Devices can also be child nodes when we also control that device
-        * through the upstream device (ie, MIPI-DCS for a MIPI-DSI device).
-        *
-        * Lookup for a child node of the given parent that isn't either port
-        * or ports.
-        */
-       for_each_available_child_of_node(np, remote) {
-               if (of_node_name_eq(remote, "port") ||
-                   of_node_name_eq(remote, "ports"))
-                       continue;
-
-               goto of_find_panel_or_bridge;
+       if (bridge)
+               *bridge = NULL;
+
+       /* Check for a graph on the device node first. */
+       if (of_graph_is_present(np)) {
+               node = of_graph_get_remote_node(np, port, endpoint);
+               if (node) {
+                       ret = find_panel_or_bridge(node, panel, bridge);
+                       of_node_put(node);
+
+                       if (!ret)
+                               return 0;
+               }
        }
 
-       /*
-        * of_graph_get_remote_node() produces a noisy error message if port
-        * node isn't found and the absence of the port is a legit case here,
-        * so at first we silently check whether graph presents in the
-        * device-tree node.
-        */
-       if (!of_graph_is_present(np))
-               return -ENODEV;
-
-       remote = of_graph_get_remote_node(np, port, endpoint);
-
-of_find_panel_or_bridge:
-       if (!remote)
-               return -ENODEV;
+       /* Otherwise check for any child node other than port/ports. */
+       for_each_available_child_of_node(np, node) {
+               if (of_node_name_eq(node, "port") ||
+                   of_node_name_eq(node, "ports"))
+                       continue;
 
-       if (panel) {
-               *panel = of_drm_find_panel(remote);
-               if (!IS_ERR(*panel))
-                       ret = 0;
-               else
-                       *panel = NULL;
-       }
-
-       /* No panel found yet, check for a bridge next. */
-       if (bridge) {
-               if (ret) {
-                       *bridge = of_drm_find_bridge(remote);
-                       if (*bridge)
-                               ret = 0;
-               } else {
-                       *bridge = NULL;
-               }
+               ret = find_panel_or_bridge(node, panel, bridge);
+               of_node_put(node);
 
+               /* Stop at the first found occurrence. */
+               if (!ret)
+                       return 0;
        }
 
-       of_node_put(remote);
-       return ret;
+       return -EPROBE_DEFER;
 }
 EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
 
index c3ea243..0c5c438 100644 (file)
@@ -70,7 +70,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
         * mmap ioctl is disallowed for all discrete platforms,
         * and for all platforms with GRAPHICS_VER > 12.
         */
-       if (IS_DGFX(i915) || GRAPHICS_VER(i915) > 12)
+       if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) > IP_VER(12, 0))
                return -EOPNOTSUPP;
 
        if (args->flags & ~(I915_MMAP_WC))
index 87428fb..a2277a0 100644 (file)
@@ -222,6 +222,7 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np);
        struct imx_hdmi *hdmi;
+       int ret;
 
        hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
        if (!hdmi)
@@ -243,10 +244,15 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev)
        hdmi->bridge = of_drm_find_bridge(np);
        if (!hdmi->bridge) {
                dev_err(hdmi->dev, "Unable to find bridge\n");
+               dw_hdmi_remove(hdmi->hdmi);
                return -ENODEV;
        }
 
-       return component_add(&pdev->dev, &dw_hdmi_imx_ops);
+       ret = component_add(&pdev->dev, &dw_hdmi_imx_ops);
+       if (ret)
+               dw_hdmi_remove(hdmi->hdmi);
+
+       return ret;
 }
 
 static int dw_hdmi_imx_remove(struct platform_device *pdev)
index e5078d0..fb0e951 100644 (file)
@@ -572,6 +572,8 @@ static int imx_ldb_panel_ddc(struct device *dev,
                edidp = of_get_property(child, "edid", &edid_len);
                if (edidp) {
                        channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL);
+                       if (!channel->edid)
+                               return -ENOMEM;
                } else if (!channel->panel) {
                        /* fallback to display-timings node */
                        ret = of_get_drm_display_mode(child,
index 06cb1a5..63ba2ad 100644 (file)
@@ -75,8 +75,10 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                ret = of_get_drm_display_mode(np, &imxpd->mode,
                                              &imxpd->bus_flags,
                                              OF_USE_NATIVE_MODE);
-               if (ret)
+               if (ret) {
+                       drm_mode_destroy(connector->dev, mode);
                        return ret;
+               }
 
                drm_mode_copy(mode, &imxpd->mode);
                mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
index 83c31b2..ccc4fcf 100644 (file)
@@ -1742,7 +1742,7 @@ a6xx_create_private_address_space(struct msm_gpu *gpu)
                return ERR_CAST(mmu);
 
        return msm_gem_address_space_create(mmu,
-               "gpu", 0x100000000ULL, 0x1ffffffffULL);
+               "gpu", 0x100000000ULL, SZ_4G);
 }
 
 static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
index 89cfd84..8706bcd 100644 (file)
@@ -599,43 +599,91 @@ static const struct of_device_id dt_match[] = {
        {}
 };
 
-#ifdef CONFIG_PM
-static int adreno_resume(struct device *dev)
+static int adreno_runtime_resume(struct device *dev)
 {
        struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_resume(gpu);
 }
 
-static int active_submits(struct msm_gpu *gpu)
+static int adreno_runtime_suspend(struct device *dev)
 {
-       int active_submits;
-       mutex_lock(&gpu->active_lock);
-       active_submits = gpu->active_submits;
-       mutex_unlock(&gpu->active_lock);
-       return active_submits;
+       struct msm_gpu *gpu = dev_to_gpu(dev);
+
+       /*
+        * We should be holding a runpm ref, which will prevent
+        * runtime suspend.  In the system suspend path, we've
+        * already waited for active jobs to complete.
+        */
+       WARN_ON_ONCE(gpu->active_submits);
+
+       return gpu->funcs->pm_suspend(gpu);
+}
+
+static void suspend_scheduler(struct msm_gpu *gpu)
+{
+       int i;
+
+       /*
+        * Shut down the scheduler before we force suspend, so that
+        * suspend isn't racing with scheduler kthread feeding us
+        * more work.
+        *
+        * Note, we just want to park the thread, and let any jobs
+        * that are already on the hw queue complete normally, as
+        * opposed to the drm_sched_stop() path used for handling
+        * faulting/timed-out jobs.  We can't really cancel any jobs
+        * already on the hw queue without racing with the GPU.
+        */
+       for (i = 0; i < gpu->nr_rings; i++) {
+               struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+               kthread_park(sched->thread);
+       }
 }
 
-static int adreno_suspend(struct device *dev)
+static void resume_scheduler(struct msm_gpu *gpu)
+{
+       int i;
+
+       for (i = 0; i < gpu->nr_rings; i++) {
+               struct drm_gpu_scheduler *sched = &gpu->rb[i]->sched;
+               kthread_unpark(sched->thread);
+       }
+}
+
+static int adreno_system_suspend(struct device *dev)
 {
        struct msm_gpu *gpu = dev_to_gpu(dev);
-       int remaining;
+       int remaining, ret;
+
+       suspend_scheduler(gpu);
 
        remaining = wait_event_timeout(gpu->retire_event,
-                                      active_submits(gpu) == 0,
+                                      gpu->active_submits == 0,
                                       msecs_to_jiffies(1000));
        if (remaining == 0) {
                dev_err(dev, "Timeout waiting for GPU to suspend\n");
-               return -EBUSY;
+               ret = -EBUSY;
+               goto out;
        }
 
-       return gpu->funcs->pm_suspend(gpu);
+       ret = pm_runtime_force_suspend(dev);
+out:
+       if (ret)
+               resume_scheduler(gpu);
+
+       return ret;
+}
+
+static int adreno_system_resume(struct device *dev)
+{
+       resume_scheduler(dev_to_gpu(dev));
+       return pm_runtime_force_resume(dev);
 }
-#endif
 
 static const struct dev_pm_ops adreno_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
-       SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+       SYSTEM_SLEEP_PM_OPS(adreno_system_suspend, adreno_system_resume)
+       RUNTIME_PM_OPS(adreno_runtime_suspend, adreno_runtime_resume, NULL)
 };
 
 static struct platform_driver adreno_driver = {
index c515b7c..c61b5b2 100644 (file)
@@ -54,87 +54,87 @@ struct dpu_intr_reg {
  * When making changes be sure to sync with dpu_hw_intr_reg
  */
 static const struct dpu_intr_reg dpu_intr_set[] = {
-       {
+       [MDP_SSPP_TOP0_INTR] = {
                MDP_SSPP_TOP0_OFF+INTR_CLEAR,
                MDP_SSPP_TOP0_OFF+INTR_EN,
                MDP_SSPP_TOP0_OFF+INTR_STATUS
        },
-       {
+       [MDP_SSPP_TOP0_INTR2] = {
                MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
                MDP_SSPP_TOP0_OFF+INTR2_EN,
                MDP_SSPP_TOP0_OFF+INTR2_STATUS
        },
-       {
+       [MDP_SSPP_TOP0_HIST_INTR] = {
                MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
                MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
                MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
        },
-       {
+       [MDP_INTF0_INTR] = {
                MDP_INTF_0_OFF+INTF_INTR_CLEAR,
                MDP_INTF_0_OFF+INTF_INTR_EN,
                MDP_INTF_0_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF1_INTR] = {
                MDP_INTF_1_OFF+INTF_INTR_CLEAR,
                MDP_INTF_1_OFF+INTF_INTR_EN,
                MDP_INTF_1_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF2_INTR] = {
                MDP_INTF_2_OFF+INTF_INTR_CLEAR,
                MDP_INTF_2_OFF+INTF_INTR_EN,
                MDP_INTF_2_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF3_INTR] = {
                MDP_INTF_3_OFF+INTF_INTR_CLEAR,
                MDP_INTF_3_OFF+INTF_INTR_EN,
                MDP_INTF_3_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF4_INTR] = {
                MDP_INTF_4_OFF+INTF_INTR_CLEAR,
                MDP_INTF_4_OFF+INTF_INTR_EN,
                MDP_INTF_4_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF5_INTR] = {
                MDP_INTF_5_OFF+INTF_INTR_CLEAR,
                MDP_INTF_5_OFF+INTF_INTR_EN,
                MDP_INTF_5_OFF+INTF_INTR_STATUS
        },
-       {
+       [MDP_AD4_0_INTR] = {
                MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
-       {
+       [MDP_AD4_1_INTR] = {
                MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
                MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
        },
-       {
+       [MDP_INTF0_7xxx_INTR] = {
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF1_7xxx_INTR] = {
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF2_7xxx_INTR] = {
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF3_7xxx_INTR] = {
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF4_7xxx_INTR] = {
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
        },
-       {
+       [MDP_INTF5_7xxx_INTR] = {
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
                MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
index 1ee8246..c478d25 100644 (file)
@@ -98,7 +98,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
                __drm_atomic_helper_plane_destroy_state(plane->state);
 
        kfree(to_mdp5_plane_state(plane->state));
+       plane->state = NULL;
        mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
+       if (!mdp5_state)
+               return;
        __drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
 }
 
index 5d2ff67..acfe1b3 100644 (file)
@@ -176,6 +176,8 @@ void msm_disp_snapshot_add_block(struct msm_disp_state *disp_state, u32 len,
        va_list va;
 
        new_blk = kzalloc(sizeof(struct msm_disp_state_block), GFP_KERNEL);
+       if (!new_blk)
+               return;
 
        va_start(va, fmt);
 
index 178b774..a42732b 100644 (file)
@@ -580,6 +580,12 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
                        dp->dp_display.connector_type, state);
        mutex_unlock(&dp->event_mutex);
 
+       /*
+        * add fail safe mode outside event_mutex scope
+        * to avoid potential circular lock with drm thread
+        */
+       dp_panel_add_fail_safe_mode(dp->dp_display.connector);
+
        /* uevent will complete connection part */
        return 0;
 };
index f141872..26c3653 100644 (file)
@@ -151,6 +151,15 @@ static int dp_panel_update_modes(struct drm_connector *connector,
        return rc;
 }
 
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector)
+{
+       /* fail safe edid */
+       mutex_lock(&connector->dev->mode_config.mutex);
+       if (drm_add_modes_noedid(connector, 640, 480))
+               drm_set_preferred_mode(connector, 640, 480);
+       mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
        struct drm_connector *connector)
 {
@@ -207,16 +216,7 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                        goto end;
                }
 
-               /* fail safe edid */
-               mutex_lock(&connector->dev->mode_config.mutex);
-               if (drm_add_modes_noedid(connector, 640, 480))
-                       drm_set_preferred_mode(connector, 640, 480);
-               mutex_unlock(&connector->dev->mode_config.mutex);
-       } else {
-               /* always add fail-safe mode as backup mode */
-               mutex_lock(&connector->dev->mode_config.mutex);
-               drm_add_modes_noedid(connector, 640, 480);
-               mutex_unlock(&connector->dev->mode_config.mutex);
+               dp_panel_add_fail_safe_mode(connector);
        }
 
        if (panel->aux_cfg_update_done) {
index 9023e5b..99739ea 100644 (file)
@@ -59,6 +59,7 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel);
 int dp_panel_deinit(struct dp_panel *dp_panel);
 int dp_panel_timing_cfg(struct dp_panel *dp_panel);
 void dp_panel_dump_regs(struct dp_panel *dp_panel);
+void dp_panel_add_fail_safe_mode(struct drm_connector *connector);
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
                struct drm_connector *connector);
 u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
index 0c1b7dd..9f6af0f 100644 (file)
@@ -638,7 +638,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        return connector;
 
 fail:
-       connector->funcs->destroy(msm_dsi->connector);
+       connector->funcs->destroy(connector);
        return ERR_PTR(ret);
 }
 
index affa95e..9c36b50 100644 (file)
@@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
        struct msm_drm_private *priv = dev->dev_private;
 
        /* a2xx comes with its own MMU */
-       return priv->is_a2xx || iommu_present(&platform_bus_type);
+       return priv->is_a2xx || device_iommu_mapped(dev->dev);
 }
 
 static int msm_init_vram(struct drm_device *dev)
index 02b9ae6..a4f6197 100644 (file)
@@ -926,6 +926,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
                                        get_pid_task(aspace->pid, PIDTYPE_PID);
                                if (task) {
                                        comm = kstrdup(task->comm, GFP_KERNEL);
+                                       put_task_struct(task);
                                } else {
                                        comm = NULL;
                                }
index e177221..612310d 100644 (file)
@@ -216,6 +216,7 @@ gm20b_pmu = {
        .intr = gt215_pmu_intr,
        .recv = gm20b_pmu_recv,
        .initmsg = gm20b_pmu_initmsg,
+       .reset = gf100_pmu_reset,
 };
 
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
index 6bf7fc1..1a6f9c3 100644 (file)
@@ -23,7 +23,7 @@
  */
 #include "priv.h"
 
-static void
+void
 gp102_pmu_reset(struct nvkm_pmu *pmu)
 {
        struct nvkm_device *device = pmu->subdev.device;
index ba1583b..94cfb17 100644 (file)
@@ -83,6 +83,7 @@ gp10b_pmu = {
        .intr = gt215_pmu_intr,
        .recv = gm20b_pmu_recv,
        .initmsg = gm20b_pmu_initmsg,
+       .reset = gp102_pmu_reset,
 };
 
 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
index bcaade7..21abf31 100644 (file)
@@ -41,6 +41,7 @@ int gt215_pmu_send(struct nvkm_pmu *, u32[2], u32, u32, u32, u32);
 
 bool gf100_pmu_enabled(struct nvkm_pmu *);
 void gf100_pmu_reset(struct nvkm_pmu *);
+void gp102_pmu_reset(struct nvkm_pmu *pmu);
 
 void gk110_pmu_pgob(struct nvkm_pmu *, bool);
 
index a07ef26..6826f4d 100644 (file)
@@ -612,8 +612,10 @@ static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc,
        int ret;
 
        vcc = devm_regulator_get_optional(dev, "vcc");
-       if (IS_ERR(vcc))
+       if (IS_ERR(vcc)) {
                dev_err(dev, "get optional vcc failed\n");
+               vcc = NULL;
+       }
 
        dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver,
                                    struct mipi_dbi_dev, drm);
index 666223c..0a34e0a 100644 (file)
@@ -447,8 +447,9 @@ static void ipu_di_config_clock(struct ipu_di *di,
 
                error = rate / (sig->mode.pixelclock / 1000);
 
-               dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %d.%u%%\n",
-                       rate, div, (signed)(error - 1000) / 10, error % 10);
+               dev_dbg(di->ipu->dev, "  IPU clock can give %lu with divider %u, error %c%d.%d%%\n",
+                       rate, div, error < 1000 ? '-' : '+',
+                       abs(error - 1000) / 10, abs(error - 1000) % 10);
 
                /* Allow a 1% error */
                if (error < 1010 && error >= 990) {
index 26d269b..85a2142 100644 (file)
@@ -380,7 +380,7 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
         * execute:
         *
         *  (a) In the "normal (i.e., not resuming from hibernation)" path,
-        *      the full barrier in smp_store_mb() guarantees that the store
+        *      the full barrier in virt_store_mb() guarantees that the store
         *      is propagated to all CPUs before the add_channel_work work
         *      is queued.  In turn, add_channel_work is queued before the
         *      channel's ring buffer is allocated/initialized and the
@@ -392,14 +392,14 @@ void vmbus_channel_map_relid(struct vmbus_channel *channel)
         *      recv_int_page before retrieving the channel pointer from the
         *      array of channels.
         *
-        *  (b) In the "resuming from hibernation" path, the smp_store_mb()
+        *  (b) In the "resuming from hibernation" path, the virt_store_mb()
         *      guarantees that the store is propagated to all CPUs before
         *      the VMBus connection is marked as ready for the resume event
         *      (cf. check_ready_for_resume_event()).  The interrupt handler
         *      of the VMBus driver and vmbus_chan_sched() can not run before
         *      vmbus_bus_resume() has completed execution (cf. resume_noirq).
         */
-       smp_store_mb(
+       virt_store_mb(
                vmbus_connection.channels[channel->offermsg.child_relid],
                channel);
 }
index 439f99b..3248b48 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/count_zeros.h>
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
@@ -1130,6 +1131,7 @@ static void post_status(struct hv_dynmem_device *dm)
        struct dm_status status;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;
+       unsigned long num_pages_avail, num_pages_committed;
 
        if (pressure_report_delay > 0) {
                --pressure_report_delay;
@@ -1154,16 +1156,21 @@ static void post_status(struct hv_dynmem_device *dm)
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
-       status.num_avail = si_mem_available();
-       status.num_committed = vm_memory_committed() +
+       num_pages_avail = si_mem_available();
+       num_pages_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();
 
-       trace_balloon_status(status.num_avail, status.num_committed,
+       trace_balloon_status(num_pages_avail, num_pages_committed,
                             vm_memory_committed(), dm->num_pages_ballooned,
                             dm->num_pages_added, dm->num_pages_onlined);
+
+       /* Convert numbers of pages into numbers of HV_HYP_PAGEs. */
+       status.num_avail = num_pages_avail * NR_HV_HYP_PAGES_IN_PAGE;
+       status.num_committed = num_pages_committed * NR_HV_HYP_PAGES_IN_PAGE;
+
        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
@@ -1653,6 +1660,38 @@ static void disable_page_reporting(void)
        }
 }
 
+static int ballooning_enabled(void)
+{
+       /*
+        * Disable ballooning if the page size is not 4k (HV_HYP_PAGE_SIZE),
+        * since currently it's unclear to us whether an unballoon request can
+        * make sure all page ranges are guest page size aligned.
+        */
+       if (PAGE_SIZE != HV_HYP_PAGE_SIZE) {
+               pr_info("Ballooning disabled because page size is not 4096 bytes\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static int hot_add_enabled(void)
+{
+       /*
+        * Disable hot add on ARM64, because we currently rely on
+        * memory_add_physaddr_to_nid() to get a node id of a hot add range,
+        * however ARM64's memory_add_physaddr_to_nid() always returns 0 and
+        * DM_MEM_HOT_ADD_REQUEST doesn't have the NUMA node information for
+        * add_memory().
+        */
+       if (IS_ENABLED(CONFIG_ARM64)) {
+               pr_info("Memory hot add disabled on ARM64\n");
+               return 0;
+       }
+
+       return 1;
+}
+
 static int balloon_connect_vsp(struct hv_device *dev)
 {
        struct dm_version_request version_req;
@@ -1724,8 +1763,8 @@ static int balloon_connect_vsp(struct hv_device *dev)
         * currently still requires the bits to be set, so we have to add code
         * to fail the host's hot-add and balloon up/down requests, if any.
         */
-       cap_msg.caps.cap_bits.balloon = 1;
-       cap_msg.caps.cap_bits.hot_add = 1;
+       cap_msg.caps.cap_bits.balloon = ballooning_enabled();
+       cap_msg.caps.cap_bits.hot_add = hot_add_enabled();
 
        /*
         * Specify our alignment requirements as it relates
index c1dd21d..ae68298 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/panic_notifier.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
+#include <linux/dma-map-ops.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -218,6 +219,16 @@ bool hv_query_ext_cap(u64 cap_query)
 }
 EXPORT_SYMBOL_GPL(hv_query_ext_cap);
 
+void hv_setup_dma_ops(struct device *dev, bool coherent)
+{
+       /*
+        * Hyper-V does not offer a vIOMMU in the guest
+        * VM, so pass 0/NULL for the IOMMU settings
+        */
+       arch_setup_dma_ops(dev, 0, 0, NULL, coherent);
+}
+EXPORT_SYMBOL_GPL(hv_setup_dma_ops);
+
 bool hv_is_hibernation_supported(void)
 {
        return !hv_root_partition && acpi_sleep_state_supported(ACPI_STATE_S4);
index 71efacb..3d215d9 100644 (file)
@@ -439,7 +439,16 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
 {
        u32 priv_read_loc = rbi->priv_read_index;
-       u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+       u32 write_loc;
+
+       /*
+        * The Hyper-V host writes the packet data, then uses
+        * store_release() to update the write_index.  Use load_acquire()
+        * here to prevent loads of the packet data from being re-ordered
+        * before the read of the write_index and potentially getting
+        * stale data.
+        */
+       write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);
 
        if (write_loc >= priv_read_loc)
                return write_loc - priv_read_loc;
index 60ee8b3..14de170 100644 (file)
@@ -77,8 +77,8 @@ static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
 
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
-        * doing hyperv_report_panic_msg() later with kmsg data, don't do
-        * the notification here.
+        * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+        * here.
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
            && hyperv_report_reg()) {
@@ -100,8 +100,8 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val,
 
        /*
         * Hyper-V should be notified only once about a panic.  If we will be
-        * doing hyperv_report_panic_msg() later with kmsg data, don't do
-        * the notification here.
+        * doing hv_kmsg_dump() with kmsg data later, don't do the notification
+        * here.
         */
        if (hyperv_report_reg())
                hyperv_report_panic(regs, val, true);
@@ -921,6 +921,21 @@ static int vmbus_probe(struct device *child_device)
 }
 
 /*
+ * vmbus_dma_configure -- Configure DMA coherence for VMbus device
+ */
+static int vmbus_dma_configure(struct device *child_device)
+{
+       /*
+        * On ARM64, propagate the DMA coherence setting from the top level
+        * VMbus ACPI device to the child VMbus device being added here.
+        * On x86/x64 coherence is assumed and these calls have no effect.
+        */
+       hv_setup_dma_ops(child_device,
+               device_get_dma_attr(&hv_acpi_dev->dev) == DEV_DMA_COHERENT);
+       return 0;
+}
+
+/*
  * vmbus_remove - Remove a vmbus device
  */
 static void vmbus_remove(struct device *child_device)
@@ -1040,6 +1055,7 @@ static struct bus_type  hv_bus = {
        .remove =               vmbus_remove,
        .probe =                vmbus_probe,
        .uevent =               vmbus_uevent,
+       .dma_configure =        vmbus_dma_configure,
        .dev_groups =           vmbus_dev_groups,
        .drv_groups =           vmbus_drv_groups,
        .bus_groups =           vmbus_bus_groups,
@@ -1546,14 +1562,20 @@ static int vmbus_bus_init(void)
        if (ret)
                goto err_connect;
 
+       if (hv_is_isolation_supported())
+               sysctl_record_panic_msg = 0;
+
        /*
         * Only register if the crash MSRs are available
         */
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                u64 hyperv_crash_ctl;
                /*
-                * Sysctl registration is not fatal, since by default
-                * reporting is enabled.
+                * Panic message recording (sysctl_record_panic_msg)
+                * is enabled by default in non-isolated guests and
+                * disabled by default in isolated guests; the panic
+                * message recording won't be available in isolated
+                * guests should the following registration fail.
                 */
                hv_ctl_table_hdr = register_sysctl_table(hv_root_table);
                if (!hv_ctl_table_hdr)
@@ -2097,6 +2119,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        child_device_obj->device.parent = &hv_acpi_dev->dev;
        child_device_obj->device.release = vmbus_device_release;
 
+       child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
+
        /*
         * Register with the LDM. This will kick off the driver/device
         * binding...which will eventually call vmbus_match() and vmbus_probe()
@@ -2122,9 +2148,6 @@ int vmbus_device_register(struct hv_device *child_device_obj)
        }
        hv_debug_add_dev_dir(child_device_obj);
 
-       child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
-       child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
-       dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
        return 0;
 
 err_kset_unregister:
@@ -2428,6 +2451,21 @@ static int vmbus_acpi_add(struct acpi_device *device)
 
        hv_acpi_dev = device;
 
+       /*
+        * Older versions of Hyper-V for ARM64 fail to include the _CCA
+        * method on the top level VMbus device in the DSDT. But devices
+        * are hardware coherent in all current Hyper-V use cases, so fix
+        * up the ACPI device to behave as if _CCA is present and indicates
+        * hardware coherence.
+        */
+       ACPI_COMPANION_SET(&device->dev, device);
+       if (IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED) &&
+           device_get_dma_attr(&device->dev) == DEV_DMA_NOT_SUPPORTED) {
+               pr_info("No ACPI _CCA found; assuming coherent device I/O\n");
+               device->flags.cca_seen = true;
+               device->flags.coherent_dma = true;
+       }
+
        result = acpi_walk_resources(device->handle, METHOD_NAME__CRS,
                                        vmbus_walk_resources, NULL);
 
@@ -2780,10 +2818,15 @@ static void __exit vmbus_exit(void)
        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                kmsg_dump_unregister(&hv_kmsg_dumper);
                unregister_die_notifier(&hyperv_die_block);
-               atomic_notifier_chain_unregister(&panic_notifier_list,
-                                                &hyperv_panic_block);
        }
 
+       /*
+        * The panic notifier is always registered, hence we should
+        * also unconditionally unregister it here as well.
+        */
+       atomic_notifier_chain_unregister(&panic_notifier_list,
+                                        &hyperv_panic_block);
+
        free_page((unsigned long)hv_panic_page);
        unregister_sysctl_table(hv_ctl_table_hdr);
        hv_ctl_table_hdr = NULL;
index 27f969b..e9e2db6 100644 (file)
@@ -179,6 +179,12 @@ struct imx_i2c_hwdata {
        unsigned int            ndivs;
        unsigned int            i2sr_clr_opcode;
        unsigned int            i2cr_ien_opcode;
+       /*
+        * Errata ERR007805 or e7805:
+        * I2C: When the I2C clock speed is configured for 400 kHz,
+        * the SCL low period violates the I2C spec of 1.3 uS min.
+        */
+       bool                    has_err007805;
 };
 
 struct imx_i2c_dma {
@@ -240,6 +246,16 @@ static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
 
 };
 
+static const struct imx_i2c_hwdata imx6_i2c_hwdata = {
+       .devtype                = IMX21_I2C,
+       .regshift               = IMX_I2C_REGSHIFT,
+       .clk_div                = imx_i2c_clk_div,
+       .ndivs                  = ARRAY_SIZE(imx_i2c_clk_div),
+       .i2sr_clr_opcode        = I2SR_CLR_OPCODE_W0C,
+       .i2cr_ien_opcode        = I2CR_IEN_OPCODE_1,
+       .has_err007805          = true,
+};
+
 static struct imx_i2c_hwdata vf610_i2c_hwdata = {
        .devtype                = VF610_I2C,
        .regshift               = VF610_I2C_REGSHIFT,
@@ -266,6 +282,16 @@ MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
 static const struct of_device_id i2c_imx_dt_ids[] = {
        { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
        { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+       { .compatible = "fsl,imx6q-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sl-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mp-i2c", .data = &imx6_i2c_hwdata, },
+       { .compatible = "fsl,imx8mq-i2c", .data = &imx6_i2c_hwdata, },
        { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
        { /* sentinel */ }
 };
@@ -551,6 +577,13 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
        unsigned int div;
        int i;
 
+       if (i2c_imx->hwdata->has_err007805 && i2c_imx->bitrate > 384000) {
+               dev_dbg(&i2c_imx->adapter.dev,
+                       "SoC errata ERR007805 or e7805 applies, bus frequency limited from %d Hz to 384000 Hz.\n",
+                       i2c_imx->bitrate);
+               i2c_imx->bitrate = 384000;
+       }
+
        /* Divider value calculation */
        if (i2c_imx->cur_clk == i2c_clk_rate)
                return;
index f4820fd..c036431 100644 (file)
 #define ISMT_SPGT_SPD_MASK     0xc0000000      /* SMBus Speed mask */
 #define ISMT_SPGT_SPD_80K      0x00            /* 80 kHz */
 #define ISMT_SPGT_SPD_100K     (0x1 << 30)     /* 100 kHz */
-#define ISMT_SPGT_SPD_400K     (0x2 << 30)     /* 400 kHz */
-#define ISMT_SPGT_SPD_1M       (0x3 << 30)     /* 1 MHz */
+#define ISMT_SPGT_SPD_400K     (0x2U << 30)    /* 400 kHz */
+#define ISMT_SPGT_SPD_1M       (0x3U << 30)    /* 1 MHz */
 
 
 /* MSI Control Register (MSICTL) bit definitions */
index 7728c84..9028ffb 100644 (file)
@@ -137,6 +137,12 @@ static int pasemi_i2c_xfer_msg(struct i2c_adapter *adapter,
 
                TXFIFO_WR(smbus, msg->buf[msg->len-1] |
                          (stop ? MTXFIFO_STOP : 0));
+
+               if (stop) {
+                       err = pasemi_smb_waitready(smbus);
+                       if (err)
+                               goto reset_out;
+               }
        }
 
        return 0;
index fc1dcc1..5b920f0 100644 (file)
@@ -843,10 +843,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
                /* FIFO is disabled, so we can only use GPI DMA */
                gi2c->gpi_mode = true;
                ret = setup_gpi_dma(gi2c);
-               if (ret) {
-                       dev_err(dev, "Failed to setup GPI DMA mode:%d ret\n", ret);
-                       return ret;
-               }
+               if (ret)
+                       return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
 
                dev_dbg(dev, "Using GPI DMA mode for I2C\n");
        } else {
index cf5d049..ab0adaa 100644 (file)
@@ -557,7 +557,7 @@ static long compat_i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned lo
                                .addr = umsg.addr,
                                .flags = umsg.flags,
                                .len = umsg.len,
-                               .buf = compat_ptr(umsg.buf)
+                               .buf = (__force __u8 *)compat_ptr(umsg.buf),
                        };
                }
 
@@ -668,16 +668,21 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
        i2c_dev->dev.class = i2c_dev_class;
        i2c_dev->dev.parent = &adap->dev;
        i2c_dev->dev.release = i2cdev_dev_release;
-       dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+
+       res = dev_set_name(&i2c_dev->dev, "i2c-%d", adap->nr);
+       if (res)
+               goto err_put_i2c_dev;
 
        res = cdev_device_add(&i2c_dev->cdev, &i2c_dev->dev);
-       if (res) {
-               put_i2c_dev(i2c_dev, false);
-               return res;
-       }
+       if (res)
+               goto err_put_i2c_dev;
 
        pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
        return 0;
+
+err_put_i2c_dev:
+       put_i2c_dev(i2c_dev, false);
+       return res;
 }
 
 static int i2cdev_detach_adapter(struct device *dev, void *dummy)
index 35f0d5e..1c107d6 100644 (file)
@@ -2824,6 +2824,7 @@ static int cm_dreq_handler(struct cm_work *work)
        switch (cm_id_priv->id.state) {
        case IB_CM_REP_SENT:
        case IB_CM_DREQ_SENT:
+       case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->msg);
                break;
        case IB_CM_ESTABLISHED:
@@ -2831,8 +2832,6 @@ static int cm_dreq_handler(struct cm_work *work)
                    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
                        ib_cancel_mad(cm_id_priv->msg);
                break;
-       case IB_CM_MRA_REP_RCVD:
-               break;
        case IB_CM_TIMEWAIT:
                atomic_long_inc(&work->port->counters[CM_RECV_DUPLICATES]
                                                     [CM_DREQ_COUNTER]);
index 876cc78..7333646 100644 (file)
@@ -80,6 +80,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
        unsigned long flags;
        struct list_head del_list;
 
+       /* Prevent freeing of mm until we are completely finished. */
+       mmgrab(handler->mn.mm);
+
        /* Unregister first so we don't get any more notifications. */
        mmu_notifier_unregister(&handler->mn, handler->mn.mm);
 
@@ -102,6 +105,9 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
 
        do_remove(handler, &del_list);
 
+       /* Now the mm may be freed. */
+       mmdrop(handler->mn.mm);
+
        kfree(handler);
 }
 
index 956f8e8..32ef67e 100644 (file)
@@ -574,8 +574,10 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
                spin_lock_irq(&ent->lock);
                if (ent->disabled)
                        goto out;
-               if (need_delay)
+               if (need_delay) {
                        queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+                       goto out;
+               }
                remove_cache_mr_locked(ent);
                queue_adjust_cache_locked(ent);
        }
@@ -625,6 +627,7 @@ static void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        struct mlx5_cache_ent *ent = mr->cache_ent;
 
+       WRITE_ONCE(dev->cache.last_add, jiffies);
        spin_lock_irq(&ent->lock);
        list_add_tail(&mr->list, &ent->head);
        ent->available_mrs++;
index ae50b56..8ef112f 100644 (file)
@@ -3190,7 +3190,11 @@ serr_no_r_lock:
        spin_lock_irqsave(&sqp->s_lock, flags);
        rvt_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
-               int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+               int lastwqe;
+
+               spin_lock(&sqp->r_lock);
+               lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+               spin_unlock(&sqp->r_lock);
 
                sqp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
index 4aab631..d9cf282 100644 (file)
@@ -1661,7 +1661,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
        num_iommus = of_property_count_elems_of_size(dev->of_node, "iommus",
                                                     sizeof(phandle));
        if (num_iommus < 0)
-               return 0;
+               return ERR_PTR(-ENODEV);
 
        arch_data = kcalloc(num_iommus + 1, sizeof(*arch_data), GFP_KERNEL);
        if (!arch_data)
index 680d2fc..15edb9a 100644 (file)
@@ -433,6 +433,7 @@ config QCOM_PDC
 config QCOM_MPM
        tristate "QCOM MPM"
        depends on ARCH_QCOM
+       depends on MAILBOX
        select IRQ_DOMAIN_HIERARCHY
        help
          MSM Power Manager driver to manage and configure wakeup
index cd77297..a0fc764 100644 (file)
@@ -3011,18 +3011,12 @@ static int __init allocate_lpi_tables(void)
        return 0;
 }
 
-static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+static u64 read_vpend_dirty_clear(void __iomem *vlpi_base)
 {
        u32 count = 1000000;    /* 1s! */
        bool clean;
        u64 val;
 
-       val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
-       val &= ~GICR_VPENDBASER_Valid;
-       val &= ~clr;
-       val |= set;
-       gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
        do {
                val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
                clean = !(val & GICR_VPENDBASER_Dirty);
@@ -3033,10 +3027,26 @@ static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
                }
        } while (!clean && count);
 
-       if (unlikely(val & GICR_VPENDBASER_Dirty)) {
+       if (unlikely(!clean))
                pr_err_ratelimited("ITS virtual pending table not cleaning\n");
+
+       return val;
+}
+
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
+{
+       u64 val;
+
+       /* Make sure we wait until the RD is done with the initial scan */
+       val = read_vpend_dirty_clear(vlpi_base);
+       val &= ~GICR_VPENDBASER_Valid;
+       val &= ~clr;
+       val |= set;
+       gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+       val = read_vpend_dirty_clear(vlpi_base);
+       if (unlikely(val & GICR_VPENDBASER_Dirty))
                val |= GICR_VPENDBASER_PendingLast;
-       }
 
        return val;
 }
index 0efe1a9..b252d55 100644 (file)
@@ -206,11 +206,11 @@ static inline void __iomem *gic_dist_base(struct irq_data *d)
        }
 }
 
-static void gic_do_wait_for_rwp(void __iomem *base)
+static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
 {
        u32 count = 1000000;    /* 1s! */
 
-       while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+       while (readl_relaxed(base + GICD_CTLR) & bit) {
                count--;
                if (!count) {
                        pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -224,13 +224,13 @@ static void gic_do_wait_for_rwp(void __iomem *base)
 /* Wait for completion of a distributor change */
 static void gic_dist_wait_for_rwp(void)
 {
-       gic_do_wait_for_rwp(gic_data.dist_base);
+       gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
 }
 
 /* Wait for completion of a redistributor change */
 static void gic_redist_wait_for_rwp(void)
 {
-       gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+       gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
 }
 
 #ifdef CONFIG_ARM64
@@ -1466,6 +1466,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
                if(fwspec->param_count != 2)
                        return -EINVAL;
 
+               if (fwspec->param[0] < 16) {
+                       pr_err(FW_BUG "Illegal GSI%d translation request\n",
+                              fwspec->param[0]);
+                       return -EINVAL;
+               }
+
                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];
 
index 58ba835..09c710e 100644 (file)
@@ -1123,6 +1123,12 @@ static int gic_irq_domain_translate(struct irq_domain *d,
                if(fwspec->param_count != 2)
                        return -EINVAL;
 
+               if (fwspec->param[0] < 16) {
+                       pr_err(FW_BUG "Illegal GSI%d translation request\n",
+                              fwspec->param[0]);
+                       return -EINVAL;
+               }
+
                *hwirq = fwspec->param[0];
                *type = fwspec->param[1];
 
index eea5a75..d306146 100644 (file)
@@ -375,7 +375,7 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
        raw_spin_lock_init(&priv->lock);
 
        priv->base = devm_platform_ioremap_resource(pdev, 0);
-       if (!priv->base)
+       if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);
 
        for (i = 0; i < priv->reg_stride; i++) {
index ad2d5fa..36ae30b 100644 (file)
@@ -4399,6 +4399,7 @@ try_smaller_buffer:
        }
 
        if (ic->internal_hash) {
+               size_t recalc_tags_size;
                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
                if (!ic->recalc_wq ) {
                        ti->error = "Cannot allocate workqueue";
@@ -4412,8 +4413,10 @@ try_smaller_buffer:
                        r = -ENOMEM;
                        goto bad;
                }
-               ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
-                                                ic->tag_size, GFP_KERNEL);
+               recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+               if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
+                       recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+               ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL);
                if (!ic->recalc_tags) {
                        ti->error = "Cannot allocate tags for recalculating";
                        r = -ENOMEM;
index 875bca3..82f2a06 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/blkdev.h>
 #include <linux/slab.h>
 #include <linux/module.h>
-#include <linux/sched/clock.h>
 
 
 #define DM_MSG_PREFIX  "multipath historical-service-time"
@@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps,
 {
        struct selector *s = ps->context;
        struct path_info *pi = NULL, *best = NULL;
-       u64 time_now = sched_clock();
+       u64 time_now = ktime_get_ns();
        struct dm_path *ret = NULL;
        unsigned long flags;
 
@@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path,
 
 static u64 path_service_time(struct path_info *pi, u64 start_time)
 {
-       u64 sched_now = ktime_get_ns();
+       u64 now = ktime_get_ns();
 
        /* if a previous disk request has finished after this IO was
         * sent to the hardware, pretend the submission happened
@@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time)
        if (time_after64(pi->last_finish, start_time))
                start_time = pi->last_finish;
 
-       pi->last_finish = sched_now;
-       if (time_before64(sched_now, start_time))
+       pi->last_finish = now;
+       if (time_before64(now, start_time))
                return 0;
 
-       return sched_now - start_time;
+       return now - start_time;
 }
 
 static int hst_end_io(struct path_selector *ps, struct dm_path *path,
index c1ca9be..57daa86 100644 (file)
@@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno,
        return 0;
 }
 
+struct orig_bio_details {
+       unsigned int op;
+       unsigned int nr_sectors;
+};
+
 /*
  * First phase of BIO mapping for targets with zone append emulation:
  * check all BIO that change a zone writer pointer and change zone
  * append operations into regular write operations.
  */
 static bool dm_zone_map_bio_begin(struct mapped_device *md,
-                                 struct bio *orig_bio, struct bio *clone)
+                                 unsigned int zno, struct bio *clone)
 {
        sector_t zsectors = blk_queue_zone_sectors(md->queue);
-       unsigned int zno = bio_zone_no(orig_bio);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
 
        /*
@@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
                WRITE_ONCE(md->zwp_offset[zno], zwp_offset);
        }
 
-       switch (bio_op(orig_bio)) {
+       switch (bio_op(clone)) {
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
                return true;
@@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
                 * target zone.
                 */
                clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE |
-                       (orig_bio->bi_opf & (~REQ_OP_MASK));
-               clone->bi_iter.bi_sector =
-                       orig_bio->bi_iter.bi_sector + zwp_offset;
+                       (clone->bi_opf & (~REQ_OP_MASK));
+               clone->bi_iter.bi_sector += zwp_offset;
                break;
        default:
                DMWARN_LIMIT("Invalid BIO operation");
@@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md,
  * data written to a zone. Note that at this point, the remapped clone BIO
  * may already have completed, so we do not touch it.
  */
-static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
-                                       struct bio *orig_bio,
+static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno,
+                                       struct orig_bio_details *orig_bio_details,
                                        unsigned int nr_sectors)
 {
-       unsigned int zno = bio_zone_no(orig_bio);
        unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]);
 
        /* The clone BIO may already have been completed and failed */
@@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                return BLK_STS_IOERR;
 
        /* Update the zone wp offset */
-       switch (bio_op(orig_bio)) {
+       switch (orig_bio_details->op) {
        case REQ_OP_ZONE_RESET:
                WRITE_ONCE(md->zwp_offset[zno], 0);
                return BLK_STS_OK;
@@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md,
                 * Check that the target did not truncate the write operation
                 * emulating a zone append.
                 */
-               if (nr_sectors != bio_sectors(orig_bio)) {
+               if (nr_sectors != orig_bio_details->nr_sectors) {
                        DMWARN_LIMIT("Truncated write for zone append");
                        return BLK_STS_IOERR;
                }
@@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q,
        bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED);
 }
 
-static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
+static bool dm_need_zone_wp_tracking(struct bio *bio)
 {
        /*
         * Special processing is not needed for operations that do not need the
@@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio)
         * zones and all operations that do not modify directly a sequential
         * zone write pointer.
         */
-       if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio))
+       if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                return false;
-       switch (bio_op(orig_bio)) {
+       switch (bio_op(bio)) {
        case REQ_OP_WRITE_ZEROES:
        case REQ_OP_WRITE:
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_FINISH:
        case REQ_OP_ZONE_APPEND:
-               return bio_zone_is_seq(orig_bio);
+               return bio_zone_is_seq(bio);
        default:
                return false;
        }
@@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
        struct dm_target *ti = tio->ti;
        struct mapped_device *md = io->md;
        struct request_queue *q = md->queue;
-       struct bio *orig_bio = io->orig_bio;
        struct bio *clone = &tio->clone;
+       struct orig_bio_details orig_bio_details;
        unsigned int zno;
        blk_status_t sts;
        int r;
@@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio)
         * IOs that do not change a zone write pointer do not need
         * any additional special processing.
         */
-       if (!dm_need_zone_wp_tracking(orig_bio))
+       if (!dm_need_zone_wp_tracking(clone))
                return ti->type->map(ti, clone);
 
        /* Lock the target zone */
-       zno = bio_zone_no(orig_bio);
+       zno = bio_zone_no(clone);
        dm_zone_lock(q, zno, clone);
 
+       orig_bio_details.nr_sectors = bio_sectors(clone);
+       orig_bio_details.op = bio_op(clone);
+
        /*
         * Check that the bio and the target zone write pointer offset are
         * both valid, and if the bio is a zone append, remap it to a write.
         */
-       if (!dm_zone_map_bio_begin(md, orig_bio, clone)) {
+       if (!dm_zone_map_bio_begin(md, zno, clone)) {
                dm_zone_unlock(q, zno, clone);
                return DM_MAPIO_KILL;
        }
@@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                 * The target submitted the clone BIO. The target zone will
                 * be unlocked on completion of the clone.
                 */
-               sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+               sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+                                         *tio->len_ptr);
                break;
        case DM_MAPIO_REMAPPED:
                /*
@@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio)
                 * unlock the target zone here as the clone will not be
                 * submitted.
                 */
-               sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr);
+               sts = dm_zone_map_bio_end(md, zno, &orig_bio_details,
+                                         *tio->len_ptr);
                if (sts != BLK_STS_OK)
                        dm_zone_unlock(q, zno, clone);
                break;
index 3c5fad7..82957bd 100644 (file)
@@ -1323,8 +1323,7 @@ static void __map_bio(struct bio *clone)
 }
 
 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
-                               struct dm_target *ti, unsigned num_bios,
-                               unsigned *len)
+                               struct dm_target *ti, unsigned num_bios)
 {
        struct bio *bio;
        int try;
@@ -1335,7 +1334,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
                if (try)
                        mutex_lock(&ci->io->md->table_devices_lock);
                for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
-                       bio = alloc_tio(ci, ti, bio_nr, len,
+                       bio = alloc_tio(ci, ti, bio_nr, NULL,
                                        try ? GFP_NOIO : GFP_NOWAIT);
                        if (!bio)
                                break;
@@ -1363,11 +1362,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
                break;
        case 1:
                clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-               dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                __map_bio(clone);
                break;
        default:
-               alloc_multiple_bios(&blist, ci, ti, num_bios, len);
+               /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
+               alloc_multiple_bios(&blist, ci, ti, num_bios);
                while ((clone = bio_list_pop(&blist))) {
                        dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
                        __map_bio(clone);
@@ -1392,6 +1391,7 @@ static void __send_empty_flush(struct clone_info *ci)
 
        ci->bio = &flush_bio;
        ci->sector_count = 0;
+       ci->io->tio.clone.bi_iter.bi_size = 0;
 
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
@@ -1407,14 +1407,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target
        len = min_t(sector_t, ci->sector_count,
                    max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
-       /*
-        * dm_accept_partial_bio cannot be used with duplicate bios,
-        * so update clone_info cursor before __send_duplicate_bios().
-        */
+       __send_duplicate_bios(ci, ti, num_bios, &len);
+
        ci->sector += len;
        ci->sector_count -= len;
-
-       __send_duplicate_bios(ci, ti, num_bios, &len);
 }
 
 static bool is_abnormal_io(struct bio *bio)
index 28f2baf..5afa373 100644 (file)
@@ -7,6 +7,7 @@ comment "NXP media platform drivers"
 config VIDEO_IMX_MIPI_CSIS
        tristate "NXP MIPI CSI-2 CSIS receiver found on i.MX7 and i.MX8 models"
        depends on ARCH_MXC || COMPILE_TEST
+       depends on VIDEO_DEV
        select MEDIA_CONTROLLER
        select V4L2_FWNODE
        select VIDEO_V4L2_SUBDEV_API
index 4de5e8d..3d3d106 100644 (file)
@@ -892,7 +892,7 @@ static int rga_probe(struct platform_device *pdev)
        }
        rga->dst_mmu_pages =
                (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
-       if (rga->dst_mmu_pages) {
+       if (!rga->dst_mmu_pages) {
                ret = -ENOMEM;
                goto free_src_pages;
        }
index 4702974..0de587b 100644 (file)
@@ -77,16 +77,16 @@ err_mutex_unlock:
 }
 
 static const struct si2157_tuner_info si2157_tuners[] = {
-       { SI2141, false, 0x60, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
-       { SI2141, false, 0x61, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
-       { SI2146, false, 0x11, SI2146_11_FIRMWARE, NULL },
-       { SI2147, false, 0x50, SI2147_50_FIRMWARE, NULL },
-       { SI2148, true,  0x32, SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2148, true,  0x33, SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2157, false, 0x50, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
-       { SI2158, false, 0x50, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2158, false, 0x51, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
-       { SI2177, false, 0x50, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
+       { SI2141, 0x60, false, SI2141_60_FIRMWARE, SI2141_A10_FIRMWARE },
+       { SI2141, 0x61, false, SI2141_61_FIRMWARE, SI2141_A10_FIRMWARE },
+       { SI2146, 0x11, false, SI2146_11_FIRMWARE, NULL },
+       { SI2147, 0x50, false, SI2147_50_FIRMWARE, NULL },
+       { SI2148, 0x32, true,  SI2148_32_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2148, 0x33, true,  SI2148_33_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2157, 0x50, false, SI2157_50_FIRMWARE, SI2157_A30_FIRMWARE },
+       { SI2158, 0x50, false, SI2158_50_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2158, 0x51, false, SI2158_51_FIRMWARE, SI2158_A20_FIRMWARE },
+       { SI2177, 0x50, false, SI2177_50_FIRMWARE, SI2157_A30_FIRMWARE },
 };
 
 static int si2157_load_firmware(struct dvb_frontend *fe,
@@ -178,7 +178,7 @@ static int si2157_find_and_load_firmware(struct dvb_frontend *fe)
                }
        }
 
-       if (!fw_name && !fw_alt_name) {
+       if (required && !fw_name && !fw_alt_name) {
                dev_err(&client->dev,
                        "unknown chip version Si21%d-%c%c%c ROM 0x%02x\n",
                        part_id, cmd.args[1], cmd.args[3], cmd.args[4], rom_id);
index 30bff6c..b7800b3 100644 (file)
@@ -103,7 +103,7 @@ config TI_EMIF
          temperature changes
 
 config OMAP_GPMC
-       bool "Texas Instruments OMAP SoC GPMC driver" if COMPILE_TEST
+       tristate "Texas Instruments OMAP SoC GPMC driver"
        depends on OF_ADDRESS
        select GPIOLIB
        help
index c267283..e749dcb 100644 (file)
@@ -544,20 +544,27 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        smc_np = of_parse_phandle(dev->of_node, "atmel,smc", 0);
 
        ebi->smc.regmap = syscon_node_to_regmap(smc_np);
-       if (IS_ERR(ebi->smc.regmap))
-               return PTR_ERR(ebi->smc.regmap);
+       if (IS_ERR(ebi->smc.regmap)) {
+               ret = PTR_ERR(ebi->smc.regmap);
+               goto put_node;
+       }
 
        ebi->smc.layout = atmel_hsmc_get_reg_layout(smc_np);
-       if (IS_ERR(ebi->smc.layout))
-               return PTR_ERR(ebi->smc.layout);
+       if (IS_ERR(ebi->smc.layout)) {
+               ret = PTR_ERR(ebi->smc.layout);
+               goto put_node;
+       }
 
        ebi->smc.clk = of_clk_get(smc_np, 0);
        if (IS_ERR(ebi->smc.clk)) {
-               if (PTR_ERR(ebi->smc.clk) != -ENOENT)
-                       return PTR_ERR(ebi->smc.clk);
+               if (PTR_ERR(ebi->smc.clk) != -ENOENT) {
+                       ret = PTR_ERR(ebi->smc.clk);
+                       goto put_node;
+               }
 
                ebi->smc.clk = NULL;
        }
+       of_node_put(smc_np);
        ret = clk_prepare_enable(ebi->smc.clk);
        if (ret)
                return ret;
@@ -608,6 +615,10 @@ static int atmel_ebi_probe(struct platform_device *pdev)
        }
 
        return of_platform_populate(np, NULL, NULL, dev);
+
+put_node:
+       of_node_put(smc_np);
+       return ret;
 }
 
 static __maybe_unused int atmel_ebi_resume(struct device *dev)
index 1441200..76c82e9 100644 (file)
@@ -857,7 +857,6 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct brcmstb_dpfe_priv *priv;
-       struct resource *res;
        int ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -869,22 +868,19 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
        mutex_init(&priv->lock);
        platform_set_drvdata(pdev, priv);
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-cpu");
-       priv->regs = devm_ioremap_resource(dev, res);
+       priv->regs = devm_platform_ioremap_resource_byname(pdev, "dpfe-cpu");
        if (IS_ERR(priv->regs)) {
                dev_err(dev, "couldn't map DCPU registers\n");
                return -ENODEV;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-dmem");
-       priv->dmem = devm_ioremap_resource(dev, res);
+       priv->dmem = devm_platform_ioremap_resource_byname(pdev, "dpfe-dmem");
        if (IS_ERR(priv->dmem)) {
                dev_err(dev, "Couldn't map DCPU data memory\n");
                return -ENOENT;
        }
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dpfe-imem");
-       priv->imem = devm_ioremap_resource(dev, res);
+       priv->imem = devm_platform_ioremap_resource_byname(pdev, "dpfe-imem");
        if (IS_ERR(priv->imem)) {
                dev_err(dev, "Couldn't map DCPU instruction memory\n");
                return -ENOENT;
index 872addd..b32005b 100644 (file)
@@ -115,8 +115,7 @@ static int da8xx_ddrctl_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ddrctl = devm_ioremap_resource(dev, res);
+       ddrctl = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(ddrctl)) {
                dev_err(dev, "unable to map memory controller registers\n");
                return PTR_ERR(ddrctl);
index ecc78d6..6c2a421 100644 (file)
@@ -1025,10 +1025,8 @@ static struct emif_data *__init_or_module get_device_details(
        temp    = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
        dev_info = devm_kzalloc(dev, sizeof(*dev_info), GFP_KERNEL);
 
-       if (!emif || !temp || !dev_info) {
-               dev_err(dev, "%s:%d: allocation error\n", __func__, __LINE__);
+       if (!emif || !temp || !dev_info)
                goto error;
-       }
 
        memcpy(temp, pd, sizeof(*pd));
        pd = temp;
@@ -1067,9 +1065,6 @@ static struct emif_data *__init_or_module get_device_details(
                temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL);
                if (temp)
                        memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
-               else
-                       dev_warn(dev, "%s:%d: allocation error\n", __func__,
-                               __LINE__);
                pd->custom_configs = temp;
        }
 
@@ -1084,8 +1079,6 @@ static struct emif_data *__init_or_module get_device_details(
                        memcpy(temp, pd->timings, size);
                        pd->timings = temp;
                } else {
-                       dev_warn(dev, "%s:%d: allocation error\n", __func__,
-                               __LINE__);
                        get_default_timings(emif);
                }
        } else {
@@ -1098,8 +1091,6 @@ static struct emif_data *__init_or_module get_device_details(
                        memcpy(temp, pd->min_tck, sizeof(*pd->min_tck));
                        pd->min_tck = temp;
                } else {
-                       dev_warn(dev, "%s:%d: allocation error\n", __func__,
-                               __LINE__);
                        pd->min_tck = &lpddr2_jedec_min_tck;
                }
        } else {
@@ -1116,7 +1107,6 @@ error:
 static int __init_or_module emif_probe(struct platform_device *pdev)
 {
        struct emif_data        *emif;
-       struct resource         *res;
        int                     irq, ret;
 
        if (pdev->dev.of_node)
@@ -1135,8 +1125,7 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
        emif->dev = &pdev->dev;
        platform_set_drvdata(pdev, emif);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       emif->base = devm_ioremap_resource(emif->dev, res);
+       emif->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(emif->base))
                goto error;
 
index f8ea592..7fc9f57 100644 (file)
@@ -172,7 +172,6 @@ out:
 static int ccf_probe(struct platform_device *pdev)
 {
        struct ccf_private *ccf;
-       struct resource *r;
        const struct of_device_id *match;
        u32 errinten;
        int ret, irq;
@@ -185,13 +184,7 @@ static int ccf_probe(struct platform_device *pdev)
        if (!ccf)
                return -ENOMEM;
 
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
-               return -ENXIO;
-       }
-
-       ccf->regs = devm_ioremap_resource(&pdev->dev, r);
+       ccf->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(ccf->regs))
                return PTR_ERR(ccf->regs);
 
index 2f6939d..e83b61c 100644 (file)
@@ -287,8 +287,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
        }
 
        /* legacy dts may still use "simple-bus" compatible */
-       ret = of_platform_populate(dev->dev.of_node, NULL, NULL,
-                                       &dev->dev);
+       ret = of_platform_default_populate(dev->dev.of_node, NULL, &dev->dev);
        if (ret)
                goto err_free_nandirq;
 
index ed11887..2351f27 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/err.h>
 #include <linux/clk.h>
@@ -1889,16 +1890,6 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
 }
 
 #ifdef CONFIG_OF
-static const struct of_device_id gpmc_dt_ids[] = {
-       { .compatible = "ti,omap2420-gpmc" },
-       { .compatible = "ti,omap2430-gpmc" },
-       { .compatible = "ti,omap3430-gpmc" },   /* omap3430 & omap3630 */
-       { .compatible = "ti,omap4430-gpmc" },   /* omap4430 & omap4460 & omap543x */
-       { .compatible = "ti,am3352-gpmc" },     /* am335x devices */
-       { .compatible = "ti,am64-gpmc" },
-       { }
-};
-
 static void gpmc_cs_set_name(int cs, const char *name)
 {
        struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
@@ -2257,11 +2248,9 @@ no_timings:
        if (!of_platform_device_create(child, NULL, &pdev->dev))
                goto err_child_fail;
 
-       /* is child a common bus? */
-       if (of_match_node(of_default_bus_match_table, child))
-               /* create children and other common bus children */
-               if (of_platform_default_populate(child, NULL, &pdev->dev))
-                       goto err_child_fail;
+       /* create children and other common bus children */
+       if (of_platform_default_populate(child, NULL, &pdev->dev))
+               goto err_child_fail;
 
        return 0;
 
@@ -2278,6 +2267,8 @@ err:
        return ret;
 }
 
+static const struct of_device_id gpmc_dt_ids[];
+
 static int gpmc_probe_dt(struct platform_device *pdev)
 {
        int ret;
@@ -2644,6 +2635,19 @@ static int gpmc_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);
 
+#ifdef CONFIG_OF
+static const struct of_device_id gpmc_dt_ids[] = {
+       { .compatible = "ti,omap2420-gpmc" },
+       { .compatible = "ti,omap2430-gpmc" },
+       { .compatible = "ti,omap3430-gpmc" },   /* omap3430 & omap3630 */
+       { .compatible = "ti,omap4430-gpmc" },   /* omap4430 & omap4460 & omap543x */
+       { .compatible = "ti,am3352-gpmc" },     /* am335x devices */
+       { .compatible = "ti,am64-gpmc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
+#endif
+
 static struct platform_driver gpmc_driver = {
        .probe          = gpmc_probe,
        .remove         = gpmc_remove,
@@ -2654,8 +2658,7 @@ static struct platform_driver gpmc_driver = {
        },
 };
 
-static __init int gpmc_init(void)
-{
-       return platform_driver_register(&gpmc_driver);
-}
-postcore_initcall(gpmc_init);
+module_platform_driver(gpmc_driver);
+
+MODULE_DESCRIPTION("Texas Instruments GPMC driver");
+MODULE_LICENSE("GPL");
index e4cc64f..f4d6224 100644 (file)
@@ -229,8 +229,7 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
 
        rpc->dev = dev;
 
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-       rpc->base = devm_ioremap_resource(&pdev->dev, res);
+       rpc->base = devm_platform_ioremap_resource_byname(pdev, "regs");
        if (IS_ERR(rpc->base))
                return PTR_ERR(rpc->base);
 
@@ -458,7 +457,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
        case RPCIF_DATA_OUT:
                while (pos < rpc->xferlen) {
                        u32 bytes_left = rpc->xferlen - pos;
-                       u32 nbytes, data[2];
+                       u32 nbytes, data[2], *p = data;
 
                        smcr = rpc->smcr | RPCIF_SMCR_SPIE;
 
@@ -471,15 +470,9 @@ int rpcif_manual_xfer(struct rpcif *rpc)
                        regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
 
                        memcpy(data, rpc->buffer + pos, nbytes);
-                       if (nbytes == 8) {
-                               regmap_write(rpc->regmap, RPCIF_SMWDR1,
-                                            data[0]);
-                               regmap_write(rpc->regmap, RPCIF_SMWDR0,
-                                            data[1]);
-                       } else {
-                               regmap_write(rpc->regmap, RPCIF_SMWDR0,
-                                            data[0]);
-                       }
+                       if (nbytes == 8)
+                               regmap_write(rpc->regmap, RPCIF_SMWDR1, *p++);
+                       regmap_write(rpc->regmap, RPCIF_SMWDR0, *p);
 
                        regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
                        ret = wait_msg_xfer_end(rpc);
@@ -521,7 +514,7 @@ int rpcif_manual_xfer(struct rpcif *rpc)
                }
                while (pos < rpc->xferlen) {
                        u32 bytes_left = rpc->xferlen - pos;
-                       u32 nbytes, data[2];
+                       u32 nbytes, data[2], *p = data;
 
                        /* nbytes may only be 1, 2, 4, or 8 */
                        nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
@@ -537,15 +530,9 @@ int rpcif_manual_xfer(struct rpcif *rpc)
                        if (ret)
                                goto err_out;
 
-                       if (nbytes == 8) {
-                               regmap_read(rpc->regmap, RPCIF_SMRDR1,
-                                           &data[0]);
-                               regmap_read(rpc->regmap, RPCIF_SMRDR0,
-                                           &data[1]);
-                       } else {
-                               regmap_read(rpc->regmap, RPCIF_SMRDR0,
-                                           &data[0]);
-                       }
+                       if (nbytes == 8)
+                               regmap_read(rpc->regmap, RPCIF_SMRDR1, p++);
+                       regmap_read(rpc->regmap, RPCIF_SMRDR0, p);
                        memcpy(rpc->buffer + pos, data, nbytes);
 
                        pos += nbytes;
@@ -651,6 +638,7 @@ static int rpcif_probe(struct platform_device *pdev)
        struct platform_device *vdev;
        struct device_node *flash;
        const char *name;
+       int ret;
 
        flash = of_get_next_child(pdev->dev.of_node, NULL);
        if (!flash) {
@@ -674,7 +662,14 @@ static int rpcif_probe(struct platform_device *pdev)
                return -ENOMEM;
        vdev->dev.parent = &pdev->dev;
        platform_set_drvdata(pdev, vdev);
-       return platform_device_add(vdev);
+
+       ret = platform_device_add(vdev);
+       if (ret) {
+               platform_device_put(vdev);
+               return ret;
+       }
+
+       return 0;
 }
 
 static int rpcif_remove(struct platform_device *pdev)
index 9c83189..4733e78 100644 (file)
@@ -1322,7 +1322,6 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
  */
 static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
 {
-       int counters_size;
        int ret, i;
 
        dmc->num_counters = devfreq_event_get_edev_count(dmc->dev,
@@ -1332,8 +1331,8 @@ static int exynos5_performance_counters_init(struct exynos5_dmc *dmc)
                return dmc->num_counters;
        }
 
-       counters_size = sizeof(struct devfreq_event_dev) * dmc->num_counters;
-       dmc->counter = devm_kzalloc(dmc->dev, counters_size, GFP_KERNEL);
+       dmc->counter = devm_kcalloc(dmc->dev, dmc->num_counters,
+                                   sizeof(*dmc->counter), GFP_KERNEL);
        if (!dmc->counter)
                return -ENOMEM;
 
index 44b4a40..6d22d1e 100644 (file)
@@ -716,7 +716,6 @@ del_provider:
 
 static int tegra_mc_probe(struct platform_device *pdev)
 {
-       struct resource *res;
        struct tegra_mc *mc;
        u64 mask;
        int err;
@@ -741,8 +740,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
        /* length of MC tick in nanoseconds */
        mc->tick = 30;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mc->regs = devm_ioremap_resource(&pdev->dev, res);
+       mc->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mc->regs))
                return PTR_ERR(mc->regs);
 
index 51d20c2..f81e7df 100644 (file)
@@ -328,7 +328,6 @@ static int aemif_probe(struct platform_device *pdev)
 {
        int i;
        int ret = -ENODEV;
-       struct resource *res;
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
        struct device_node *child_np;
@@ -362,8 +361,7 @@ static int aemif_probe(struct platform_device *pdev)
        else if (pdata)
                aemif->cs_offset = pdata->cs_offset;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       aemif->base = devm_ioremap_resource(dev, res);
+       aemif->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(aemif->base)) {
                ret = PTR_ERR(aemif->base);
                goto error;
index 179fec2..31d6266 100644 (file)
@@ -290,9 +290,9 @@ static int ti_emif_probe(struct platform_device *pdev)
 
        emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev,
-                                                                         res);
+       emif_data->pm_data.ti_emif_base_addr_virt = devm_platform_get_and_ioremap_resource(pdev,
+                                                                                          0,
+                                                                                          &res);
        if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) {
                ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt);
                return ret;
index e90adfa..9b3ba2d 100644 (file)
@@ -6658,13 +6658,13 @@ static int mpt_summary_proc_show(struct seq_file *m, void *v)
 static int mpt_version_proc_show(struct seq_file *m, void *v)
 {
        u8       cb_idx;
-       int      scsi, fc, sas, lan, ctl, targ, dmp;
+       int      scsi, fc, sas, lan, ctl, targ;
        char    *drvname;
 
        seq_printf(m, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
        seq_printf(m, "  Fusion MPT base driver\n");
 
-       scsi = fc = sas = lan = ctl = targ = dmp = 0;
+       scsi = fc = sas = lan = ctl = targ = 0;
        for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
                drvname = NULL;
                if (MptCallbacks[cb_idx]) {
index e008d82..a13506d 100644 (file)
@@ -111,10 +111,10 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
 
        if (contiguous) {
                if (is_power_of_2(page_size))
-                       paddr = (u64) (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
-                                                               total_size, NULL, page_size);
+                       paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
+                                                                    total_size, NULL, page_size);
                else
-                       paddr = (u64) (uintptr_t) gen_pool_alloc(vm->dram_pg_pool, total_size);
+                       paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
                                "failed to allocate %llu contiguous pages with total size of %llu\n",
@@ -150,12 +150,12 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                for (i = 0 ; i < num_pgs ; i++) {
                        if (is_power_of_2(page_size))
                                phys_pg_pack->pages[i] =
-                                               (u64) gen_pool_dma_alloc_align(vm->dram_pg_pool,
-                                                                               page_size, NULL,
-                                                                               page_size);
+                                       (uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
+                                                                           page_size, NULL,
+                                                                           page_size);
                        else
-                               phys_pg_pack->pages[i] = (u64) gen_pool_alloc(vm->dram_pg_pool,
-                                                                               page_size);
+                               phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
+                                                                       page_size);
                        if (!phys_pg_pack->pages[i]) {
                                dev_err(hdev->dev,
                                        "Failed to allocate device memory (out of memory)\n");
index 4e67c14..506dc90 100644 (file)
@@ -993,7 +993,7 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
                return -EEXIST;
 
        md->reset_done |= type;
-       err = mmc_hw_reset(host);
+       err = mmc_hw_reset(host->card);
        /* Ensure we switch back to the correct partition */
        if (err) {
                struct mmc_blk_data *main_md =
@@ -1880,6 +1880,31 @@ static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq)
               brq->data.error || brq->cmd.resp[0] & CMD_ERRORS;
 }
 
+static int mmc_spi_err_check(struct mmc_card *card)
+{
+       u32 status = 0;
+       int err;
+
+       /*
+        * SPI does not have a TRAN state we have to wait on, instead the
+        * card is ready again when it no longer holds the line LOW.
+        * We still have to ensure two things here before we know the write
+        * was successful:
+        * 1. The card has not disconnected during busy and we actually read our
+        * own pull-up, thinking it was still connected, so ensure it
+        * still responds.
+        * 2. Check for any error bits, in particular R1_SPI_IDLE to catch a
+        * just reconnected card after being disconnected during busy.
+        */
+       err = __mmc_send_status(card, &status, 0);
+       if (err)
+               return err;
+       /* All R1 and R2 bits of SPI are errors in our case */
+       if (status)
+               return -EIO;
+       return 0;
+}
+
 static int mmc_blk_busy_cb(void *cb_data, bool *busy)
 {
        struct mmc_blk_busy_data *data = cb_data;
@@ -1903,9 +1928,16 @@ static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
        struct mmc_blk_busy_data cb_data;
        int err;
 
-       if (mmc_host_is_spi(card->host) || rq_data_dir(req) == READ)
+       if (rq_data_dir(req) == READ)
                return 0;
 
+       if (mmc_host_is_spi(card->host)) {
+               err = mmc_spi_err_check(card);
+               if (err)
+                       mqrq->brq.data.bytes_xfered = 0;
+               return err;
+       }
+
        cb_data.card = card;
        cb_data.status = 0;
        err = __mmc_poll_for_busy(card->host, 0, MMC_BLK_TIMEOUT_MS,
@@ -2350,6 +2382,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
        struct mmc_blk_data *md;
        int devidx, ret;
        char cap_str[10];
+       bool cache_enabled = false;
+       bool fua_enabled = false;
 
        devidx = ida_simple_get(&mmc_blk_ida, 0, max_devices, GFP_KERNEL);
        if (devidx < 0) {
@@ -2429,13 +2463,17 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
                        md->flags |= MMC_BLK_CMD23;
        }
 
-       if (mmc_card_mmc(card) &&
-           md->flags & MMC_BLK_CMD23 &&
+       if (md->flags & MMC_BLK_CMD23 &&
            ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
             card->ext_csd.rel_sectors)) {
                md->flags |= MMC_BLK_REL_WR;
-               blk_queue_write_cache(md->queue.queue, true, true);
+               fua_enabled = true;
+               cache_enabled = true;
        }
+       if (mmc_cache_enabled(card->host))
+               cache_enabled  = true;
+
+       blk_queue_write_cache(md->queue.queue, cache_enabled, fua_enabled);
 
        string_get_size((u64)size, 512, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
index 368f104..c6ae16d 100644 (file)
@@ -1995,7 +1995,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
 
 /**
  * mmc_hw_reset - reset the card in hardware
- * @host: MMC host to which the card is attached
+ * @card: card to be reset
  *
  * Hard reset the card. This function is only for upper layers, like the
  * block layer or card drivers. You cannot use it in host drivers (struct
@@ -2003,8 +2003,9 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
  *
  * Return: 0 on success, -errno on failure
  */
-int mmc_hw_reset(struct mmc_host *host)
+int mmc_hw_reset(struct mmc_card *card)
 {
+       struct mmc_host *host = card->host;
        int ret;
 
        ret = host->bus_ops->hw_reset(host);
index e6a2fd2..8d9bcee 100644 (file)
@@ -2325,10 +2325,9 @@ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
 static int mmc_test_reset(struct mmc_test_card *test)
 {
        struct mmc_card *card = test->card;
-       struct mmc_host *host = card->host;
        int err;
 
-       err = mmc_hw_reset(host);
+       err = mmc_hw_reset(card);
        if (!err) {
                /*
                 * Reset will re-enable the card's command queue, but tests
index 9c13f2c..4566d7f 100644 (file)
@@ -62,8 +62,8 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
         * excepted the last element which has no constraint on idmasize
         */
        for_each_sg(data->sg, sg, data->sg_len - 1, i) {
-               if (!IS_ALIGNED(data->sg->offset, sizeof(u32)) ||
-                   !IS_ALIGNED(data->sg->length, SDMMC_IDMA_BURST)) {
+               if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
+                   !IS_ALIGNED(sg->length, SDMMC_IDMA_BURST)) {
                        dev_err(mmc_dev(host->mmc),
                                "unaligned scatterlist: ofst:%x length:%d\n",
                                data->sg->offset, data->sg->length);
@@ -71,7 +71,7 @@ static int sdmmc_idma_validate_data(struct mmci_host *host,
                }
        }
 
-       if (!IS_ALIGNED(data->sg->offset, sizeof(u32))) {
+       if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
                dev_err(mmc_dev(host->mmc),
                        "unaligned last scatterlist: ofst:%x length:%d\n",
                        data->sg->offset, data->sg->length);
index 2797a9c..ddb5ca2 100644 (file)
@@ -144,9 +144,9 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
                return clk_get_rate(priv->clk);
 
        if (priv->clkh) {
+               /* HS400 with 4TAP needs different clock settings */
                bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
-               bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
-                                     (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+               bool need_slow_clkh = host->mmc->ios.timing == MMC_TIMING_MMC_HS400;
                clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
                ref_clk = priv->clkh;
        }
@@ -396,10 +396,10 @@ static void renesas_sdhi_hs400_complete(struct mmc_host *mmc)
                        SH_MOBILE_SDHI_SCC_TMPPORT2_HS400OSEL) |
                        sd_scc_read32(host, priv, SH_MOBILE_SDHI_SCC_TMPPORT2));
 
-       /* Set the sampling clock selection range of HS400 mode */
        sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_DTCNTL,
                       SH_MOBILE_SDHI_SCC_DTCNTL_TAPEN |
-                      0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT);
+                      sd_scc_read32(host, priv,
+                                    SH_MOBILE_SDHI_SCC_DTCNTL));
 
        /* Avoid bad TAP */
        if (bad_taps & BIT(priv->tap_set)) {
index 666cee4..08e8384 100644 (file)
@@ -241,16 +241,6 @@ static void xenon_voltage_switch(struct sdhci_host *host)
 {
        /* Wait for 5ms after set 1.8V signal enable bit */
        usleep_range(5000, 5500);
-
-       /*
-        * For some reason the controller's Host Control2 register reports
-        * the bit representing 1.8V signaling as 0 when read after it was
-        * written as 1. Subsequent read reports 1.
-        *
-        * Since this may cause some issues, do an empty read of the Host
-        * Control2 register here to circumvent this.
-        */
-       sdhci_readw(host, SDHCI_HOST_CONTROL2);
 }
 
 static unsigned int xenon_get_max_clock(struct sdhci_host *host)
index 413b000..9e28219 100644 (file)
@@ -670,6 +670,8 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
        struct ocelot *ocelot = ds->priv;
        struct felix *felix = ocelot_to_felix(ocelot);
        enum dsa_tag_protocol old_proto = felix->tag_proto;
+       bool cpu_port_active = false;
+       struct dsa_port *dp;
        int err;
 
        if (proto != DSA_TAG_PROTO_SEVILLE &&
@@ -677,6 +679,27 @@ static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
            proto != DSA_TAG_PROTO_OCELOT_8021Q)
                return -EPROTONOSUPPORT;
 
+       /* We don't support multiple CPU ports, yet the DT blob may have
+        * multiple CPU ports defined. The first CPU port is the active one,
+        * the others are inactive. In this case, DSA will call
+        * ->change_tag_protocol() multiple times, once per CPU port.
+        * Since we implement the tagging protocol change towards "ocelot" or
+        * "seville" as effectively initializing the NPI port, what we are
+        * doing is effectively changing who the NPI port is to the last @cpu
+        * argument passed, which is an unused DSA CPU port and not the one
+        * that should actively pass traffic.
+        * Suppress DSA's calls on CPU ports that are inactive.
+        */
+       dsa_switch_for_each_user_port(dp, ds) {
+               if (dp->cpu_dp->index == cpu) {
+                       cpu_port_active = true;
+                       break;
+               }
+       }
+
+       if (!cpu_port_active)
+               return 0;
+
        felix_del_tag_protocol(ds, cpu, old_proto);
 
        err = felix_set_tag_protocol(ds, cpu, proto);
index 8d382b2..52a8566 100644 (file)
@@ -2316,7 +2316,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
 
        err = dsa_register_switch(ds);
        if (err) {
-               dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+               dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
                goto err_register_ds;
        }
 
index 1aa7973..060165a 100644 (file)
@@ -9,34 +9,46 @@ menuconfig NET_DSA_REALTEK
        help
          Select to enable support for Realtek Ethernet switch chips.
 
+         Note that at least one interface driver must be enabled for the
+         subdrivers to be loaded. Moreover, an interface driver cannot achieve
+         anything without at least one subdriver enabled.
+
+if NET_DSA_REALTEK
+
 config NET_DSA_REALTEK_MDIO
-       tristate "Realtek MDIO connected switch driver"
-       depends on NET_DSA_REALTEK
+       tristate "Realtek MDIO interface driver"
        depends on OF
+       depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+       depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+       depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
        help
          Select to enable support for registering switches configured
          through MDIO.
 
 config NET_DSA_REALTEK_SMI
-       tristate "Realtek SMI connected switch driver"
-       depends on NET_DSA_REALTEK
+       tristate "Realtek SMI interface driver"
        depends on OF
+       depends on NET_DSA_REALTEK_RTL8365MB || NET_DSA_REALTEK_RTL8366RB
+       depends on NET_DSA_REALTEK_RTL8365MB || !NET_DSA_REALTEK_RTL8365MB
+       depends on NET_DSA_REALTEK_RTL8366RB || !NET_DSA_REALTEK_RTL8366RB
        help
          Select to enable support for registering switches connected
          through SMI.
 
 config NET_DSA_REALTEK_RTL8365MB
        tristate "Realtek RTL8365MB switch subdriver"
-       depends on NET_DSA_REALTEK
-       depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+       imply NET_DSA_REALTEK_SMI
+       imply NET_DSA_REALTEK_MDIO
        select NET_DSA_TAG_RTL8_4
        help
          Select to enable support for Realtek RTL8365MB-VC and RTL8367S.
 
 config NET_DSA_REALTEK_RTL8366RB
        tristate "Realtek RTL8366RB switch subdriver"
-       depends on NET_DSA_REALTEK
-       depends on NET_DSA_REALTEK_SMI || NET_DSA_REALTEK_MDIO
+       imply NET_DSA_REALTEK_SMI
+       imply NET_DSA_REALTEK_MDIO
        select NET_DSA_TAG_RTL4_A
        help
-         Select to enable support for Realtek RTL8366RB
+         Select to enable support for Realtek RTL8366RB.
+
+endif
index 2243d3d..6cec559 100644 (file)
@@ -546,11 +546,6 @@ static const struct of_device_id realtek_smi_of_match[] = {
                .data = &rtl8366rb_variant,
        },
 #endif
-       {
-               /* FIXME: add support for RTL8366S and more */
-               .compatible = "realtek,rtl8366s",
-               .data = NULL,
-       },
 #if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8365MB)
        {
                .compatible = "realtek,rtl8365mb",
index 33f1a13..24d715c 100644 (file)
@@ -486,8 +486,8 @@ int aq_nic_start(struct aq_nic_s *self)
        if (err < 0)
                goto err_exit;
 
-       for (i = 0U, aq_vec = self->aq_vec[0];
-               self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+       for (i = 0U; self->aq_vecs > i; ++i) {
+               aq_vec = self->aq_vec[i];
                err = aq_vec_start(aq_vec);
                if (err < 0)
                        goto err_exit;
@@ -517,8 +517,8 @@ int aq_nic_start(struct aq_nic_s *self)
                mod_timer(&self->polling_timer, jiffies +
                          AQ_CFG_POLLING_TIMER_INTERVAL);
        } else {
-               for (i = 0U, aq_vec = self->aq_vec[0];
-                       self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+               for (i = 0U; self->aq_vecs > i; ++i) {
+                       aq_vec = self->aq_vec[i];
                        err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
                                                    aq_vec_isr, aq_vec,
                                                    aq_vec_get_affinity_mask(aq_vec));
index f4774cf..6ab1f32 100644 (file)
@@ -43,8 +43,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
        if (!self) {
                err = -EINVAL;
        } else {
-               for (i = 0U, ring = self->ring[0];
-                       self->tx_rings > i; ++i, ring = self->ring[i]) {
+               for (i = 0U; self->tx_rings > i; ++i) {
+                       ring = self->ring[i];
                        u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
                        ring[AQ_VEC_RX_ID].stats.rx.polls++;
                        u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
@@ -182,8 +182,8 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
        self->aq_hw_ops = aq_hw_ops;
        self->aq_hw = aq_hw;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
                if (err < 0)
                        goto err_exit;
@@ -224,8 +224,8 @@ int aq_vec_start(struct aq_vec_s *self)
        unsigned int i = 0U;
        int err = 0;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
                                                        &ring[AQ_VEC_TX_ID]);
                if (err < 0)
@@ -248,8 +248,8 @@ void aq_vec_stop(struct aq_vec_s *self)
        struct aq_ring_s *ring = NULL;
        unsigned int i = 0U;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
                                                 &ring[AQ_VEC_TX_ID]);
 
@@ -268,8 +268,8 @@ void aq_vec_deinit(struct aq_vec_s *self)
        if (!self)
                goto err_exit;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
                aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
        }
@@ -297,8 +297,8 @@ void aq_vec_ring_free(struct aq_vec_s *self)
        if (!self)
                goto err_exit;
 
-       for (i = 0U, ring = self->ring[0];
-               self->tx_rings > i; ++i, ring = self->ring[i]) {
+       for (i = 0U; self->tx_rings > i; ++i) {
+               ring = self->ring[i];
                aq_ring_free(&ring[AQ_VEC_TX_ID]);
                if (i < self->rx_rings)
                        aq_ring_free(&ring[AQ_VEC_RX_ID]);
index 1c28495..874fad0 100644 (file)
@@ -3253,6 +3253,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
                }
                qidx = bp->tc_to_qidx[j];
                ring->queue_id = bp->q_info[qidx].queue_id;
+               spin_lock_init(&txr->xdp_tx_lock);
                if (i < bp->tx_nr_rings_xdp)
                        continue;
                if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -10338,6 +10339,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        if (irq_re_init)
                udp_tunnel_nic_reset_ntf(bp->dev);
 
+       if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
+               if (!static_key_enabled(&bnxt_xdp_locking_key))
+                       static_branch_enable(&bnxt_xdp_locking_key);
+       } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
+               static_branch_disable(&bnxt_xdp_locking_key);
+       }
        set_bit(BNXT_STATE_OPEN, &bp->state);
        bnxt_enable_int(bp);
        /* Enable TX queues */
index 61aa3e8..98453a7 100644 (file)
@@ -593,7 +593,8 @@ struct nqe_cn {
 #define BNXT_MAX_MTU           9500
 #define BNXT_MAX_PAGE_MODE_MTU \
        ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -       \
-        XDP_PACKET_HEADROOM)
+        XDP_PACKET_HEADROOM - \
+        SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
 
 #define BNXT_MIN_PKT_SIZE      52
 
@@ -800,6 +801,8 @@ struct bnxt_tx_ring_info {
        u32                     dev_state;
 
        struct bnxt_ring_struct tx_ring_struct;
+       /* Synchronize simultaneous xdp_xmit on same ring */
+       spinlock_t              xdp_tx_lock;
 };
 
 #define BNXT_LEGACY_COAL_CMPL_PARAMS                                   \
index 52fad0f..03b1d6c 100644 (file)
@@ -20,6 +20,8 @@
 #include "bnxt.h"
 #include "bnxt_xdp.h"
 
+DEFINE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len)
@@ -227,11 +229,16 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
        ring = smp_processor_id() % bp->tx_nr_rings_xdp;
        txr = &bp->tx_ring[ring];
 
+       if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING)
+               return -EINVAL;
+
+       if (static_branch_unlikely(&bnxt_xdp_locking_key))
+               spin_lock(&txr->xdp_tx_lock);
+
        for (i = 0; i < num_frames; i++) {
                struct xdp_frame *xdp = frames[i];
 
-               if (!txr || !bnxt_tx_avail(bp, txr) ||
-                   !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+               if (!bnxt_tx_avail(bp, txr))
                        break;
 
                mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
@@ -250,6 +257,9 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
        }
 
+       if (static_branch_unlikely(&bnxt_xdp_locking_key))
+               spin_unlock(&txr->xdp_tx_lock);
+
        return nxmit;
 }
 
index 0df40c3..067bb5e 100644 (file)
@@ -10,6 +10,8 @@
 #ifndef BNXT_XDP_H
 #define BNXT_XDP_H
 
+DECLARE_STATIC_KEY_FALSE(bnxt_xdp_locking_key);
+
 struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct bnxt_tx_ring_info *txr,
                                   dma_addr_t mapping, u32 len);
index 2dd79af..9a41145 100644 (file)
@@ -76,7 +76,7 @@ static inline void bcmgenet_writel(u32 value, void __iomem *offset)
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                __raw_writel(value, offset);
        else
-               writel(value, offset);
+               writel_relaxed(value, offset);
 }
 
 static inline u32 bcmgenet_readl(void __iomem *offset)
@@ -84,7 +84,7 @@ static inline u32 bcmgenet_readl(void __iomem *offset)
        if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                return __raw_readl(offset);
        else
-               return readl(offset);
+               return readl_relaxed(offset);
 }
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
index 800d5ce..e475be2 100644 (file)
@@ -1658,6 +1658,7 @@ static void macb_tx_restart(struct macb_queue *queue)
        unsigned int head = queue->tx_head;
        unsigned int tail = queue->tx_tail;
        struct macb *bp = queue->bp;
+       unsigned int head_idx, tbqp;
 
        if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                queue_writel(queue, ISR, MACB_BIT(TXUBR));
@@ -1665,6 +1666,13 @@ static void macb_tx_restart(struct macb_queue *queue)
        if (head == tail)
                return;
 
+       tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+       tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+       head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
+
+       if (tbqp == head_idx)
+               return;
+
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 }
 
index d5356db..caf4802 100644 (file)
@@ -1835,11 +1835,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
                priv->rxdes0_edorr_mask = BIT(30);
                priv->txdes0_edotr_mask = BIT(30);
                priv->is_aspeed = true;
-               /* Disable ast2600 problematic HW arbitration */
-               if (of_device_is_compatible(np, "aspeed,ast2600-mac")) {
-                       iowrite32(FTGMAC100_TM_DEFAULT,
-                                 priv->base + FTGMAC100_OFFSET_TM);
-               }
        } else {
                priv->rxdes0_edorr_mask = BIT(15);
                priv->txdes0_edotr_mask = BIT(15);
@@ -1911,6 +1906,11 @@ static int ftgmac100_probe(struct platform_device *pdev)
                err = ftgmac100_setup_clk(priv);
                if (err)
                        goto err_phy_connect;
+
+               /* Disable ast2600 problematic HW arbitration */
+               if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
+                       iowrite32(FTGMAC100_TM_DEFAULT,
+                                 priv->base + FTGMAC100_OFFSET_TM);
        }
 
        /* Default ring sizes */
index 763d2c7..5750f9a 100644 (file)
@@ -489,11 +489,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
        info->phc_index = -1;
 
        fman_node = of_get_parent(mac_node);
-       if (fman_node)
+       if (fman_node) {
                ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
+               of_node_put(fman_node);
+       }
 
-       if (ptp_node)
+       if (ptp_node) {
                ptp_dev = of_find_device_by_node(ptp_node);
+               of_node_put(ptp_node);
+       }
 
        if (ptp_dev)
                ptp = platform_get_drvdata(ptp_dev);
index 5f5f8c5..c8cb541 100644 (file)
@@ -167,7 +167,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
        base = of_iomap(node, 0);
        if (!base) {
                err = -ENOMEM;
-               goto err_close;
+               goto err_put;
        }
 
        err = fsl_mc_allocate_irqs(mc_dev);
@@ -210,6 +210,8 @@ err_free_mc_irq:
        fsl_mc_free_irqs(mc_dev);
 err_unmap:
        iounmap(base);
+err_put:
+       of_node_put(node);
 err_close:
        dprtc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
 err_free_mcp:
index 5d7aef7..fb5120d 100644 (file)
@@ -586,8 +586,8 @@ static int fun_get_dev_limits(struct fun_dev *fdev)
        /* Calculate the max QID based on SQ/CQ/doorbell counts.
         * SQ/CQ doorbells alternate.
         */
-       num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) /
-                 (fdev->db_stride * 4);
+       num_dbs = (pci_resource_len(pdev, 0) - NVME_REG_DBS) >>
+                 (2 + NVME_CAP_STRIDE(fdev->cap_reg));
        fdev->max_qid = min3(cq_count, sq_count, num_dbs / 2) - 1;
        fdev->kern_end_qid = fdev->max_qid + 1;
        return 0;
index 190590d..7dfcf78 100644 (file)
@@ -2871,7 +2871,6 @@ continue_reset:
        running = adapter->state == __IAVF_RUNNING;
 
        if (running) {
-               netdev->flags &= ~IFF_UP;
                netif_carrier_off(netdev);
                netif_tx_stop_all_queues(netdev);
                adapter->link_up = false;
@@ -2988,7 +2987,7 @@ continue_reset:
                 * to __IAVF_RUNNING
                 */
                iavf_up_complete(adapter);
-               netdev->flags |= IFF_UP;
+
                iavf_irq_enable(adapter, true);
        } else {
                iavf_change_state(adapter, __IAVF_DOWN);
@@ -3004,10 +3003,8 @@ continue_reset:
 reset_err:
        mutex_unlock(&adapter->client_lock);
        mutex_unlock(&adapter->crit_lock);
-       if (running) {
+       if (running)
                iavf_change_state(adapter, __IAVF_RUNNING);
-               netdev->flags |= IFF_UP;
-       }
        dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
        iavf_close(netdev);
 }
index d4f1874..8ed3c9a 100644 (file)
@@ -301,7 +301,6 @@ enum ice_vsi_state {
        ICE_VSI_NETDEV_REGISTERED,
        ICE_VSI_UMAC_FLTR_CHANGED,
        ICE_VSI_MMAC_FLTR_CHANGED,
-       ICE_VSI_VLAN_FLTR_CHANGED,
        ICE_VSI_PROMISC_CHANGED,
        ICE_VSI_STATE_NBITS             /* must be last */
 };
@@ -672,7 +671,7 @@ static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
 
 static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
 {
-       return !!vsi->xdp_prog;
+       return !!READ_ONCE(vsi->xdp_prog);
 }
 
 static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
index 5daade3..fba178e 100644 (file)
@@ -577,7 +577,7 @@ void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
 {
        struct net_device *netdev;
 
-       if (!vsi || vsi->type != ICE_VSI_PF || !vsi->arfs_fltr_list)
+       if (!vsi || vsi->type != ICE_VSI_PF)
                return;
 
        netdev = vsi->netdev;
@@ -599,7 +599,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
        int base_idx, i;
 
        if (!vsi || vsi->type != ICE_VSI_PF)
-               return -EINVAL;
+               return 0;
 
        pf = vsi->back;
        netdev = vsi->netdev;
@@ -636,7 +636,6 @@ void ice_remove_arfs(struct ice_pf *pf)
        if (!pf_vsi)
                return;
 
-       ice_free_cpu_rx_rmap(pf_vsi);
        ice_clear_arfs(pf_vsi);
 }
 
@@ -653,9 +652,5 @@ void ice_rebuild_arfs(struct ice_pf *pf)
                return;
 
        ice_remove_arfs(pf);
-       if (ice_set_cpu_rx_rmap(pf_vsi)) {
-               dev_err(ice_pf_to_dev(pf), "Failed to rebuild aRFS\n");
-               return;
-       }
        ice_init_arfs(pf_vsi);
 }
index af57eb1..85a9448 100644 (file)
@@ -58,7 +58,16 @@ int
 ice_fltr_set_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
                              u8 promisc_mask)
 {
-       return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+       struct ice_pf *pf = hw->back;
+       int result;
+
+       result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, false);
+       if (result)
+               dev_err(ice_pf_to_dev(pf),
+                       "Error setting promisc mode on VSI %i (rc=%d)\n",
+                       vsi->vsi_num, result);
+
+       return result;
 }
 
 /**
@@ -73,7 +82,16 @@ int
 ice_fltr_clear_vlan_vsi_promisc(struct ice_hw *hw, struct ice_vsi *vsi,
                                u8 promisc_mask)
 {
-       return ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+       struct ice_pf *pf = hw->back;
+       int result;
+
+       result = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_mask, true);
+       if (result)
+               dev_err(ice_pf_to_dev(pf),
+                       "Error clearing promisc mode on VSI %i (rc=%d)\n",
+                       vsi->vsi_num, result);
+
+       return result;
 }
 
 /**
@@ -87,7 +105,16 @@ int
 ice_fltr_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                           u16 vid)
 {
-       return ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+       struct ice_pf *pf = hw->back;
+       int result;
+
+       result = ice_clear_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+       if (result)
+               dev_err(ice_pf_to_dev(pf),
+                       "Error clearing promisc mode on VSI %i for VID %u (rc=%d)\n",
+                       ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+       return result;
 }
 
 /**
@@ -101,7 +128,16 @@ int
 ice_fltr_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
                         u16 vid)
 {
-       return ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+       struct ice_pf *pf = hw->back;
+       int result;
+
+       result = ice_set_vsi_promisc(hw, vsi_handle, promisc_mask, vid);
+       if (result)
+               dev_err(ice_pf_to_dev(pf),
+                       "Error setting promisc mode on VSI %i for VID %u (rc=%d)\n",
+                       ice_get_hw_vsi_num(hw, vsi_handle), vid, result);
+
+       return result;
 }
 
 /**
index b897926..6d19c58 100644 (file)
@@ -1480,6 +1480,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->tx_tstamps = &pf->ptp.port.tx;
                ring->dev = dev;
                ring->count = vsi->num_tx_desc;
+               ring->txq_teid = ICE_INVAL_TEID;
                if (dvm_ena)
                        ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
                else
@@ -2688,6 +2689,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                return;
 
        vsi->irqs_ready = false;
+       ice_free_cpu_rx_rmap(vsi);
+
        ice_for_each_q_vector(vsi, i) {
                u16 vector = i + base;
                int irq_num;
@@ -2701,7 +2704,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
                        continue;
 
                /* clear the affinity notifier in the IRQ descriptor */
-               irq_set_affinity_notifier(irq_num, NULL);
+               if (!IS_ENABLED(CONFIG_RFS_ACCEL))
+                       irq_set_affinity_notifier(irq_num, NULL);
 
                /* clear the affinity_mask in the IRQ descriptor */
                irq_set_affinity_hint(irq_num, NULL);
@@ -2983,6 +2987,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
                }
        }
 
+       if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
+               ice_clear_dflt_vsi(pf->first_sw);
        ice_fltr_remove_all(vsi);
        ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
        err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
index b588d79..5b11988 100644 (file)
@@ -243,8 +243,7 @@ static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
 static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 {
        return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
-              test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state) ||
-              test_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+              test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
 }
 
 /**
@@ -260,10 +259,15 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
        if (vsi->type != ICE_VSI_PF)
                return 0;
 
-       if (ice_vsi_has_non_zero_vlans(vsi))
-               status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
-       else
-               status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+       if (ice_vsi_has_non_zero_vlans(vsi)) {
+               promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+               status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,
+                                                      promisc_m);
+       } else {
+               status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                                 promisc_m, 0);
+       }
+
        return status;
 }
 
@@ -280,10 +284,15 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
        if (vsi->type != ICE_VSI_PF)
                return 0;
 
-       if (ice_vsi_has_non_zero_vlans(vsi))
-               status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi, promisc_m);
-       else
-               status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, 0);
+       if (ice_vsi_has_non_zero_vlans(vsi)) {
+               promisc_m |= (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX);
+               status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,
+                                                        promisc_m);
+       } else {
+               status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                                   promisc_m, 0);
+       }
+
        return status;
 }
 
@@ -302,7 +311,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        u32 changed_flags = 0;
-       u8 promisc_m;
        int err;
 
        if (!vsi->netdev)
@@ -320,7 +328,6 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
        if (ice_vsi_fltr_changed(vsi)) {
                clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
                clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);
-               clear_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
 
                /* grab the netdev's addr_list_lock */
                netif_addr_lock_bh(netdev);
@@ -369,29 +376,15 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                if (vsi->current_netdev_flags & IFF_ALLMULTI) {
-                       if (ice_vsi_has_non_zero_vlans(vsi))
-                               promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
-                       else
-                               promisc_m = ICE_MCAST_PROMISC_BITS;
-
-                       err = ice_set_promisc(vsi, promisc_m);
+                       err = ice_set_promisc(vsi, ICE_MCAST_PROMISC_BITS);
                        if (err) {
-                               netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
-                                          vsi->vsi_num);
                                vsi->current_netdev_flags &= ~IFF_ALLMULTI;
                                goto out_promisc;
                        }
                } else {
                        /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */
-                       if (ice_vsi_has_non_zero_vlans(vsi))
-                               promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
-                       else
-                               promisc_m = ICE_MCAST_PROMISC_BITS;
-
-                       err = ice_clear_promisc(vsi, promisc_m);
+                       err = ice_clear_promisc(vsi, ICE_MCAST_PROMISC_BITS);
                        if (err) {
-                               netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
-                                          vsi->vsi_num);
                                vsi->current_netdev_flags |= IFF_ALLMULTI;
                                goto out_promisc;
                        }
@@ -2517,6 +2510,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
                irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
        }
 
+       err = ice_set_cpu_rx_rmap(vsi);
+       if (err) {
+               netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
+                          vsi->vsi_num, ERR_PTR(err));
+               goto free_q_irqs;
+       }
+
        vsi->irqs_ready = true;
        return 0;
 
@@ -2569,7 +2569,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                spin_lock_init(&xdp_ring->tx_lock);
                for (j = 0; j < xdp_ring->count; j++) {
                        tx_desc = ICE_TX_DESC(xdp_ring, j);
-                       tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+                       tx_desc->cmd_type_offset_bsz = 0;
                }
        }
 
@@ -2765,8 +2765,10 @@ free_qmap:
 
        ice_for_each_xdp_txq(vsi, i)
                if (vsi->xdp_rings[i]) {
-                       if (vsi->xdp_rings[i]->desc)
+                       if (vsi->xdp_rings[i]->desc) {
+                               synchronize_rcu();
                                ice_free_tx_ring(vsi->xdp_rings[i]);
+                       }
                        kfree_rcu(vsi->xdp_rings[i], rcu);
                        vsi->xdp_rings[i] = NULL;
                }
@@ -3488,6 +3490,20 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (!vid)
                return 0;
 
+       while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+               usleep_range(1000, 2000);
+
+       /* Add multicast promisc rule for the VLAN ID to be added if
+        * all-multicast is currently enabled.
+        */
+       if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+               ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                              ICE_MCAST_VLAN_PROMISC_BITS,
+                                              vid);
+               if (ret)
+                       goto finish;
+       }
+
        vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
        /* Add a switch rule for this VLAN ID so its corresponding VLAN tagged
@@ -3495,8 +3511,23 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
         */
        vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
        ret = vlan_ops->add_vlan(vsi, &vlan);
-       if (!ret)
-               set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
+       if (ret)
+               goto finish;
+
+       /* If all-multicast is currently enabled and this VLAN ID is only one
+        * besides VLAN-0 we have to update look-up type of multicast promisc
+        * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN.
+        */
+       if ((vsi->current_netdev_flags & IFF_ALLMULTI) &&
+           ice_vsi_num_non_zero_vlans(vsi) == 1) {
+               ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                          ICE_MCAST_PROMISC_BITS, 0);
+               ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                        ICE_MCAST_VLAN_PROMISC_BITS, 0);
+       }
+
+finish:
+       clear_bit(ICE_CFG_BUSY, vsi->state);
 
        return ret;
 }
@@ -3522,6 +3553,9 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (!vid)
                return 0;
 
+       while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))
+               usleep_range(1000, 2000);
+
        vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
 
        /* Make sure VLAN delete is successful before updating VLAN
@@ -3530,10 +3564,33 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        vlan = ICE_VLAN(be16_to_cpu(proto), vid, 0);
        ret = vlan_ops->del_vlan(vsi, &vlan);
        if (ret)
-               return ret;
+               goto finish;
 
-       set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
-       return 0;
+       /* Remove multicast promisc rule for the removed VLAN ID if
+        * all-multicast is enabled.
+        */
+       if (vsi->current_netdev_flags & IFF_ALLMULTI)
+               ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                          ICE_MCAST_VLAN_PROMISC_BITS, vid);
+
+       if (!ice_vsi_has_non_zero_vlans(vsi)) {
+               /* Update look-up type of multicast promisc rule for VLAN 0
+                * from ICE_SW_LKUP_PROMISC_VLAN to ICE_SW_LKUP_PROMISC when
+                * all-multicast is enabled and VLAN 0 is the only VLAN rule.
+                */
+               if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+                       ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                                  ICE_MCAST_VLAN_PROMISC_BITS,
+                                                  0);
+                       ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,
+                                                ICE_MCAST_PROMISC_BITS, 0);
+               }
+       }
+
+finish:
+       clear_bit(ICE_CFG_BUSY, vsi->state);
+
+       return ret;
 }
 
 /**
@@ -3642,20 +3699,12 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
         */
        ice_napi_add(vsi);
 
-       status = ice_set_cpu_rx_rmap(vsi);
-       if (status) {
-               dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
-                       vsi->vsi_num, status);
-               goto unroll_napi_add;
-       }
        status = ice_init_mac_fltr(pf);
        if (status)
-               goto free_cpu_rx_map;
+               goto unroll_napi_add;
 
        return 0;
 
-free_cpu_rx_map:
-       ice_free_cpu_rx_rmap(vsi);
 unroll_napi_add:
        ice_tc_indir_block_unregister(vsi);
 unroll_cfg_netdev:
@@ -5117,7 +5166,6 @@ static int __maybe_unused ice_suspend(struct device *dev)
                        continue;
                ice_vsi_free_q_vectors(pf->vsi[v]);
        }
-       ice_free_cpu_rx_rmap(ice_get_main_vsi(pf));
        ice_clear_interrupt_scheme(pf);
 
        pci_save_state(pdev);
@@ -5475,16 +5523,19 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
 
        /* Add filter for new MAC. If filter exists, return success */
        err = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI);
-       if (err == -EEXIST)
+       if (err == -EEXIST) {
                /* Although this MAC filter is already present in hardware it's
                 * possible in some cases (e.g. bonding) that dev_addr was
                 * modified outside of the driver and needs to be restored back
                 * to this value.
                 */
                netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac);
-       else if (err)
+
+               return 0;
+       } else if (err) {
                /* error if the new filter addition failed */
                err = -EADDRNOTAVAIL;
+       }
 
 err_update_filters:
        if (err) {
index 3f1a638..69ff4b9 100644 (file)
@@ -1358,9 +1358,9 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
                                goto error_param;
                        }
 
-                       /* Skip queue if not enabled */
                        if (!test_bit(vf_q_id, vf->txq_ena))
-                               continue;
+                               dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+                                       vf_q_id, vsi->vsi_num);
 
                        ice_fill_txq_meta(vsi, ring, &txq_meta);
 
index dfbcaf0..866ee4d 100644 (file)
@@ -41,8 +41,10 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
 static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
 {
        ice_clean_tx_ring(vsi->tx_rings[q_idx]);
-       if (ice_is_xdp_ena_vsi(vsi))
+       if (ice_is_xdp_ena_vsi(vsi)) {
+               synchronize_rcu();
                ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+       }
        ice_clean_rx_ring(vsi->rx_rings[q_idx]);
 }
 
@@ -918,7 +920,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
        struct ice_vsi *vsi = np->vsi;
        struct ice_tx_ring *ring;
 
-       if (test_bit(ICE_DOWN, vsi->state))
+       if (test_bit(ICE_VSI_DOWN, vsi->state))
                return -ENETDOWN;
 
        if (!ice_is_xdp_ena_vsi(vsi))
index 5f9ab18..c188014 100644 (file)
@@ -2751,7 +2751,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
        }
 
        ret = of_get_mac_address(pnp, ppd.mac_addr);
-       if (ret)
+       if (ret == -EPROBE_DEFER)
                return ret;
 
        mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
index 939b692..ce843ea 100644 (file)
@@ -650,6 +650,7 @@ static int mlxsw_i2c_probe(struct i2c_client *client,
        return 0;
 
 errout:
+       mutex_destroy(&mlxsw_i2c->cmd.lock);
        i2c_set_clientdata(client, NULL);
 
        return err;
index 93df304..830363b 100644 (file)
@@ -28,6 +28,7 @@ config KS8842
 config KS8851
        tristate "Micrel KS8851 SPI"
        depends on SPI
+       depends on PTP_1588_CLOCK_OPTIONAL
        select MII
        select CRC32
        select EEPROM_93CX6
@@ -39,6 +40,7 @@ config KS8851
 config KS8851_MLL
        tristate "Micrel KS8851 MLL"
        depends on HAS_IOMEM
+       depends on PTP_1588_CLOCK_OPTIONAL
        select MII
        select CRC32
        select EEPROM_93CX6
index ce5970b..2679111 100644 (file)
@@ -346,7 +346,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 
                        lan966x_mac_process_raw_entry(&raw_entries[column],
                                                      mac, &vid, &dest_idx);
-                       WARN_ON(dest_idx > lan966x->num_phys_ports);
+                       if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+                               continue;
 
                        /* If the entry in SW is found, then there is nothing
                         * to do
@@ -392,7 +393,8 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
 
                lan966x_mac_process_raw_entry(&raw_entries[column],
                                              mac, &vid, &dest_idx);
-               WARN_ON(dest_idx > lan966x->num_phys_ports);
+               if (WARN_ON(dest_idx > lan966x->num_phys_ports))
+                       continue;
 
                mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx);
                if (!mac_entry)
index 1f8c67f..958e555 100644 (file)
@@ -446,6 +446,12 @@ static bool lan966x_hw_offload(struct lan966x *lan966x, u32 port,
                     ANA_CPU_FWD_CFG_MLD_REDIR_ENA)))
                return true;
 
+       if (eth_type_vlan(skb->protocol)) {
+               skb = skb_vlan_untag(skb);
+               if (unlikely(!skb))
+                       return false;
+       }
+
        if (skb->protocol == htons(ETH_P_IP) &&
            ip_hdr(skb)->protocol == IPPROTO_IGMP)
                return false;
index ae78277..0a1041d 100644 (file)
@@ -29,10 +29,10 @@ enum {
 
 static u64 lan966x_ptp_get_nominal_value(void)
 {
-       u64 res = 0x304d2df1;
-
-       res <<= 32;
-       return res;
+       /* This is the default value that for each system clock, the time of day
+        * is increased. It has the format 5.59 nanosecond.
+        */
+       return 0x304d4873ecade305;
 }
 
 int lan966x_ptp_hwtstamp_set(struct lan966x_port *port, struct ifreq *ifr)
index e3555c9..df2bee6 100644 (file)
@@ -322,8 +322,7 @@ static int lan966x_port_prechangeupper(struct net_device *dev,
 
        if (netif_is_bridge_master(info->upper_dev) && !info->linking)
                switchdev_bridge_port_unoffload(port->dev, port,
-                                               &lan966x_switchdev_nb,
-                                               &lan966x_switchdev_blocking_nb);
+                                               NULL, NULL);
 
        return NOTIFY_DONE;
 }
index 50ac3ee..21d2645 100644 (file)
@@ -2903,11 +2903,9 @@ static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
                status = myri10ge_xmit(curr, dev);
                if (status != 0) {
                        dev_kfree_skb_any(curr);
-                       if (segs != NULL) {
-                               curr = segs;
-                               segs = next;
+                       skb_list_walk_safe(next, curr, next) {
                                curr->next = NULL;
-                               dev_kfree_skb_any(segs);
+                               dev_kfree_skb_any(curr);
                        }
                        goto drop;
                }
index e3edca1..5250d1d 100644 (file)
@@ -489,7 +489,7 @@ struct split_type_defs {
 
 #define STATIC_DEBUG_LINE_DWORDS       9
 
-#define NUM_COMMON_GLOBAL_PARAMS       11
+#define NUM_COMMON_GLOBAL_PARAMS       10
 
 #define MAX_RECURSION_DEPTH            10
 
index b242000..b7cc365 100644 (file)
@@ -748,6 +748,9 @@ qede_build_skb(struct qede_rx_queue *rxq,
        buf = page_address(bd->data) + bd->page_offset;
        skb = build_skb(buf, rxq->rx_buf_seg_size);
 
+       if (unlikely(!skb))
+               return NULL;
+
        skb_reserve(skb, pad);
        skb_put(skb, len);
 
index f906453..377df8b 100644 (file)
@@ -786,6 +786,85 @@ void efx_remove_channels(struct efx_nic *efx)
        kfree(efx->xdp_tx_queues);
 }
 
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+                               struct efx_tx_queue *tx_queue)
+{
+       if (xdp_queue_number >= efx->xdp_tx_queue_count)
+               return -EINVAL;
+
+       netif_dbg(efx, drv, efx->net_dev,
+                 "Channel %u TXQ %u is XDP %u, HW %u\n",
+                 tx_queue->channel->channel, tx_queue->label,
+                 xdp_queue_number, tx_queue->queue);
+       efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+       return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_channel *channel;
+       unsigned int next_queue = 0;
+       int xdp_queue_number = 0;
+       int rc;
+
+       /* We need to mark which channels really have RX and TX
+        * queues, and adjust the TX queue numbers if we have separate
+        * RX-only and TX-only channels.
+        */
+       efx_for_each_channel(channel, efx) {
+               if (channel->channel < efx->tx_channel_offset)
+                       continue;
+
+               if (efx_channel_is_xdp_tx(channel)) {
+                       efx_for_each_channel_tx_queue(tx_queue, channel) {
+                               tx_queue->queue = next_queue++;
+                               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+                                                         tx_queue);
+                               if (rc == 0)
+                                       xdp_queue_number++;
+                       }
+               } else {
+                       efx_for_each_channel_tx_queue(tx_queue, channel) {
+                               tx_queue->queue = next_queue++;
+                               netif_dbg(efx, drv, efx->net_dev,
+                                         "Channel %u TXQ %u is HW %u\n",
+                                         channel->channel, tx_queue->label,
+                                         tx_queue->queue);
+                       }
+
+                       /* If XDP is borrowing queues from net stack, it must
+                        * use the queue with no csum offload, which is the
+                        * first one of the channel
+                        * (note: tx_queue_by_type is not initialized yet)
+                        */
+                       if (efx->xdp_txq_queues_mode ==
+                           EFX_XDP_TX_QUEUES_BORROWED) {
+                               tx_queue = &channel->tx_queue[0];
+                               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+                                                         tx_queue);
+                               if (rc == 0)
+                                       xdp_queue_number++;
+                       }
+               }
+       }
+       WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number != efx->xdp_tx_queue_count);
+       WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+               xdp_queue_number > efx->xdp_tx_queue_count);
+
+       /* If we have more CPUs than assigned XDP TX queues, assign the already
+        * existing queues to the exceeding CPUs
+        */
+       next_queue = 0;
+       while (xdp_queue_number < efx->xdp_tx_queue_count) {
+               tx_queue = efx->xdp_tx_queues[next_queue++];
+               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+               if (rc == 0)
+                       xdp_queue_number++;
+       }
+}
+
 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
        struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
@@ -857,6 +936,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
                efx_init_napi_channel(efx->channel[i]);
        }
 
+       efx_set_xdp_channels(efx);
 out:
        /* Destroy unused channel structures */
        for (i = 0; i < efx->n_channels; i++) {
@@ -889,26 +969,9 @@ rollback:
        goto out;
 }
 
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
-                    struct efx_tx_queue *tx_queue)
-{
-       if (xdp_queue_number >= efx->xdp_tx_queue_count)
-               return -EINVAL;
-
-       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-                 tx_queue->channel->channel, tx_queue->label,
-                 xdp_queue_number, tx_queue->queue);
-       efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-       return 0;
-}
-
 int efx_set_channels(struct efx_nic *efx)
 {
-       struct efx_tx_queue *tx_queue;
        struct efx_channel *channel;
-       unsigned int next_queue = 0;
-       int xdp_queue_number;
        int rc;
 
        efx->tx_channel_offset =
@@ -926,61 +989,14 @@ int efx_set_channels(struct efx_nic *efx)
                        return -ENOMEM;
        }
 
-       /* We need to mark which channels really have RX and TX
-        * queues, and adjust the TX queue numbers if we have separate
-        * RX-only and TX-only channels.
-        */
-       xdp_queue_number = 0;
        efx_for_each_channel(channel, efx) {
                if (channel->channel < efx->n_rx_channels)
                        channel->rx_queue.core_index = channel->channel;
                else
                        channel->rx_queue.core_index = -1;
-
-               if (channel->channel >= efx->tx_channel_offset) {
-                       if (efx_channel_is_xdp_tx(channel)) {
-                               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                                       tx_queue->queue = next_queue++;
-                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-                                       if (rc == 0)
-                                               xdp_queue_number++;
-                               }
-                       } else {
-                               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                                       tx_queue->queue = next_queue++;
-                                       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
-                                                 channel->channel, tx_queue->label,
-                                                 tx_queue->queue);
-                               }
-
-                               /* If XDP is borrowing queues from net stack, it must use the queue
-                                * with no csum offload, which is the first one of the channel
-                                * (note: channel->tx_queue_by_type is not initialized yet)
-                                */
-                               if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
-                                       tx_queue = &channel->tx_queue[0];
-                                       rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-                                       if (rc == 0)
-                                               xdp_queue_number++;
-                               }
-                       }
-               }
        }
-       WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
-               xdp_queue_number != efx->xdp_tx_queue_count);
-       WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
-               xdp_queue_number > efx->xdp_tx_queue_count);
 
-       /* If we have more CPUs than assigned XDP TX queues, assign the already
-        * existing queues to the exceeding CPUs
-        */
-       next_queue = 0;
-       while (xdp_queue_number < efx->xdp_tx_queue_count) {
-               tx_queue = efx->xdp_tx_queues[next_queue++];
-               rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-               if (rc == 0)
-                       xdp_queue_number++;
-       }
+       efx_set_xdp_channels(efx);
 
        rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
        if (rc)
@@ -1124,7 +1140,7 @@ void efx_start_channels(struct efx_nic *efx)
        struct efx_rx_queue *rx_queue;
        struct efx_channel *channel;
 
-       efx_for_each_channel(channel, efx) {
+       efx_for_each_channel_rev(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel) {
                        efx_init_tx_queue(tx_queue);
                        atomic_inc(&efx->active_queues);
index 1b22c7b..fa8b9aa 100644 (file)
@@ -150,6 +150,9 @@ static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
        struct efx_nic *efx = rx_queue->efx;
        int i;
 
+       if (unlikely(!rx_queue->page_ring))
+               return;
+
        /* Unmap and release the pages in the recycle ring. Remove the ring. */
        for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
                struct page *page = rx_queue->page_ring[i];
index d16e031..6983799 100644 (file)
@@ -443,6 +443,9 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
        if (unlikely(!tx_queue))
                return -EINVAL;
 
+       if (!tx_queue->initialised)
+               return -EINVAL;
+
        if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
                HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
 
index d530cde..9bc8281 100644 (file)
@@ -101,6 +101,8 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);
 
+       tx_queue->initialised = false;
+
        if (!tx_queue->buffer)
                return;
 
index cd478d2..00f6d34 100644 (file)
 #define TSE_PCS_USE_SGMII_ENA                          BIT(0)
 #define TSE_PCS_IF_USE_SGMII                           0x03
 
-#define SGMII_ADAPTER_CTRL_REG                         0x00
-#define SGMII_ADAPTER_DISABLE                          0x0001
-#define SGMII_ADAPTER_ENABLE                           0x0000
-
 #define AUTONEGO_LINK_TIMER                            20
 
 static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
@@ -202,12 +198,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
                           unsigned int speed)
 {
        void __iomem *tse_pcs_base = pcs->tse_pcs_base;
-       void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
        u32 val;
 
-       writew(SGMII_ADAPTER_ENABLE,
-              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
-
        pcs->autoneg = phy_dev->autoneg;
 
        if (phy_dev->autoneg == AUTONEG_ENABLE) {
index 442812c..694ac25 100644 (file)
 #include <linux/phy.h>
 #include <linux/timer.h>
 
+#define SGMII_ADAPTER_CTRL_REG         0x00
+#define SGMII_ADAPTER_ENABLE           0x0000
+#define SGMII_ADAPTER_DISABLE          0x0001
+
 struct tse_pcs {
        struct device *dev;
        void __iomem *tse_pcs_base;
index ecf759e..017dbbd 100644 (file)
@@ -205,7 +205,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
 };
 MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
 
-struct pci_driver loongson_dwmac_driver = {
+static struct pci_driver loongson_dwmac_driver = {
        .name = "dwmac-loongson-pci",
        .id_table = loongson_dwmac_id_table,
        .probe = loongson_dwmac_probe,
index b7c2579..ac9e6c7 100644 (file)
@@ -18,9 +18,6 @@
 
 #include "altr_tse_pcs.h"
 
-#define SGMII_ADAPTER_CTRL_REG                          0x00
-#define SGMII_ADAPTER_DISABLE                           0x0001
-
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -62,16 +59,14 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
 {
        struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
        void __iomem *splitter_base = dwmac->splitter_base;
-       void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
        void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
        struct device *dev = dwmac->dev;
        struct net_device *ndev = dev_get_drvdata(dev);
        struct phy_device *phy_dev = ndev->phydev;
        u32 val;
 
-       if ((tse_pcs_base) && (sgmii_adapter_base))
-               writew(SGMII_ADAPTER_DISABLE,
-                      sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+       writew(SGMII_ADAPTER_DISABLE,
+              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
 
        if (splitter_base) {
                val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
@@ -93,7 +88,9 @@ static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
                writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
        }
 
-       if (tse_pcs_base && sgmii_adapter_base)
+       writew(SGMII_ADAPTER_ENABLE,
+              sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+       if (phy_dev)
                tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
 }
 
index 5d29f33..11e1055 100644 (file)
@@ -431,8 +431,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
        plat->phylink_node = np;
 
        /* Get max speed of operation from device tree */
-       if (of_property_read_u32(np, "max-speed", &plat->max_speed))
-               plat->max_speed = -1;
+       of_property_read_u32(np, "max-speed", &plat->max_speed);
 
        plat->bus_id = of_alias_get_id(np, "ethernet");
        if (plat->bus_id < 0)
index 0f9c88d..d5c1e5c 100644 (file)
@@ -433,8 +433,6 @@ struct axienet_local {
        struct net_device *ndev;
        struct device *dev;
 
-       struct device_node *phy_node;
-
        struct phylink *phylink;
        struct phylink_config phylink_config;
 
index c7eb05e..d6fc3f7 100644 (file)
@@ -2064,25 +2064,33 @@ static int axienet_probe(struct platform_device *pdev)
        if (ret)
                goto cleanup_clk;
 
-       lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
-       if (lp->phy_node) {
-               ret = axienet_mdio_setup(lp);
-               if (ret)
-                       dev_warn(&pdev->dev,
-                                "error registering MDIO bus: %d\n", ret);
-       }
+       ret = axienet_mdio_setup(lp);
+       if (ret)
+               dev_warn(&pdev->dev,
+                        "error registering MDIO bus: %d\n", ret);
+
        if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
            lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
-               if (!lp->phy_node) {
-                       dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+               np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
+               if (!np) {
+                       /* Deprecated: Always use "pcs-handle" for pcs_phy.
+                        * Falling back to "phy-handle" here is only for
+                        * backward compatibility with old device trees.
+                        */
+                       np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+               }
+               if (!np) {
+                       dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
                        ret = -EINVAL;
                        goto cleanup_mdio;
                }
-               lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+               lp->pcs_phy = of_mdio_find_device(np);
                if (!lp->pcs_phy) {
                        ret = -EPROBE_DEFER;
+                       of_node_put(np);
                        goto cleanup_mdio;
                }
+               of_node_put(np);
                lp->pcs.ops = &axienet_pcs_ops;
                lp->pcs.poll = true;
        }
@@ -2125,8 +2133,6 @@ cleanup_mdio:
                put_device(&lp->pcs_phy->dev);
        if (lp->mii_bus)
                axienet_mdio_teardown(lp);
-       of_node_put(lp->phy_node);
-
 cleanup_clk:
        clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        clk_disable_unprepare(lp->axi_clk);
@@ -2155,9 +2161,6 @@ static int axienet_remove(struct platform_device *pdev)
        clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
        clk_disable_unprepare(lp->axi_clk);
 
-       of_node_put(lp->phy_node);
-       lp->phy_node = NULL;
-
        free_netdev(ndev);
 
        return 0;
index 069e882..b00bc81 100644 (file)
@@ -460,8 +460,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                        return RX_HANDLER_CONSUMED;
                *pskb = skb;
                eth = eth_hdr(skb);
-               if (macvlan_forward_source(skb, port, eth->h_source))
+               if (macvlan_forward_source(skb, port, eth->h_source)) {
+                       kfree_skb(skb);
                        return RX_HANDLER_CONSUMED;
+               }
                src = macvlan_hash_lookup(port, eth->h_source);
                if (src && src->mode != MACVLAN_MODE_VEPA &&
                    src->mode != MACVLAN_MODE_BRIDGE) {
@@ -480,8 +482,10 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                return RX_HANDLER_PASS;
        }
 
-       if (macvlan_forward_source(skb, port, eth->h_source))
+       if (macvlan_forward_source(skb, port, eth->h_source)) {
+               kfree_skb(skb);
                return RX_HANDLER_CONSUMED;
+       }
        if (macvlan_passthru(port))
                vlan = list_first_or_null_rcu(&port->vlans,
                                              struct macvlan_dev, list);
index baf7afa..53846c6 100644 (file)
@@ -553,7 +553,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev,
        hdr->source_slave = ((llsrc << 1) & 0xff) | 0x01;
        mhdr->ver = 0x01;
 
-       return 0;
+       return sizeof(struct mctp_i2c_hdr);
 }
 
 static int mctp_i2c_tx_thread(void *data)
index 1becb1a..1c1584f 100644 (file)
@@ -43,6 +43,11 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
        int rc;
 
        rc = fwnode_irq_get(child, 0);
+       /* Don't wait forever if the IRQ provider doesn't become available,
+        * just fall back to poll mode
+        */
+       if (rc == -EPROBE_DEFER)
+               rc = driver_deferred_probe_check_state(&phy->mdio.dev);
        if (rc == -EPROBE_DEFER)
                return rc;
 
index c483ba6..5829697 100644 (file)
@@ -102,6 +102,9 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
        u32 val;
        int ret;
 
+       if (regnum & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
        ret = mscc_miim_wait_pending(bus);
        if (ret)
                goto out;
@@ -145,6 +148,9 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
        struct mscc_miim_dev *miim = bus->priv;
        int ret;
 
+       if (regnum & MII_ADDR_C45)
+               return -EOPNOTSUPP;
+
        ret = mscc_miim_wait_pending(bus);
        if (ret < 0)
                goto out;
index 19b11e8..fc53b71 100644 (file)
 #define PTP_TIMESTAMP_EN_PDREQ_                        BIT(2)
 #define PTP_TIMESTAMP_EN_PDRES_                        BIT(3)
 
-#define PTP_RX_LATENCY_1000                    0x0224
-#define PTP_TX_LATENCY_1000                    0x0225
-
-#define PTP_RX_LATENCY_100                     0x0222
-#define PTP_TX_LATENCY_100                     0x0223
-
-#define PTP_RX_LATENCY_10                      0x0220
-#define PTP_TX_LATENCY_10                      0x0221
-
 #define PTP_TX_PARSE_L2_ADDR_EN                        0x0284
 #define PTP_RX_PARSE_L2_ADDR_EN                        0x0244
 
@@ -268,15 +259,6 @@ struct lan8814_ptp_rx_ts {
        u16 seq_id;
 };
 
-struct kszphy_latencies {
-       u16 rx_10;
-       u16 tx_10;
-       u16 rx_100;
-       u16 tx_100;
-       u16 rx_1000;
-       u16 tx_1000;
-};
-
 struct kszphy_ptp_priv {
        struct mii_timestamper mii_ts;
        struct phy_device *phydev;
@@ -296,7 +278,6 @@ struct kszphy_ptp_priv {
 
 struct kszphy_priv {
        struct kszphy_ptp_priv ptp_priv;
-       struct kszphy_latencies latencies;
        const struct kszphy_type *type;
        int led_mode;
        bool rmii_ref_clk_sel;
@@ -304,14 +285,6 @@ struct kszphy_priv {
        u64 stats[ARRAY_SIZE(kszphy_hw_stats)];
 };
 
-static struct kszphy_latencies lan8814_latencies = {
-       .rx_10          = 0x22AA,
-       .tx_10          = 0x2E4A,
-       .rx_100         = 0x092A,
-       .tx_100         = 0x02C1,
-       .rx_1000        = 0x01AD,
-       .tx_1000        = 0x00C9,
-};
 static const struct kszphy_type ksz8021_type = {
        .led_mode_reg           = MII_KSZPHY_CTRL_2,
        .has_broadcast_disable  = true,
@@ -2618,55 +2591,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
        return 0;
 }
 
-static int lan8814_read_status(struct phy_device *phydev)
-{
-       struct kszphy_priv *priv = phydev->priv;
-       struct kszphy_latencies *latencies = &priv->latencies;
-       int err;
-       int regval;
-
-       err = genphy_read_status(phydev);
-       if (err)
-               return err;
-
-       switch (phydev->speed) {
-       case SPEED_1000:
-               lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_1000,
-                                     latencies->rx_1000);
-               lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_1000,
-                                     latencies->tx_1000);
-               break;
-       case SPEED_100:
-               lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_100,
-                                     latencies->rx_100);
-               lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_100,
-                                     latencies->tx_100);
-               break;
-       case SPEED_10:
-               lanphy_write_page_reg(phydev, 5, PTP_RX_LATENCY_10,
-                                     latencies->rx_10);
-               lanphy_write_page_reg(phydev, 5, PTP_TX_LATENCY_10,
-                                     latencies->tx_10);
-               break;
-       default:
-               break;
-       }
-
-       /* Make sure the PHY is not broken. Read idle error count,
-        * and reset the PHY if it is maxed out.
-        */
-       regval = phy_read(phydev, MII_STAT1000);
-       if ((regval & 0xFF) == 0xFF) {
-               phy_init_hw(phydev);
-               phydev->link = 0;
-               if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
-                       phydev->drv->config_intr(phydev);
-               return genphy_config_aneg(phydev);
-       }
-
-       return 0;
-}
-
 static int lan8814_config_init(struct phy_device *phydev)
 {
        int val;
@@ -2690,30 +2614,8 @@ static int lan8814_config_init(struct phy_device *phydev)
        return 0;
 }
 
-static void lan8814_parse_latency(struct phy_device *phydev)
-{
-       const struct device_node *np = phydev->mdio.dev.of_node;
-       struct kszphy_priv *priv = phydev->priv;
-       struct kszphy_latencies *latency = &priv->latencies;
-       u32 val;
-
-       if (!of_property_read_u32(np, "lan8814,latency_rx_10", &val))
-               latency->rx_10 = val;
-       if (!of_property_read_u32(np, "lan8814,latency_tx_10", &val))
-               latency->tx_10 = val;
-       if (!of_property_read_u32(np, "lan8814,latency_rx_100", &val))
-               latency->rx_100 = val;
-       if (!of_property_read_u32(np, "lan8814,latency_tx_100", &val))
-               latency->tx_100 = val;
-       if (!of_property_read_u32(np, "lan8814,latency_rx_1000", &val))
-               latency->rx_1000 = val;
-       if (!of_property_read_u32(np, "lan8814,latency_tx_1000", &val))
-               latency->tx_1000 = val;
-}
-
 static int lan8814_probe(struct phy_device *phydev)
 {
-       const struct device_node *np = phydev->mdio.dev.of_node;
        struct kszphy_priv *priv;
        u16 addr;
        int err;
@@ -2724,13 +2626,10 @@ static int lan8814_probe(struct phy_device *phydev)
 
        priv->led_mode = -1;
 
-       priv->latencies = lan8814_latencies;
-
        phydev->priv = priv;
 
        if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
-           !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ||
-           of_property_read_bool(np, "lan8814,ignore-ts"))
+           !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
                return 0;
 
        /* Strap-in value for PHY address, below register read gives starting
@@ -2746,7 +2645,6 @@ static int lan8814_probe(struct phy_device *phydev)
                        return err;
        }
 
-       lan8814_parse_latency(phydev);
        lan8814_ptp_init(phydev);
 
        return 0;
@@ -2928,7 +2826,7 @@ static struct phy_driver ksphy_driver[] = {
        .config_init    = lan8814_config_init,
        .probe          = lan8814_probe,
        .soft_reset     = genphy_soft_reset,
-       .read_status    = lan8814_read_status,
+       .read_status    = ksz9031_read_status,
        .get_sset_count = kszphy_get_sset_count,
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
index 389df3f..3f79bbb 100644 (file)
@@ -706,7 +706,6 @@ static int lan87xx_read_status(struct phy_device *phydev)
 static int lan87xx_config_aneg(struct phy_device *phydev)
 {
        u16 ctl = 0;
-       int rc;
 
        switch (phydev->master_slave_set) {
        case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -722,11 +721,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
                return -EOPNOTSUPP;
        }
 
-       rc = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
-       if (rc == 1)
-               rc = genphy_soft_reset(phydev);
-
-       return rc;
+       return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
 }
 
 static struct phy_driver microchip_t1_phy_driver[] = {
index 88396ff..6865d32 100644 (file)
@@ -469,7 +469,7 @@ static void sl_tx_timeout(struct net_device *dev, unsigned int txqueue)
        spin_lock(&sl->lock);
 
        if (netif_queue_stopped(dev)) {
-               if (!netif_running(dev))
+               if (!netif_running(dev) || !sl->tty)
                        goto out;
 
                /* May be we must check transmitter timeout here ?
index 276a0e4..dbe4c0a 100644 (file)
@@ -1124,7 +1124,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* NETIF_F_LLTX requires to do our own update of trans_start */
        queue = netdev_get_tx_queue(dev, txq);
-       queue->trans_start = jiffies;
+       txq_trans_cond_update(queue);
 
        /* Notify and wake up reader process */
        if (tfile->flags & TUN_FASYNC)
index ea06d10..ca409d4 100644 (file)
@@ -1102,10 +1102,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
        if (start_of_descs != desc_offset)
                goto err;
 
-       /* self check desc_offset from header*/
-       if (desc_offset >= skb_len)
+       /* self check desc_offset from header and make sure that the
+        * bounds of the metadata array are inside the SKB
+        */
+       if (pkt_count * 2 + desc_offset >= skb_len)
                goto err;
 
+       /* Packets must not overlap the metadata array */
+       skb_trim(skb, desc_offset);
+
        if (pkt_count == 0)
                goto err;
 
index 1b57149..eb0121a 100644 (file)
@@ -320,7 +320,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        rcu_read_lock();
        rcv = rcu_dereference(priv->peer);
-       if (unlikely(!rcv)) {
+       if (unlikely(!rcv) || !pskb_may_pull(skb, ETH_HLEN)) {
                kfree_skb(skb);
                goto drop;
        }
index 85e3624..cfc30ce 100644 (file)
@@ -1265,6 +1265,7 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
        eth = (struct ethhdr *)skb->data;
 
        skb_reset_mac_header(skb);
+       skb_reset_mac_len(skb);
 
        /* we set the ethernet destination and the source addresses to the
         * address of the VRF device.
@@ -1294,9 +1295,9 @@ static int vrf_prepare_mac_header(struct sk_buff *skb,
  */
 static int vrf_add_mac_header_if_unset(struct sk_buff *skb,
                                       struct net_device *vrf_dev,
-                                      u16 proto)
+                                      u16 proto, struct net_device *orig_dev)
 {
-       if (skb_mac_header_was_set(skb))
+       if (skb_mac_header_was_set(skb) && dev_has_header(orig_dev))
                return 0;
 
        return vrf_prepare_mac_header(skb, vrf_dev, proto);
@@ -1402,6 +1403,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 
        /* if packet is NDISC then keep the ingress interface */
        if (!is_ndisc) {
+               struct net_device *orig_dev = skb->dev;
+
                vrf_rx_stats(vrf_dev, skb->len);
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;
@@ -1410,7 +1413,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
                        int err;
 
                        err = vrf_add_mac_header_if_unset(skb, vrf_dev,
-                                                         ETH_P_IPV6);
+                                                         ETH_P_IPV6,
+                                                         orig_dev);
                        if (likely(!err)) {
                                skb_push(skb, skb->mac_len);
                                dev_queue_xmit_nit(skb, vrf_dev);
@@ -1440,6 +1444,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
                                  struct sk_buff *skb)
 {
+       struct net_device *orig_dev = skb->dev;
+
        skb->dev = vrf_dev;
        skb->skb_iif = vrf_dev->ifindex;
        IPCB(skb)->flags |= IPSKB_L3SLAVE;
@@ -1460,7 +1466,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
        if (!list_empty(&vrf_dev->ptype_all)) {
                int err;
 
-               err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP);
+               err = vrf_add_mac_header_if_unset(skb, vrf_dev, ETH_P_IP,
+                                                 orig_dev);
                if (likely(!err)) {
                        skb_push(skb, skb->mac_len);
                        dev_queue_xmit_nit(skb, vrf_dev);
index de97ff9..8a5e3a6 100644 (file)
@@ -651,11 +651,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
        rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
        if (rd == NULL)
-               return -ENOBUFS;
+               return -ENOMEM;
 
        if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
                kfree(rd);
-               return -ENOBUFS;
+               return -ENOMEM;
        }
 
        rd->remote_ip = *ip;
index 63e1c2d..73693c6 100644 (file)
@@ -1633,7 +1633,7 @@ static void ath10k_sdio_hif_power_down(struct ath10k *ar)
                return;
        }
 
-       ret = mmc_hw_reset(ar_sdio->func->card->host);
+       ret = mmc_hw_reset(ar_sdio->func->card);
        if (ret)
                ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
 
index d5b83f9..e6b34b0 100644 (file)
@@ -3136,6 +3136,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
                        arvif->do_not_send_tmpl = true;
                else
                        arvif->do_not_send_tmpl = false;
+
+               if (vif->bss_conf.he_support) {
+                       ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+                                                           WMI_VDEV_PARAM_BA_MODE,
+                                                           WMI_BA_MODE_BUFFER_SIZE_256);
+                       if (ret)
+                               ath11k_warn(ar->ab,
+                                           "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+                                           arvif->vdev_id);
+                       else
+                               ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+                                          "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+                                          arvif->vdev_id);
+               }
        }
 
        if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
@@ -3171,14 +3185,6 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
 
                if (arvif->is_up && vif->bss_conf.he_support &&
                    vif->bss_conf.he_oper.params) {
-                       ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-                                                           WMI_VDEV_PARAM_BA_MODE,
-                                                           WMI_BA_MODE_BUFFER_SIZE_256);
-                       if (ret)
-                               ath11k_warn(ar->ab,
-                                           "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
-                                           arvif->vdev_id);
-
                        param_id = WMI_VDEV_PARAM_HEOPS_0_31;
                        param_value = vif->bss_conf.he_oper.params;
                        ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
index 98090e4..e2791d4 100644 (file)
@@ -839,7 +839,7 @@ static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
                        continue;
 
                txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
-               fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
+               fi = (struct ath_frame_info *)&txinfo->status.status_driver_data[0];
                if (fi->keyix == keyix)
                        return true;
        }
index d0caf1d..db83cc4 100644 (file)
@@ -141,8 +141,8 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
 {
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        BUILD_BUG_ON(sizeof(struct ath_frame_info) >
-                    sizeof(tx_info->rate_driver_data));
-       return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+                    sizeof(tx_info->status.status_driver_data));
+       return (struct ath_frame_info *) &tx_info->status.status_driver_data[0];
 }
 
 static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
@@ -2542,6 +2542,16 @@ skip_tx_complete:
        spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
+static void ath_clear_tx_status(struct ieee80211_tx_info *tx_info)
+{
+       void *ptr = &tx_info->status;
+
+       memset(ptr + sizeof(tx_info->status.rates), 0,
+              sizeof(tx_info->status) -
+              sizeof(tx_info->status.rates) -
+              sizeof(tx_info->status.status_driver_data));
+}
+
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                             struct ath_tx_status *ts, int nframes, int nbad,
                             int txok)
@@ -2553,6 +2563,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
        struct ath_hw *ah = sc->sc_ah;
        u8 i, tx_rateindex;
 
+       ath_clear_tx_status(tx_info);
+
        if (txok)
                tx_info->status.ack_signal = ts->ts_rssi;
 
@@ -2567,6 +2579,13 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
        tx_info->status.ampdu_len = nframes;
        tx_info->status.ampdu_ack_len = nframes - nbad;
 
+       tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
+
+       for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
+               tx_info->status.rates[i].count = 0;
+               tx_info->status.rates[i].idx = -1;
+       }
+
        if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
            (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) == 0) {
                /*
@@ -2588,16 +2607,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
                        tx_info->status.rates[tx_rateindex].count =
                                hw->max_rate_tries;
        }
-
-       for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
-               tx_info->status.rates[i].count = 0;
-               tx_info->status.rates[i].idx = -1;
-       }
-
-       tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
-
-       /* we report airtime in ath_tx_count_airtime(), don't report twice */
-       tx_info->status.tx_time = 0;
 }
 
 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
index ba3c159..212fbbe 100644 (file)
@@ -557,7 +557,7 @@ enum brcmf_sdio_frmtype {
        BRCMF_SDIO_FT_SUB,
 };
 
-#define SDIOD_DRVSTR_KEY(chip, pmu)     (((chip) << 16) | (pmu))
+#define SDIOD_DRVSTR_KEY(chip, pmu)     (((unsigned int)(chip) << 16) | (pmu))
 
 /* SDIO Pad drive strength to select value mappings */
 struct sdiod_drive_str {
@@ -4165,7 +4165,7 @@ static int brcmf_sdio_bus_reset(struct device *dev)
 
        /* reset the adapter */
        sdio_claim_host(sdiodev->func1);
-       mmc_hw_reset(sdiodev->func1->card->host);
+       mmc_hw_reset(sdiodev->func1->card);
        sdio_release_host(sdiodev->func1);
 
        brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
index bde9e4b..4f3238d 100644 (file)
@@ -2639,7 +2639,7 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
 
        /* Run a HW reset of the SDIO interface. */
        sdio_claim_host(func);
-       ret = mmc_hw_reset(func->card->host);
+       ret = mmc_hw_reset(func->card);
        sdio_release_host(func);
 
        switch (ret) {
index 8a22ee5..df85ebc 100644 (file)
@@ -80,7 +80,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
 
        /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
-       mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
+       mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
 
        /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
        mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
index 72fc41a..9140b01 100644 (file)
@@ -146,7 +146,7 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
         * To guarantee that the SDIO card is power cycled, as required to make
         * the FW programming to succeed, let's do a brute force HW reset.
         */
-       mmc_hw_reset(card->host);
+       mmc_hw_reset(card);
 
        sdio_enable_func(func);
        sdio_release_host(func);
index d6d0569..877d2ec 100644 (file)
@@ -91,3 +91,16 @@ config NVME_TCP
          from https://github.com/linux-nvme/nvme-cli.
 
          If unsure, say N.
+
+config NVME_APPLE
+       tristate "Apple ANS2 NVM Express host driver"
+       depends on OF && BLOCK
+       depends on APPLE_RTKIT && APPLE_SART
+       depends on ARCH_APPLE || COMPILE_TEST
+       select NVME_CORE
+       help
+         This provides support for the NVMe controller embedded in Apple SoCs
+         such as the M1.
+
+         To compile this driver as a module, choose M here: the
+         module will be called nvme-apple.
index 476c5c9..a36ae16 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_FABRICS)              += nvme-fabrics.o
 obj-$(CONFIG_NVME_RDMA)                        += nvme-rdma.o
 obj-$(CONFIG_NVME_FC)                  += nvme-fc.o
 obj-$(CONFIG_NVME_TCP)                 += nvme-tcp.o
+obj-$(CONFIG_NVME_APPLE)               += nvme-apple.o
 
 nvme-core-y                            := core.o ioctl.o constants.o
 nvme-core-$(CONFIG_TRACING)            += trace.o
@@ -25,3 +26,5 @@ nvme-rdma-y                           += rdma.o
 nvme-fc-y                              += fc.o
 
 nvme-tcp-y                             += tcp.o
+
+nvme-apple-y                           += apple.o
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
new file mode 100644 (file)
index 0000000..d702d7d
--- /dev/null
@@ -0,0 +1,1593 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Apple ANS NVM Express device driver
+ * Copyright The Asahi Linux Contributors
+ *
+ * Based on the pci.c NVM Express device driver
+ * Copyright (c) 2011-2014, Intel Corporation.
+ * and on the rdma.c NVMe over Fabrics RDMA host code.
+ * Copyright (c) 2015-2016 HGST, a Western Digital Company.
+ */
+
+#include <linux/async.h>
+#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/once.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/soc/apple/sart.h>
+#include <linux/reset.h>
+#include <linux/time64.h>
+
+#include "nvme.h"
+
+#define APPLE_ANS_BOOT_TIMEOUT   USEC_PER_SEC
+#define APPLE_ANS_MAX_QUEUE_DEPTH 64
+
+#define APPLE_ANS_COPROC_CPU_CONTROL    0x44
+#define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4)
+
+#define APPLE_ANS_ACQ_DB  0x1004
+#define APPLE_ANS_IOCQ_DB 0x100c
+
+#define APPLE_ANS_MAX_PEND_CMDS_CTRL 0x1210
+
+#define APPLE_ANS_BOOT_STATUS   0x1300
+#define APPLE_ANS_BOOT_STATUS_OK 0xde71ce55
+
+#define APPLE_ANS_UNKNOWN_CTRL  0x24008
+#define APPLE_ANS_PRP_NULL_CHECK BIT(11)
+
+#define APPLE_ANS_LINEAR_SQ_CTRL 0x24908
+#define APPLE_ANS_LINEAR_SQ_EN  BIT(0)
+
+#define APPLE_ANS_LINEAR_ASQ_DB         0x2490c
+#define APPLE_ANS_LINEAR_IOSQ_DB 0x24910
+
+#define APPLE_NVMMU_NUM_TCBS     0x28100
+#define APPLE_NVMMU_ASQ_TCB_BASE  0x28108
+#define APPLE_NVMMU_IOSQ_TCB_BASE 0x28110
+#define APPLE_NVMMU_TCB_INVAL    0x28118
+#define APPLE_NVMMU_TCB_STAT     0x28120
+
+/*
+ * This controller is a bit weird in the way command tags works: Both the
+ * admin and the IO queue share the same tag space. Additionally, tags
+ * cannot be higher than 0x40 which effectively limits the combined
+ * queue depth to 0x40. Instead of wasting half of that on the admin queue
+ * which gets much less traffic we instead reduce its size here.
+ * The controller also doesn't support async events, so no space needs to
+ * be reserved for NVME_NR_AEN_COMMANDS.
+ */
+#define APPLE_NVME_AQ_DEPTH       2
+#define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1)
+
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS  127
+
+/*
+ * This controller comes with an embedded IOMMU known as NVMMU.
+ * The NVMMU is pointed to an array of TCBs indexed by the command tag.
+ * Each command must be configured inside this structure before it's allowed
+ * to execute, including commands that don't require DMA transfers.
+ *
+ * An exception to this are Apple's vendor-specific commands (opcode 0xD8 on the
+ * admin queue): Those commands must still be added to the NVMMU but the DMA
+ * buffers cannot be represented as PRPs and must instead be allowed using SART.
+ *
+ * Programming the PRPs to the same values as those in the submission queue
+ * looks rather silly at first. This hardware is however designed for a kernel
+ * that runs the NVMMU code in a higher exception level than the NVMe driver.
+ * In that setting the NVMe driver first programs the submission queue entry
+ * and then executes a hypercall to the code that is allowed to program the
+ * NVMMU. The NVMMU driver then creates a shadow copy of the PRPs while
+ * verifying that they don't point to kernel text, data, pagetables, or similar
+ * protected areas before programming the TCB to point to this shadow copy.
+ * Since Linux doesn't do any of that we may as well just point both the queue
+ * and the TCB PRP pointer to the same memory.
+ */
+struct apple_nvmmu_tcb {
+       u8 opcode;
+
+#define APPLE_ANS_TCB_DMA_FROM_DEVICE BIT(0)
+#define APPLE_ANS_TCB_DMA_TO_DEVICE   BIT(1)
+       u8 dma_flags;
+
+       u8 command_id;
+       u8 _unk0;
+       __le16 length;
+       u8 _unk1[18];
+       __le64 prp1;
+       __le64 prp2;
+       u8 _unk2[16];
+       u8 aes_iv[8];
+       u8 _aes_unk[64];
+};
+
+/*
+ * The Apple NVMe controller only supports a single admin and a single IO queue
+ * which are both limited to 64 entries and share a single interrupt.
+ *
+ * The completion queue works as usual. The submission "queue" instead is
+ * an array indexed by the command tag on this hardware. Commands must also be
+ * present in the NVMMU's tcb array. They are triggered by writing their tag to
+ * a MMIO register.
+ */
+struct apple_nvme_queue {
+       struct nvme_command *sqes;
+       struct nvme_completion *cqes;
+       struct apple_nvmmu_tcb *tcbs;
+
+       dma_addr_t sq_dma_addr;
+       dma_addr_t cq_dma_addr;
+       dma_addr_t tcb_dma_addr;
+
+       u32 __iomem *sq_db;
+       u32 __iomem *cq_db;
+
+       u16 cq_head;
+       u8 cq_phase;
+
+       bool is_adminq;
+       bool enabled;
+};
+
+/*
+ * The apple_nvme_iod describes the data in an I/O.
+ *
+ * The sg pointer contains the list of PRP chunk allocations in addition
+ * to the actual struct scatterlist.
+ */
+struct apple_nvme_iod {
+       struct nvme_request req;
+       struct nvme_command cmd;
+       struct apple_nvme_queue *q;
+       int npages; /* In the PRP list. 0 means small pool in use */
+       int nents; /* Used in scatterlist */
+       dma_addr_t first_dma;
+       unsigned int dma_len; /* length of single DMA segment mapping */
+       struct scatterlist *sg;
+};
+
+struct apple_nvme {
+       struct device *dev;
+
+       void __iomem *mmio_coproc;
+       void __iomem *mmio_nvme;
+
+       struct device **pd_dev;
+       struct device_link **pd_link;
+       int pd_count;
+
+       struct apple_sart *sart;
+       struct apple_rtkit *rtk;
+       struct reset_control *reset;
+
+       struct dma_pool *prp_page_pool;
+       struct dma_pool *prp_small_pool;
+       mempool_t *iod_mempool;
+
+       struct nvme_ctrl ctrl;
+       struct work_struct remove_work;
+
+       struct apple_nvme_queue adminq;
+       struct apple_nvme_queue ioq;
+
+       struct blk_mq_tag_set admin_tagset;
+       struct blk_mq_tag_set tagset;
+
+       int irq;
+       spinlock_t lock;
+};
+
+static_assert(sizeof(struct nvme_command) == 64);
+static_assert(sizeof(struct apple_nvmmu_tcb) == 128);
+
+static inline struct apple_nvme *ctrl_to_apple_nvme(struct nvme_ctrl *ctrl)
+{
+       return container_of(ctrl, struct apple_nvme, ctrl);
+}
+
+static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
+{
+       if (q->is_adminq)
+               return container_of(q, struct apple_nvme, adminq);
+       else
+               return container_of(q, struct apple_nvme, ioq);
+}
+
+static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
+{
+       if (q->is_adminq)
+               return APPLE_NVME_AQ_DEPTH;
+       else
+               return APPLE_ANS_MAX_QUEUE_DEPTH;
+}
+
+static void apple_nvme_rtkit_crashed(void *cookie)
+{
+       struct apple_nvme *anv = cookie;
+
+       dev_warn(anv->dev, "RTKit crashed; unable to recover without a reboot");
+       nvme_reset_ctrl(&anv->ctrl);
+}
+
+static int apple_nvme_sart_dma_setup(void *cookie,
+                                    struct apple_rtkit_shmem *bfr)
+{
+       struct apple_nvme *anv = cookie;
+       int ret;
+
+       if (bfr->iova)
+               return -EINVAL;
+       if (!bfr->size)
+               return -EINVAL;
+
+       bfr->buffer =
+               dma_alloc_coherent(anv->dev, bfr->size, &bfr->iova, GFP_KERNEL);
+       if (!bfr->buffer)
+               return -ENOMEM;
+
+       ret = apple_sart_add_allowed_region(anv->sart, bfr->iova, bfr->size);
+       if (ret) {
+               dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+               bfr->buffer = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void apple_nvme_sart_dma_destroy(void *cookie,
+                                       struct apple_rtkit_shmem *bfr)
+{
+       struct apple_nvme *anv = cookie;
+
+       apple_sart_remove_allowed_region(anv->sart, bfr->iova, bfr->size);
+       dma_free_coherent(anv->dev, bfr->size, bfr->buffer, bfr->iova);
+}
+
+static const struct apple_rtkit_ops apple_nvme_rtkit_ops = {
+       .crashed = apple_nvme_rtkit_crashed,
+       .shmem_setup = apple_nvme_sart_dma_setup,
+       .shmem_destroy = apple_nvme_sart_dma_destroy,
+};
+
+static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag)
+{
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+
+       writel(tag, anv->mmio_nvme + APPLE_NVMMU_TCB_INVAL);
+       if (readl(anv->mmio_nvme + APPLE_NVMMU_TCB_STAT))
+               dev_warn_ratelimited(anv->dev,
+                                    "NVMMU TCB invalidation failed\n");
+}
+
+static void apple_nvme_submit_cmd(struct apple_nvme_queue *q,
+                                 struct nvme_command *cmd)
+{
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+       u32 tag = nvme_tag_from_cid(cmd->common.command_id);
+       struct apple_nvmmu_tcb *tcb = &q->tcbs[tag];
+
+       tcb->opcode = cmd->common.opcode;
+       tcb->prp1 = cmd->common.dptr.prp1;
+       tcb->prp2 = cmd->common.dptr.prp2;
+       tcb->length = cmd->rw.length;
+       tcb->command_id = tag;
+
+       if (nvme_is_write(cmd))
+               tcb->dma_flags = APPLE_ANS_TCB_DMA_TO_DEVICE;
+       else
+               tcb->dma_flags = APPLE_ANS_TCB_DMA_FROM_DEVICE;
+
+       memcpy(&q->sqes[tag], cmd, sizeof(*cmd));
+
+       /*
+        * This lock here doesn't make much sense at a first glance but
+        * removing it will result in occasional missed completion
+        * interrupts even though the commands still appear on the CQ.
+        * It's unclear why this happens but our best guess is that
+        * there is a bug in the firmware triggered when a new command
+        * is issued while we're inside the irq handler between the
+        * NVMMU invalidation (and making the tag available again)
+        * and the final CQ update.
+        */
+       spin_lock_irq(&anv->lock);
+       writel(tag, q->sq_db);
+       spin_unlock_irq(&anv->lock);
+}
+
+/*
+ * From pci.c:
+ * Will slightly overestimate the number of pages needed.  This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static inline size_t apple_nvme_iod_alloc_size(void)
+{
+       const unsigned int nprps = DIV_ROUND_UP(
+               NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE);
+       const int npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+       const size_t alloc_size = sizeof(__le64 *) * npages +
+                                 sizeof(struct scatterlist) * NVME_MAX_SEGS;
+
+       return alloc_size;
+}
+
+static void **apple_nvme_iod_list(struct request *req)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+       return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
+}
+
+static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
+{
+       const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       dma_addr_t dma_addr = iod->first_dma;
+       int i;
+
+       for (i = 0; i < iod->npages; i++) {
+               __le64 *prp_list = apple_nvme_iod_list(req)[i];
+               dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
+
+               dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
+               dma_addr = next_dma_addr;
+       }
+}
+
+static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+       if (iod->dma_len) {
+               dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
+                              rq_dma_dir(req));
+               return;
+       }
+
+       WARN_ON_ONCE(!iod->nents);
+
+       dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+       if (iod->npages == 0)
+               dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
+                             iod->first_dma);
+       else
+               apple_nvme_free_prps(anv, req);
+       mempool_free(iod->sg, anv->iod_mempool);
+}
+
+static void apple_nvme_print_sgl(struct scatterlist *sgl, int nents)
+{
+       int i;
+       struct scatterlist *sg;
+
+       for_each_sg(sgl, sg, nents, i) {
+               dma_addr_t phys = sg_phys(sg);
+
+               pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d dma_address:%pad dma_length:%d\n",
+                       i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
+                       sg_dma_len(sg));
+       }
+}
+
+static blk_status_t apple_nvme_setup_prps(struct apple_nvme *anv,
+                                         struct request *req,
+                                         struct nvme_rw_command *cmnd)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct dma_pool *pool;
+       int length = blk_rq_payload_bytes(req);
+       struct scatterlist *sg = iod->sg;
+       int dma_len = sg_dma_len(sg);
+       u64 dma_addr = sg_dma_address(sg);
+       int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
+       __le64 *prp_list;
+       void **list = apple_nvme_iod_list(req);
+       dma_addr_t prp_dma;
+       int nprps, i;
+
+       length -= (NVME_CTRL_PAGE_SIZE - offset);
+       if (length <= 0) {
+               iod->first_dma = 0;
+               goto done;
+       }
+
+       dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
+       if (dma_len) {
+               dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
+       } else {
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+
+       if (length <= NVME_CTRL_PAGE_SIZE) {
+               iod->first_dma = dma_addr;
+               goto done;
+       }
+
+       nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
+       if (nprps <= (256 / 8)) {
+               pool = anv->prp_small_pool;
+               iod->npages = 0;
+       } else {
+               pool = anv->prp_page_pool;
+               iod->npages = 1;
+       }
+
+       prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+       if (!prp_list) {
+               iod->first_dma = dma_addr;
+               iod->npages = -1;
+               return BLK_STS_RESOURCE;
+       }
+       list[0] = prp_list;
+       iod->first_dma = prp_dma;
+       i = 0;
+       for (;;) {
+               if (i == NVME_CTRL_PAGE_SIZE >> 3) {
+                       __le64 *old_prp_list = prp_list;
+
+                       prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
+                       if (!prp_list)
+                               goto free_prps;
+                       list[iod->npages++] = prp_list;
+                       prp_list[0] = old_prp_list[i - 1];
+                       old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+                       i = 1;
+               }
+               prp_list[i++] = cpu_to_le64(dma_addr);
+               dma_len -= NVME_CTRL_PAGE_SIZE;
+               dma_addr += NVME_CTRL_PAGE_SIZE;
+               length -= NVME_CTRL_PAGE_SIZE;
+               if (length <= 0)
+                       break;
+               if (dma_len > 0)
+                       continue;
+               if (unlikely(dma_len < 0))
+                       goto bad_sgl;
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+done:
+       cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
+       return BLK_STS_OK;
+free_prps:
+       apple_nvme_free_prps(anv, req);
+       return BLK_STS_RESOURCE;
+bad_sgl:
+       WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
+            "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req),
+            iod->nents);
+       return BLK_STS_IOERR;
+}
+
+static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
+                                               struct request *req,
+                                               struct nvme_rw_command *cmnd,
+                                               struct bio_vec *bv)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
+       unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;
+
+       iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
+       if (dma_mapping_error(anv->dev, iod->first_dma))
+               return BLK_STS_RESOURCE;
+       iod->dma_len = bv->bv_len;
+
+       cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
+       if (bv->bv_len > first_prp_len)
+               cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
+       return BLK_STS_OK;
+}
+
+static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
+                                       struct request *req,
+                                       struct nvme_command *cmnd)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       blk_status_t ret = BLK_STS_RESOURCE;
+       int nr_mapped;
+
+       if (blk_rq_nr_phys_segments(req) == 1) {
+               struct bio_vec bv = req_bvec(req);
+
+               if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
+                       return apple_nvme_setup_prp_simple(anv, req, &cmnd->rw,
+                                                          &bv);
+       }
+
+       iod->dma_len = 0;
+       iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
+       if (!iod->sg)
+               return BLK_STS_RESOURCE;
+       sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
+       iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
+       if (!iod->nents)
+               goto out_free_sg;
+
+       nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
+                                    rq_dma_dir(req), DMA_ATTR_NO_WARN);
+       if (!nr_mapped)
+               goto out_free_sg;
+
+       ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
+       if (ret != BLK_STS_OK)
+               goto out_unmap_sg;
+       return BLK_STS_OK;
+
+out_unmap_sg:
+       dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
+out_free_sg:
+       mempool_free(iod->sg, anv->iod_mempool);
+       return ret;
+}
+
+static __always_inline void apple_nvme_unmap_rq(struct request *req)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
+
+       if (blk_rq_nr_phys_segments(req))
+               apple_nvme_unmap_data(anv, req);
+}
+
+static void apple_nvme_complete_rq(struct request *req)
+{
+       apple_nvme_unmap_rq(req);
+       nvme_complete_rq(req);
+}
+
+static void apple_nvme_complete_batch(struct io_comp_batch *iob)
+{
+       nvme_complete_batch(iob, apple_nvme_unmap_rq);
+}
+
+static inline bool apple_nvme_cqe_pending(struct apple_nvme_queue *q)
+{
+       struct nvme_completion *hcqe = &q->cqes[q->cq_head];
+
+       return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == q->cq_phase;
+}
+
+static inline struct blk_mq_tags *
+apple_nvme_queue_tagset(struct apple_nvme *anv, struct apple_nvme_queue *q)
+{
+       if (q->is_adminq)
+               return anv->admin_tagset.tags[0];
+       else
+               return anv->tagset.tags[0];
+}
+
+static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q,
+                                        struct io_comp_batch *iob, u16 idx)
+{
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+       struct nvme_completion *cqe = &q->cqes[idx];
+       __u16 command_id = READ_ONCE(cqe->command_id);
+       struct request *req;
+
+       apple_nvmmu_inval(q, command_id);
+
+       req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id);
+       if (unlikely(!req)) {
+               dev_warn(anv->dev, "invalid id %d completed", command_id);
+               return;
+       }
+
+       if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+           !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+                                apple_nvme_complete_batch))
+               apple_nvme_complete_rq(req);
+}
+
+static inline void apple_nvme_update_cq_head(struct apple_nvme_queue *q)
+{
+       u32 tmp = q->cq_head + 1;
+
+       if (tmp == apple_nvme_queue_depth(q)) {
+               q->cq_head = 0;
+               q->cq_phase ^= 1;
+       } else {
+               q->cq_head = tmp;
+       }
+}
+
+static bool apple_nvme_poll_cq(struct apple_nvme_queue *q,
+                              struct io_comp_batch *iob)
+{
+       bool found = false;
+
+       while (apple_nvme_cqe_pending(q)) {
+               found = true;
+
+               /*
+                * load-load control dependency between phase and the rest of
+                * the cqe requires a full read memory barrier
+                */
+               dma_rmb();
+               apple_nvme_handle_cqe(q, iob, q->cq_head);
+               apple_nvme_update_cq_head(q);
+       }
+
+       if (found)
+               writel(q->cq_head, q->cq_db);
+
+       return found;
+}
+
+static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force)
+{
+       bool found;
+       DEFINE_IO_COMP_BATCH(iob);
+
+       if (!READ_ONCE(q->enabled) && !force)
+               return false;
+
+       found = apple_nvme_poll_cq(q, &iob);
+
+       if (!rq_list_empty(iob.req_list))
+               apple_nvme_complete_batch(&iob);
+
+       return found;
+}
+
+static irqreturn_t apple_nvme_irq(int irq, void *data)
+{
+       struct apple_nvme *anv = data;
+       bool handled = false;
+       unsigned long flags;
+
+       spin_lock_irqsave(&anv->lock, flags);
+       if (apple_nvme_handle_cq(&anv->ioq, false))
+               handled = true;
+       if (apple_nvme_handle_cq(&anv->adminq, false))
+               handled = true;
+       spin_unlock_irqrestore(&anv->lock, flags);
+
+       if (handled)
+               return IRQ_HANDLED;
+       return IRQ_NONE;
+}
+
+static int apple_nvme_create_cq(struct apple_nvme *anv)
+{
+       struct nvme_command c = {};
+
+       /*
+        * Note: we (ab)use the fact that the prp fields survive if no data
+        * is attached to the request.
+        */
+       c.create_cq.opcode = nvme_admin_create_cq;
+       c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr);
+       c.create_cq.cqid = cpu_to_le16(1);
+       c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+       c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED);
+       c.create_cq.irq_vector = cpu_to_le16(0);
+
+       return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_cq(struct apple_nvme *anv)
+{
+       struct nvme_command c = {};
+
+       c.delete_queue.opcode = nvme_admin_delete_cq;
+       c.delete_queue.qid = cpu_to_le16(1);
+
+       return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_create_sq(struct apple_nvme *anv)
+{
+       struct nvme_command c = {};
+
+       /*
+        * Note: we (ab)use the fact that the prp fields survive if no data
+        * is attached to the request.
+        */
+       c.create_sq.opcode = nvme_admin_create_sq;
+       c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr);
+       c.create_sq.sqid = cpu_to_le16(1);
+       c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1);
+       c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG);
+       c.create_sq.cqid = cpu_to_le16(1);
+
+       return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static int apple_nvme_remove_sq(struct apple_nvme *anv)
+{
+       struct nvme_command c = {};
+
+       c.delete_queue.opcode = nvme_admin_delete_sq;
+       c.delete_queue.qid = cpu_to_le16(1);
+
+       return nvme_submit_sync_cmd(anv->ctrl.admin_q, &c, NULL, 0);
+}
+
+static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+                                       const struct blk_mq_queue_data *bd)
+{
+       struct nvme_ns *ns = hctx->queue->queuedata;
+       struct apple_nvme_queue *q = hctx->driver_data;
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+       struct request *req = bd->rq;
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_command *cmnd = &iod->cmd;
+       blk_status_t ret;
+
+       iod->npages = -1;
+       iod->nents = 0;
+
+       /*
+        * We should not need to do this, but we're still using this to
+        * ensure we can drain requests on a dying queue.
+        */
+       if (unlikely(!READ_ONCE(q->enabled)))
+               return BLK_STS_IOERR;
+
+       if (!nvme_check_ready(&anv->ctrl, req, true))
+               return nvme_fail_nonready_command(&anv->ctrl, req);
+
+       ret = nvme_setup_cmd(ns, req);
+       if (ret)
+               return ret;
+
+       if (blk_rq_nr_phys_segments(req)) {
+               ret = apple_nvme_map_data(anv, req, cmnd);
+               if (ret)
+                       goto out_free_cmd;
+       }
+
+       blk_mq_start_request(req);
+       apple_nvme_submit_cmd(q, cmnd);
+       return BLK_STS_OK;
+
+out_free_cmd:
+       nvme_cleanup_cmd(req);
+       return ret;
+}
+
+static int apple_nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
+                               unsigned int hctx_idx)
+{
+       hctx->driver_data = data;
+       return 0;
+}
+
+static int apple_nvme_init_request(struct blk_mq_tag_set *set,
+                                  struct request *req, unsigned int hctx_idx,
+                                  unsigned int numa_node)
+{
+       struct apple_nvme_queue *q = set->driver_data;
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct nvme_request *nreq = nvme_req(req);
+
+       iod->q = q;
+       nreq->ctrl = &anv->ctrl;
+       nreq->cmd = &iod->cmd;
+
+       return 0;
+}
+
+static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
+{
+       u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+       bool dead = false, freeze = false;
+       unsigned long flags;
+
+       if (apple_rtkit_is_crashed(anv->rtk))
+               dead = true;
+       if (!(csts & NVME_CSTS_RDY))
+               dead = true;
+       if (csts & NVME_CSTS_CFS)
+               dead = true;
+
+       if (anv->ctrl.state == NVME_CTRL_LIVE ||
+           anv->ctrl.state == NVME_CTRL_RESETTING) {
+               freeze = true;
+               nvme_start_freeze(&anv->ctrl);
+       }
+
+       /*
+        * Give the controller a chance to complete all entered requests if
+        * doing a safe shutdown.
+        */
+       if (!dead && shutdown && freeze)
+               nvme_wait_freeze_timeout(&anv->ctrl, NVME_IO_TIMEOUT);
+
+       nvme_stop_queues(&anv->ctrl);
+
+       if (!dead) {
+               if (READ_ONCE(anv->ioq.enabled)) {
+                       apple_nvme_remove_sq(anv);
+                       apple_nvme_remove_cq(anv);
+               }
+
+               if (shutdown)
+                       nvme_shutdown_ctrl(&anv->ctrl);
+               nvme_disable_ctrl(&anv->ctrl);
+       }
+
+       WRITE_ONCE(anv->ioq.enabled, false);
+       WRITE_ONCE(anv->adminq.enabled, false);
+       mb(); /* ensure that nvme_queue_rq() sees that enabled is cleared */
+       nvme_stop_admin_queue(&anv->ctrl);
+
+       /* last chance to complete any requests before nvme_cancel_request */
+       spin_lock_irqsave(&anv->lock, flags);
+       apple_nvme_handle_cq(&anv->ioq, true);
+       apple_nvme_handle_cq(&anv->adminq, true);
+       spin_unlock_irqrestore(&anv->lock, flags);
+
+       blk_mq_tagset_busy_iter(&anv->tagset, nvme_cancel_request, &anv->ctrl);
+       blk_mq_tagset_busy_iter(&anv->admin_tagset, nvme_cancel_request,
+                               &anv->ctrl);
+       blk_mq_tagset_wait_completed_request(&anv->tagset);
+       blk_mq_tagset_wait_completed_request(&anv->admin_tagset);
+
+       /*
+        * The driver will not be starting up queues again if shutting down so
+        * must flush all entered requests to their failed completion to avoid
+        * deadlocking blk-mq hot-cpu notifier.
+        */
+       if (shutdown) {
+               nvme_start_queues(&anv->ctrl);
+               nvme_start_admin_queue(&anv->ctrl);
+       }
+}
+
+static enum blk_eh_timer_return apple_nvme_timeout(struct request *req,
+                                                  bool reserved)
+{
+       struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
+       struct apple_nvme_queue *q = iod->q;
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+       unsigned long flags;
+       u32 csts = readl(anv->mmio_nvme + NVME_REG_CSTS);
+
+       if (anv->ctrl.state != NVME_CTRL_LIVE) {
+               /*
+                * From rdma.c:
+                * If we are resetting, connecting or deleting we should
+                * complete immediately because we may block controller
+                * teardown or setup sequence
+                * - ctrl disable/shutdown fabrics requests
+                * - connect requests
+                * - initialization admin requests
+                * - I/O requests that entered after unquiescing and
+                *   the controller stopped responding
+                *
+                * All other requests should be cancelled by the error
+                * recovery work, so it's fine that we fail it here.
+                */
+               dev_warn(anv->dev,
+                        "I/O %d(aq:%d) timeout while not in live state\n",
+                        req->tag, q->is_adminq);
+               if (blk_mq_request_started(req) &&
+                   !blk_mq_request_completed(req)) {
+                       nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
+                       nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+                       blk_mq_complete_request(req);
+               }
+               return BLK_EH_DONE;
+       }
+
+       /* check if we just missed an interrupt if we're still alive */
+       if (!apple_rtkit_is_crashed(anv->rtk) && !(csts & NVME_CSTS_CFS)) {
+               spin_lock_irqsave(&anv->lock, flags);
+               apple_nvme_handle_cq(q, false);
+               spin_unlock_irqrestore(&anv->lock, flags);
+               if (blk_mq_request_completed(req)) {
+                       dev_warn(anv->dev,
+                                "I/O %d(aq:%d) timeout: completion polled\n",
+                                req->tag, q->is_adminq);
+                       return BLK_EH_DONE;
+               }
+       }
+
+       /*
+        * aborting commands isn't supported which leaves a full reset as our
+        * only option here
+        */
+       dev_warn(anv->dev, "I/O %d(aq:%d) timeout: resetting controller\n",
+                req->tag, q->is_adminq);
+       nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+       apple_nvme_disable(anv, false);
+       nvme_reset_ctrl(&anv->ctrl);
+       return BLK_EH_DONE;
+}
+
+static int apple_nvme_poll(struct blk_mq_hw_ctx *hctx,
+                          struct io_comp_batch *iob)
+{
+       struct apple_nvme_queue *q = hctx->driver_data;
+       struct apple_nvme *anv = queue_to_apple_nvme(q);
+       bool found;
+       unsigned long flags;
+
+       spin_lock_irqsave(&anv->lock, flags);
+       found = apple_nvme_poll_cq(q, iob);
+       spin_unlock_irqrestore(&anv->lock, flags);
+
+       return found;
+}
+
+/* blk-mq callbacks for the admin queue (no ->poll support there). */
+static const struct blk_mq_ops apple_nvme_mq_admin_ops = {
+       .queue_rq = apple_nvme_queue_rq,
+       .complete = apple_nvme_complete_rq,
+       .init_hctx = apple_nvme_init_hctx,
+       .init_request = apple_nvme_init_request,
+       .timeout = apple_nvme_timeout,
+};
+
+/* blk-mq callbacks for the I/O queue: admin ops plus ->poll. */
+static const struct blk_mq_ops apple_nvme_mq_ops = {
+       .queue_rq = apple_nvme_queue_rq,
+       .complete = apple_nvme_complete_rq,
+       .init_hctx = apple_nvme_init_hctx,
+       .init_request = apple_nvme_init_request,
+       .timeout = apple_nvme_timeout,
+       .poll = apple_nvme_poll,
+};
+
+/*
+ * (Re)initialize a queue's rings before marking it enabled.
+ *
+ * The TCB array is cleared for APPLE_ANS_MAX_QUEUE_DEPTH (not the
+ * per-queue depth) because the NVMMU has a single depth configuration
+ * shared by both queues (see apple_nvme_queue_alloc()).
+ */
+static void apple_nvme_init_queue(struct apple_nvme_queue *q)
+{
+       unsigned int depth = apple_nvme_queue_depth(q);
+
+       q->cq_head = 0;
+       q->cq_phase = 1;
+       memset(q->tcbs, 0,
+              APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb));
+       memset(q->cqes, 0, depth * sizeof(struct nvme_completion));
+       WRITE_ONCE(q->enabled, true);
+       wmb(); /* ensure the first interrupt sees the initialization */
+}
+
+/*
+ * Controller reset worker (ctrl.reset_work).
+ *
+ * Shuts down and reboots the ANS co-processor through RTKit and the
+ * reset controller, reprograms the NVMMU/linear-SQ registers, then
+ * re-creates the admin and I/O queues and moves the controller through
+ * CONNECTING to LIVE.  On any failure the controller is marked
+ * DELETING, disabled, and final teardown is deferred to
+ * apple_nvme_remove_dead_ctrl_work().
+ */
+static void apple_nvme_reset_work(struct work_struct *work)
+{
+       unsigned int nr_io_queues = 1;
+       int ret;
+       u32 boot_status, aqa;
+       struct apple_nvme *anv =
+               container_of(work, struct apple_nvme, ctrl.reset_work);
+
+       if (anv->ctrl.state != NVME_CTRL_RESETTING) {
+               dev_warn(anv->dev, "ctrl state %d is not RESETTING\n",
+                        anv->ctrl.state);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       /* there's unfortunately no known way to recover if RTKit crashed :( */
+       if (apple_rtkit_is_crashed(anv->rtk)) {
+               dev_err(anv->dev,
+                       "RTKit has crashed without any way to recover.");
+               ret = -EIO;
+               goto out;
+       }
+
+       if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+               apple_nvme_disable(anv, false);
+
+       /* RTKit must be shut down cleanly for the (soft)-reset to work */
+       if (apple_rtkit_is_running(anv->rtk)) {
+               dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
+               ret = apple_rtkit_shutdown(anv->rtk);
+               if (ret)
+                       goto out;
+       }
+
+       /* stop the co-processor CPU, then cycle it through the reset line */
+       writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+       ret = reset_control_assert(anv->reset);
+       if (ret)
+               goto out;
+
+       ret = apple_rtkit_reinit(anv->rtk);
+       if (ret)
+               goto out;
+
+       ret = reset_control_deassert(anv->reset);
+       if (ret)
+               goto out;
+
+       writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+              anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+       ret = apple_rtkit_boot(anv->rtk);
+       if (ret) {
+               dev_err(anv->dev, "ANS did not boot");
+               goto out;
+       }
+
+       /* wait for the firmware to report it is ready to accept commands */
+       ret = readl_poll_timeout(anv->mmio_nvme + APPLE_ANS_BOOT_STATUS,
+                                boot_status,
+                                boot_status == APPLE_ANS_BOOT_STATUS_OK,
+                                USEC_PER_MSEC, APPLE_ANS_BOOT_TIMEOUT);
+       if (ret) {
+               dev_err(anv->dev, "ANS did not initialize");
+               goto out;
+       }
+
+       dev_dbg(anv->dev, "ANS booted successfully.");
+
+       /*
+        * Limit the max command size to prevent iod->sg allocations going
+        * over a single page.
+        */
+       anv->ctrl.max_hw_sectors = min_t(u32, NVME_MAX_KB_SZ << 1,
+                                        dma_max_mapping_size(anv->dev) >> 9);
+       anv->ctrl.max_segments = NVME_MAX_SEGS;
+
+       /*
+        * Enable NVMMU and linear submission queues.
+        * While we could keep those disabled and pretend this is slightly
+        * more common NVMe controller we'd still need some quirks (e.g.
+        * sq entries will be 128 bytes) and Apple might drop support for
+        * that mode in the future.
+        */
+       writel(APPLE_ANS_LINEAR_SQ_EN,
+              anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL);
+
+       /* Allow as many pending command as possible for both queues */
+       writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16),
+              anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL);
+
+       /* Setup the NVMMU for the maximum admin and IO queue depth */
+       writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1,
+              anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS);
+
+       /*
+        * This is probably a chicken bit: without it all commands where any PRP
+        * is set to zero (including those that don't use that field) fail and
+        * the co-processor complains about "completed with err BAD_CMD-" or
+        * a "NULL_PRP_PTR_ERR" in the syslog
+        */
+       writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) &
+                      ~APPLE_ANS_PRP_NULL_CHECK,
+              anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL);
+
+       /* Setup the admin queue */
+       aqa = APPLE_NVME_AQ_DEPTH - 1;
+       aqa |= aqa << 16;
+       writel(aqa, anv->mmio_nvme + NVME_REG_AQA);
+       writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ);
+       writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ);
+
+       /* Setup NVMMU for both queues */
+       writeq(anv->adminq.tcb_dma_addr,
+              anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE);
+       writeq(anv->ioq.tcb_dma_addr,
+              anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
+
+       anv->ctrl.sqsize =
+               APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+       anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
+
+       dev_dbg(anv->dev, "Enabling controller now");
+       ret = nvme_enable_ctrl(&anv->ctrl);
+       if (ret)
+               goto out;
+
+       dev_dbg(anv->dev, "Starting admin queue");
+       apple_nvme_init_queue(&anv->adminq);
+       nvme_start_admin_queue(&anv->ctrl);
+
+       if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_CONNECTING)) {
+               dev_warn(anv->ctrl.device,
+                        "failed to mark controller CONNECTING\n");
+               ret = -ENODEV;
+               goto out;
+       }
+
+       ret = nvme_init_ctrl_finish(&anv->ctrl);
+       if (ret)
+               goto out;
+
+       dev_dbg(anv->dev, "Creating IOCQ");
+       ret = apple_nvme_create_cq(anv);
+       if (ret)
+               goto out;
+       dev_dbg(anv->dev, "Creating IOSQ");
+       ret = apple_nvme_create_sq(anv);
+       if (ret)
+               goto out_remove_cq;
+
+       apple_nvme_init_queue(&anv->ioq);
+       nr_io_queues = 1;
+       ret = nvme_set_queue_count(&anv->ctrl, &nr_io_queues);
+       if (ret)
+               goto out_remove_sq;
+       if (nr_io_queues != 1) {
+               ret = -ENXIO;
+               goto out_remove_sq;
+       }
+
+       anv->ctrl.queue_count = nr_io_queues + 1;
+
+       nvme_start_queues(&anv->ctrl);
+       nvme_wait_freeze(&anv->ctrl);
+       blk_mq_update_nr_hw_queues(&anv->tagset, 1);
+       nvme_unfreeze(&anv->ctrl);
+
+       if (!nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_LIVE)) {
+               dev_warn(anv->ctrl.device,
+                        "failed to mark controller live state\n");
+               ret = -ENODEV;
+               goto out_remove_sq;
+       }
+
+       nvme_start_ctrl(&anv->ctrl);
+
+       dev_dbg(anv->dev, "ANS boot and NVMe init completed.");
+       return;
+
+out_remove_sq:
+       apple_nvme_remove_sq(anv);
+out_remove_cq:
+       apple_nvme_remove_cq(anv);
+out:
+       dev_warn(anv->ctrl.device, "Reset failure status: %d\n", ret);
+       nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+       /* reference dropped in apple_nvme_remove_dead_ctrl_work() */
+       nvme_get_ctrl(&anv->ctrl);
+       apple_nvme_disable(anv, false);
+       nvme_kill_queues(&anv->ctrl);
+       if (!queue_work(nvme_wq, &anv->remove_work))
+               nvme_put_ctrl(&anv->ctrl);
+}
+
+/*
+ * Runs from nvme_wq after a failed reset: drop the controller reference
+ * taken in the reset failure path and unbind the driver from the dead
+ * device.
+ */
+static void apple_nvme_remove_dead_ctrl_work(struct work_struct *work)
+{
+       struct apple_nvme *anv =
+               container_of(work, struct apple_nvme, remove_work);
+
+       nvme_put_ctrl(&anv->ctrl);
+       device_release_driver(anv->dev);
+}
+
+/* 32-bit MMIO register read for the NVMe core (nvme_ctrl_ops). */
+static int apple_nvme_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
+{
+       *val = readl(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+       return 0;
+}
+
+/* 32-bit MMIO register write for the NVMe core (nvme_ctrl_ops). */
+static int apple_nvme_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+{
+       writel(val, ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+       return 0;
+}
+
+/* 64-bit MMIO register read for the NVMe core (nvme_ctrl_ops). */
+static int apple_nvme_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+{
+       *val = readq(ctrl_to_apple_nvme(ctrl)->mmio_nvme + off);
+       return 0;
+}
+
+/* Report the platform device name as the controller "address" string. */
+static int apple_nvme_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
+{
+       struct device *dev = ctrl_to_apple_nvme(ctrl)->dev;
+
+       return snprintf(buf, size, "%s\n", dev_name(dev));
+}
+
+/*
+ * Last-reference teardown for the nvme_ctrl: release the admin queue
+ * reference (taken via blk_get_queue() in probe) and the device
+ * reference taken with get_device() in probe.
+ */
+static void apple_nvme_free_ctrl(struct nvme_ctrl *ctrl)
+{
+       struct apple_nvme *anv = ctrl_to_apple_nvme(ctrl);
+
+       if (anv->ctrl.admin_q)
+               blk_put_queue(anv->ctrl.admin_q);
+       put_device(anv->dev);
+}
+
+/* Register-access and lifecycle callbacks used by the NVMe core. */
+static const struct nvme_ctrl_ops nvme_ctrl_ops = {
+       .name = "apple-nvme",
+       .module = THIS_MODULE,
+       .flags = 0,
+       .reg_read32 = apple_nvme_reg_read32,
+       .reg_write32 = apple_nvme_reg_write32,
+       .reg_read64 = apple_nvme_reg_read64,
+       .free_ctrl = apple_nvme_free_ctrl,
+       .get_address = apple_nvme_get_address,
+};
+
+/*
+ * Async probe continuation: wait for the initial reset and namespace
+ * scan to finish, then drop the controller reference held across the
+ * asynchronous part of probing.
+ */
+static void apple_nvme_async_probe(void *data, async_cookie_t cookie)
+{
+       struct apple_nvme *anv = data;
+
+       flush_work(&anv->ctrl.reset_work);
+       flush_work(&anv->ctrl.scan_work);
+       nvme_put_ctrl(&anv->ctrl);
+}
+
+/*
+ * Allocate the admin and I/O blk-mq tag sets.  Freeing is registered as
+ * devm actions, so both sets are released automatically when the driver
+ * detaches; no manual unwinding is needed on later probe failures.
+ */
+static int apple_nvme_alloc_tagsets(struct apple_nvme *anv)
+{
+       int ret;
+
+       anv->admin_tagset.ops = &apple_nvme_mq_admin_ops;
+       anv->admin_tagset.nr_hw_queues = 1;
+       anv->admin_tagset.queue_depth = APPLE_NVME_AQ_MQ_TAG_DEPTH;
+       anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT;
+       anv->admin_tagset.numa_node = NUMA_NO_NODE;
+       anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod);
+       anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
+       anv->admin_tagset.driver_data = &anv->adminq;
+
+       ret = blk_mq_alloc_tag_set(&anv->admin_tagset);
+       if (ret)
+               return ret;
+       ret = devm_add_action_or_reset(anv->dev,
+                                      (void (*)(void *))blk_mq_free_tag_set,
+                                      &anv->admin_tagset);
+       if (ret)
+               return ret;
+
+       anv->tagset.ops = &apple_nvme_mq_ops;
+       anv->tagset.nr_hw_queues = 1;
+       anv->tagset.nr_maps = 1;
+       /*
+        * Tags are used as an index to the NVMMU and must be unique across
+        * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which
+        * must be marked as reserved in the IO queue.
+        */
+       anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH;
+       anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1;
+       anv->tagset.timeout = NVME_IO_TIMEOUT;
+       anv->tagset.numa_node = NUMA_NO_NODE;
+       anv->tagset.cmd_size = sizeof(struct apple_nvme_iod);
+       anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
+       anv->tagset.driver_data = &anv->ioq;
+
+       ret = blk_mq_alloc_tag_set(&anv->tagset);
+       if (ret)
+               return ret;
+       ret = devm_add_action_or_reset(
+               anv->dev, (void (*)(void *))blk_mq_free_tag_set, &anv->tagset);
+       if (ret)
+               return ret;
+
+       anv->ctrl.admin_tagset = &anv->admin_tagset;
+       anv->ctrl.tagset = &anv->tagset;
+
+       return 0;
+}
+
+/*
+ * Allocate the DMA-coherent CQ, SQ and TCB rings for one queue.  All
+ * allocations are device-managed (dmam_*), so partial failures need no
+ * manual unwinding here.
+ */
+static int apple_nvme_queue_alloc(struct apple_nvme *anv,
+                                 struct apple_nvme_queue *q)
+{
+       unsigned int depth = apple_nvme_queue_depth(q);
+
+       q->cqes = dmam_alloc_coherent(anv->dev,
+                                     depth * sizeof(struct nvme_completion),
+                                     &q->cq_dma_addr, GFP_KERNEL);
+       if (!q->cqes)
+               return -ENOMEM;
+
+       q->sqes = dmam_alloc_coherent(anv->dev,
+                                     depth * sizeof(struct nvme_command),
+                                     &q->sq_dma_addr, GFP_KERNEL);
+       if (!q->sqes)
+               return -ENOMEM;
+
+       /*
+        * We need the maximum queue depth here because the NVMMU only has a
+        * single depth configuration shared between both queues.
+        */
+       q->tcbs = dmam_alloc_coherent(anv->dev,
+                                     APPLE_ANS_MAX_QUEUE_DEPTH *
+                                             sizeof(struct apple_nvmmu_tcb),
+                                     &q->tcb_dma_addr, GFP_KERNEL);
+       if (!q->tcbs)
+               return -ENOMEM;
+
+       /*
+        * initialize phase to make sure the allocated and empty memory
+        * doesn't look like a full cq already.
+        */
+       q->cq_phase = 1;
+       return 0;
+}
+
+/*
+ * Detach the power domains attached by apple_nvme_attach_genpd(), in
+ * reverse order.  With at most one domain this is a no-op, mirroring
+ * the early return in the attach path.  Entries may be partially
+ * populated when called from a failed attach, hence the NULL/IS_ERR
+ * checks.
+ */
+static void apple_nvme_detach_genpd(struct apple_nvme *anv)
+{
+       int i;
+
+       if (anv->pd_count <= 1)
+               return;
+
+       for (i = anv->pd_count - 1; i >= 0; i--) {
+               if (anv->pd_link[i])
+                       device_link_del(anv->pd_link[i]);
+               if (!IS_ERR_OR_NULL(anv->pd_dev[i]))
+                       dev_pm_domain_detach(anv->pd_dev[i], true);
+       }
+}
+
+/*
+ * Attach every power domain listed in the device tree and link it to
+ * this device so the domains follow its runtime-PM state.
+ *
+ * With one or zero domains nothing is done here.  NOTE(review): a
+ * negative return from of_count_phandle_with_args() (e.g. missing
+ * property) also takes the <= 1 path and is reported as success --
+ * confirm that is intended.
+ */
+static int apple_nvme_attach_genpd(struct apple_nvme *anv)
+{
+       struct device *dev = anv->dev;
+       int i;
+
+       anv->pd_count = of_count_phandle_with_args(
+               dev->of_node, "power-domains", "#power-domain-cells");
+       if (anv->pd_count <= 1)
+               return 0;
+
+       anv->pd_dev = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_dev),
+                                  GFP_KERNEL);
+       if (!anv->pd_dev)
+               return -ENOMEM;
+
+       anv->pd_link = devm_kcalloc(dev, anv->pd_count, sizeof(*anv->pd_link),
+                                   GFP_KERNEL);
+       if (!anv->pd_link)
+               return -ENOMEM;
+
+       for (i = 0; i < anv->pd_count; i++) {
+               anv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+               if (IS_ERR(anv->pd_dev[i])) {
+                       apple_nvme_detach_genpd(anv);
+                       return PTR_ERR(anv->pd_dev[i]);
+               }
+
+               anv->pd_link[i] = device_link_add(dev, anv->pd_dev[i],
+                                                 DL_FLAG_STATELESS |
+                                                 DL_FLAG_PM_RUNTIME |
+                                                 DL_FLAG_RPM_ACTIVE);
+               if (!anv->pd_link[i]) {
+                       apple_nvme_detach_genpd(anv);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Platform probe: attach power domains, map the co-processor and NVMe
+ * MMIO regions, allocate the DMA rings and PRP pools, register the
+ * interrupt and RTKit instance, then initialize the nvme_ctrl and kick
+ * an asynchronous controller reset which performs the actual ANS boot
+ * and NVMe bring-up (see apple_nvme_reset_work()).
+ */
+static int apple_nvme_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct apple_nvme *anv;
+       int ret;
+
+       anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
+       if (!anv)
+               return -ENOMEM;
+
+       /* reference released in apple_nvme_free_ctrl() / the put_dev path */
+       anv->dev = get_device(dev);
+       anv->adminq.is_adminq = true;
+       platform_set_drvdata(pdev, anv);
+
+       ret = apple_nvme_attach_genpd(anv);
+       if (ret < 0) {
+               dev_err_probe(dev, ret, "Failed to attach power domains");
+               goto put_dev;
+       }
+       if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+               ret = -ENXIO;
+               goto put_dev;
+       }
+
+       anv->irq = platform_get_irq(pdev, 0);
+       if (anv->irq < 0) {
+               ret = anv->irq;
+               goto put_dev;
+       }
+       if (!anv->irq) {
+               ret = -ENXIO;
+               goto put_dev;
+       }
+
+       anv->mmio_coproc = devm_platform_ioremap_resource_byname(pdev, "ans");
+       if (IS_ERR(anv->mmio_coproc)) {
+               ret = PTR_ERR(anv->mmio_coproc);
+               goto put_dev;
+       }
+       anv->mmio_nvme = devm_platform_ioremap_resource_byname(pdev, "nvme");
+       if (IS_ERR(anv->mmio_nvme)) {
+               ret = PTR_ERR(anv->mmio_nvme);
+               goto put_dev;
+       }
+
+       anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB;
+       anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB;
+       anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB;
+       anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB;
+
+       anv->sart = devm_apple_sart_get(dev);
+       if (IS_ERR(anv->sart)) {
+               ret = dev_err_probe(dev, PTR_ERR(anv->sart),
+                                   "Failed to initialize SART");
+               goto put_dev;
+       }
+
+       anv->reset = devm_reset_control_array_get_exclusive(anv->dev);
+       if (IS_ERR(anv->reset)) {
+               ret = dev_err_probe(dev, PTR_ERR(anv->reset),
+                                   "Failed to get reset control");
+               goto put_dev;
+       }
+
+       INIT_WORK(&anv->ctrl.reset_work, apple_nvme_reset_work);
+       INIT_WORK(&anv->remove_work, apple_nvme_remove_dead_ctrl_work);
+       spin_lock_init(&anv->lock);
+
+       ret = apple_nvme_queue_alloc(anv, &anv->adminq);
+       if (ret)
+               goto put_dev;
+       ret = apple_nvme_queue_alloc(anv, &anv->ioq);
+       if (ret)
+               goto put_dev;
+
+       anv->prp_page_pool = dmam_pool_create("prp list page", anv->dev,
+                                             NVME_CTRL_PAGE_SIZE,
+                                             NVME_CTRL_PAGE_SIZE, 0);
+       if (!anv->prp_page_pool) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
+
+       anv->prp_small_pool =
+               dmam_pool_create("prp list 256", anv->dev, 256, 256, 0);
+       if (!anv->prp_small_pool) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
+
+       WARN_ON_ONCE(apple_nvme_iod_alloc_size() > PAGE_SIZE);
+       anv->iod_mempool =
+               mempool_create_kmalloc_pool(1, apple_nvme_iod_alloc_size());
+       if (!anv->iod_mempool) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
+       ret = devm_add_action_or_reset(
+               anv->dev, (void (*)(void *))mempool_destroy, anv->iod_mempool);
+       if (ret)
+               goto put_dev;
+
+       ret = apple_nvme_alloc_tagsets(anv);
+       if (ret)
+               goto put_dev;
+
+       ret = devm_request_irq(anv->dev, anv->irq, apple_nvme_irq, 0,
+                              "nvme-apple", anv);
+       if (ret) {
+               dev_err_probe(dev, ret, "Failed to request IRQ");
+               goto put_dev;
+       }
+
+       anv->rtk =
+               devm_apple_rtkit_init(dev, anv, NULL, 0, &apple_nvme_rtkit_ops);
+       if (IS_ERR(anv->rtk)) {
+               ret = dev_err_probe(dev, PTR_ERR(anv->rtk),
+                                   "Failed to initialize RTKit");
+               goto put_dev;
+       }
+
+       ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
+                            NVME_QUIRK_SKIP_CID_GEN);
+       if (ret) {
+               dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
+               goto put_dev;
+       }
+
+       anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+       if (IS_ERR(anv->ctrl.admin_q)) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
+
+       if (!blk_get_queue(anv->ctrl.admin_q)) {
+               nvme_start_admin_queue(&anv->ctrl);
+               blk_cleanup_queue(anv->ctrl.admin_q);
+               anv->ctrl.admin_q = NULL;
+               ret = -ENODEV;
+               goto put_dev;
+       }
+
+       nvme_reset_ctrl(&anv->ctrl);
+       async_schedule(apple_nvme_async_probe, anv);
+
+       return 0;
+
+/*
+ * NOTE(review): these error paths never call apple_nvme_detach_genpd()
+ * after a successful attach; verify the power-domain device links are
+ * not leaked on probe failure.
+ */
+put_dev:
+       put_device(anv->dev);
+       return ret;
+}
+
+/*
+ * Driver unbind: tear the NVMe controller down (flushing any in-flight
+ * reset first), then stop RTKit and detach the power domains.
+ */
+static int apple_nvme_remove(struct platform_device *pdev)
+{
+       struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+       nvme_change_ctrl_state(&anv->ctrl, NVME_CTRL_DELETING);
+       flush_work(&anv->ctrl.reset_work);
+       nvme_stop_ctrl(&anv->ctrl);
+       nvme_remove_namespaces(&anv->ctrl);
+       apple_nvme_disable(anv, true);
+       nvme_uninit_ctrl(&anv->ctrl);
+
+       if (apple_rtkit_is_running(anv->rtk))
+               apple_rtkit_shutdown(anv->rtk);
+
+       apple_nvme_detach_genpd(anv);
+
+       return 0;
+}
+
+/* System shutdown: disable the controller and stop RTKit if running. */
+static void apple_nvme_shutdown(struct platform_device *pdev)
+{
+       struct apple_nvme *anv = platform_get_drvdata(pdev);
+
+       apple_nvme_disable(anv, true);
+       if (apple_rtkit_is_running(anv->rtk))
+               apple_rtkit_shutdown(anv->rtk);
+}
+
+/*
+ * Resume: schedule a full controller reset, which re-boots ANS and
+ * re-initializes the controller (see apple_nvme_reset_work()).
+ */
+static int apple_nvme_resume(struct device *dev)
+{
+       struct apple_nvme *anv = dev_get_drvdata(dev);
+
+       return nvme_reset_ctrl(&anv->ctrl);
+}
+
+/*
+ * Suspend: disable the controller, shut RTKit down cleanly (required
+ * for the reset/boot sequence on resume) and stop the co-processor CPU.
+ */
+static int apple_nvme_suspend(struct device *dev)
+{
+       struct apple_nvme *anv = dev_get_drvdata(dev);
+       int ret = 0;
+
+       apple_nvme_disable(anv, true);
+
+       if (apple_rtkit_is_running(anv->rtk))
+               ret = apple_rtkit_shutdown(anv->rtk);
+
+       writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+       return ret;
+}
+
+/* System-sleep PM hooks; compiled out when CONFIG_PM_SLEEP is off. */
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend,
+                               apple_nvme_resume);
+
+/* Matched via the device tree on Apple SoCs with an ANS2 co-processor. */
+static const struct of_device_id apple_nvme_of_match[] = {
+       { .compatible = "apple,nvme-ans2" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, apple_nvme_of_match);
+
+static struct platform_driver apple_nvme_driver = {
+       .driver = {
+               .name = "nvme-apple",
+               .of_match_table = apple_nvme_of_match,
+               .pm = pm_sleep_ptr(&apple_nvme_pm_ops),
+       },
+       .probe = apple_nvme_probe,
+       .remove = apple_nvme_remove,
+       .shutdown = apple_nvme_shutdown,
+};
+module_platform_driver(apple_nvme_driver);
+
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("GPL");
index efb85c6..e1846d0 100644 (file)
@@ -366,7 +366,7 @@ static inline void nvme_end_req(struct request *req)
 {
        blk_status_t status = nvme_error_status(nvme_req(req)->status);
 
-       if (unlikely(nvme_req(req)->status != NVME_SC_SUCCESS))
+       if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET)))
                nvme_log_error(req);
        nvme_end_req_zoned(req);
        nvme_trace_bio_complete(req);
@@ -1015,6 +1015,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                        goto out;
        }
 
+       req->rq_flags |= RQF_QUIET;
        ret = nvme_execute_rq(req, at_head);
        if (result && ret >= 0)
                *result = nvme_req(req)->result;
@@ -1287,6 +1288,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_EUI64_LEN;
                memcpy(ids->eui64, data + sizeof(*cur), NVME_NIDT_EUI64_LEN);
                return NVME_NIDT_EUI64_LEN;
        case NVME_NIDT_NGUID:
@@ -1295,6 +1298,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_NGUID_LEN;
                memcpy(ids->nguid, data + sizeof(*cur), NVME_NIDT_NGUID_LEN);
                return NVME_NIDT_NGUID_LEN;
        case NVME_NIDT_UUID:
@@ -1303,6 +1308,8 @@ static int nvme_process_ns_desc(struct nvme_ctrl *ctrl, struct nvme_ns_ids *ids,
                                 warn_str, cur->nidl);
                        return -1;
                }
+               if (ctrl->quirks & NVME_QUIRK_BOGUS_NID)
+                       return NVME_NIDT_UUID_LEN;
                uuid_copy(&ids->uuid, data + sizeof(*cur));
                return NVME_NIDT_UUID_LEN;
        case NVME_NIDT_CSI:
@@ -1399,12 +1406,18 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid,
        if ((*id)->ncap == 0) /* namespace not allocated or attached */
                goto out_free_id;
 
-       if (ctrl->vs >= NVME_VS(1, 1, 0) &&
-           !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
-               memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
-       if (ctrl->vs >= NVME_VS(1, 2, 0) &&
-           !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
-               memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+
+       if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) {
+               dev_info(ctrl->device,
+                        "Ignoring bogus Namespace Identifiers\n");
+       } else {
+               if (ctrl->vs >= NVME_VS(1, 1, 0) &&
+                   !memchr_inv(ids->eui64, 0, sizeof(ids->eui64)))
+                       memcpy(ids->eui64, (*id)->eui64, sizeof(ids->eui64));
+               if (ctrl->vs >= NVME_VS(1, 2, 0) &&
+                   !memchr_inv(ids->nguid, 0, sizeof(ids->nguid)))
+                       memcpy(ids->nguid, (*id)->nguid, sizeof(ids->nguid));
+       }
 
        return 0;
 
index 1393bbf..a2b53ca 100644 (file)
@@ -144,6 +144,11 @@ enum nvme_quirks {
         * encoding the generation sequence number.
         */
        NVME_QUIRK_SKIP_CID_GEN                 = (1 << 17),
+
+       /*
+        * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
+        */
+       NVME_QUIRK_BOGUS_NID                    = (1 << 18),
 };
 
 /*
index d817ca1..3aacf1c 100644 (file)
@@ -3409,7 +3409,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_VDEVICE(INTEL, 0x5845),   /* Qemu emulated controller */
                .driver_data = NVME_QUIRK_IDENTIFY_CNS |
-                               NVME_QUIRK_DISABLE_WRITE_ZEROES, },
+                               NVME_QUIRK_DISABLE_WRITE_ZEROES |
+                               NVME_QUIRK_BOGUS_NID, },
+       { PCI_VDEVICE(REDHAT, 0x0010),  /* Qemu emulated controller */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x126f, 0x2263),   /* Silicon Motion unidentified */
                .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, },
        { PCI_DEVICE(0x1bb1, 0x0100),   /* Seagate Nytro Flash Storage */
@@ -3447,6 +3450,10 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x1e4B, 0x1002),   /* MAXIO MAP1002 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
index 558b35a..d270a20 100644 (file)
@@ -3407,6 +3407,15 @@ static int hv_pci_probe(struct hv_device *hdev,
        hbus->bridge->domain_nr = dom;
 #ifdef CONFIG_X86
        hbus->sysdata.domain = dom;
+#elif defined(CONFIG_ARM64)
+       /*
+        * Set the PCI bus parent to be the corresponding VMbus
+        * device. Then the VMbus device will be assigned as the
+        * ACPI companion in pcibios_root_bridge_prepare() and
+        * pci_dma_configure() will propagate device coherence
+        * information to devices created on the bus.
+        */
+       hbus->sysdata.parent = hdev->device.parent;
 #endif
 
        hbus->hdev = hdev;
index afdcb91..1e2d694 100644 (file)
@@ -187,7 +187,7 @@ source "drivers/perf/hisilicon/Kconfig"
 
 config MARVELL_CN10K_DDR_PMU
        tristate "Enable MARVELL CN10K DRAM Subsystem(DSS) PMU Support"
-       depends on ARM64 || (COMPILE_TEST && 64BIT)
+       depends on ARCH_THUNDER || (COMPILE_TEST && 64BIT)
        help
          Enable perf support for Marvell DDR Performance monitoring
          event on CN10K platform.
index 94ebc1e..b1b2a55 100644 (file)
@@ -29,7 +29,7 @@
 #define CNTL_OVER_MASK         0xFFFFFFFE
 
 #define CNTL_CSV_SHIFT         24
-#define CNTL_CSV_MASK          (0xFF << CNTL_CSV_SHIFT)
+#define CNTL_CSV_MASK          (0xFFU << CNTL_CSV_SHIFT)
 
 #define EVENT_CYCLES_ID                0
 #define EVENT_CYCLES_COUNTER   0
index 7640491..30234c2 100644 (file)
@@ -736,7 +736,7 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
 {
        u64 mpidr;
        int cpu_cluster_id;
-       struct cluster_pmu *cluster = NULL;
+       struct cluster_pmu *cluster;
 
        /*
         * This assumes that the cluster_id is in MPIDR[aff1] for
@@ -758,10 +758,10 @@ static struct cluster_pmu *l2_cache_associate_cpu_with_cluster(
                         cluster->cluster_id);
                cpumask_set_cpu(cpu, &cluster->cluster_cpus);
                *per_cpu_ptr(l2cache_pmu->pmu_cluster, cpu) = cluster;
-               break;
+               return cluster;
        }
 
-       return cluster;
+       return NULL;
 }
 
 static int l2cache_pmu_online_cpu(unsigned int cpu, struct hlist_node *node)
index 6b8b3ab..3463629 100644 (file)
@@ -584,21 +584,6 @@ static struct platform_driver acerhdf_driver = {
        .remove = acerhdf_remove,
 };
 
-/* checks if str begins with start */
-static int str_starts_with(const char *str, const char *start)
-{
-       unsigned long str_len = 0, start_len = 0;
-
-       str_len = strlen(str);
-       start_len = strlen(start);
-
-       if (str_len >= start_len &&
-                       !strncmp(str, start, start_len))
-               return 1;
-
-       return 0;
-}
-
 /* check hardware */
 static int __init acerhdf_check_hardware(void)
 {
@@ -651,9 +636,9 @@ static int __init acerhdf_check_hardware(void)
                 * check if actual hardware BIOS vendor, product and version
                 * IDs start with the strings of BIOS table entry
                 */
-               if (str_starts_with(vendor, bt->vendor) &&
-                               str_starts_with(product, bt->product) &&
-                               str_starts_with(version, bt->version)) {
+               if (strstarts(vendor, bt->vendor) &&
+                   strstarts(product, bt->product) &&
+                   strstarts(version, bt->version)) {
                        found = 1;
                        break;
                }
index e9d0dbb..fa4123d 100644 (file)
@@ -160,8 +160,10 @@ MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism");
 
 static struct amd_pmc_dev pmc;
 static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
-static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf);
+#ifdef CONFIG_SUSPEND
+static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data);
+#endif
 
 static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
 {
@@ -325,6 +327,7 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
 static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
 {
        struct smu_metrics table;
@@ -338,6 +341,7 @@ static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev)
                dev_dbg(pdev->dev, "Last suspend in deepest state for %lluus\n",
                         table.timein_s0i3_lastcapture);
 }
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int smu_fw_info_show(struct seq_file *s, void *unused)
@@ -569,6 +573,7 @@ out_unlock:
        return rc;
 }
 
+#ifdef CONFIG_SUSPEND
 static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
 {
        switch (dev->cpu_id) {
@@ -694,6 +699,7 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {
        .prepare = amd_pmc_s2idle_prepare,
        .restore = amd_pmc_s2idle_restore,
 };
+#endif
 
 static const struct pci_device_id pmc_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },
@@ -733,6 +739,7 @@ static int amd_pmc_s2d_init(struct amd_pmc_dev *dev)
        return 0;
 }
 
+#ifdef CONFIG_SUSPEND
 static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 {
        int err;
@@ -753,6 +760,7 @@ static int amd_pmc_write_stb(struct amd_pmc_dev *dev, u32 data)
 
        return 0;
 }
+#endif
 
 static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf)
 {
@@ -859,9 +867,11 @@ static int amd_pmc_probe(struct platform_device *pdev)
 
        amd_pmc_get_smu_version(dev);
        platform_set_drvdata(pdev, dev);
+#ifdef CONFIG_SUSPEND
        err = acpi_register_lps0_dev(&amd_pmc_s2idle_dev_ops);
        if (err)
                dev_warn(dev->dev, "failed to register LPS0 sleep handler, expect increased power consumption\n");
+#endif
 
        amd_pmc_dbgfs_register(dev);
        return 0;
@@ -875,7 +885,9 @@ static int amd_pmc_remove(struct platform_device *pdev)
 {
        struct amd_pmc_dev *dev = platform_get_drvdata(pdev);
 
+#ifdef CONFIG_SUSPEND
        acpi_unregister_lps0_dev(&amd_pmc_s2idle_dev_ops);
+#endif
        amd_pmc_dbgfs_unregister(dev);
        pci_dev_put(dev->rdev);
        mutex_destroy(&dev->lock);
index f5c72e3..0553428 100644 (file)
@@ -10,7 +10,6 @@
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
 
-#include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/err.h>
index c1d9ed9..19f6b45 100644 (file)
@@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
 
        if (value > samsung->kbd_led.max_brightness)
                value = samsung->kbd_led.max_brightness;
-       else if (value < 0)
-               value = 0;
 
        samsung->kbd_led_wk = value;
        queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
index bce17ca..a01a927 100644 (file)
@@ -740,16 +740,8 @@ static ssize_t certificate_store(struct kobject *kobj,
        if (!tlmi_priv.certificate_support)
                return -EOPNOTSUPP;
 
-       new_cert = kstrdup(buf, GFP_KERNEL);
-       if (!new_cert)
-               return -ENOMEM;
-       /* Strip out CR if one is present */
-       strip_cr(new_cert);
-
        /* If empty then clear installed certificate */
-       if (new_cert[0] == '\0') { /* Clear installed certificate */
-               kfree(new_cert);
-
+       if ((buf[0] == '\0') || (buf[0] == '\n')) { /* Clear installed certificate */
                /* Check that signature is set */
                if (!setting->signature || !setting->signature[0])
                        return -EACCES;
@@ -763,14 +755,16 @@ static ssize_t certificate_store(struct kobject *kobj,
 
                ret = tlmi_simple_call(LENOVO_CLEAR_BIOS_CERT_GUID, auth_str);
                kfree(auth_str);
-               if (ret)
-                       return ret;
 
-               kfree(setting->certificate);
-               setting->certificate = NULL;
-               return count;
+               return ret ?: count;
        }
 
+       new_cert = kstrdup(buf, GFP_KERNEL);
+       if (!new_cert)
+               return -ENOMEM;
+       /* Strip out CR if one is present */
+       strip_cr(new_cert);
+
        if (setting->cert_installed) {
                /* Certificate is installed so this is an update */
                if (!setting->signature || !setting->signature[0]) {
@@ -792,21 +786,14 @@ static ssize_t certificate_store(struct kobject *kobj,
                auth_str = kasprintf(GFP_KERNEL, "%s,%s",
                                new_cert, setting->password);
        }
-       if (!auth_str) {
-               kfree(new_cert);
+       kfree(new_cert);
+       if (!auth_str)
                return -ENOMEM;
-       }
 
        ret = tlmi_simple_call(guid, auth_str);
        kfree(auth_str);
-       if (ret) {
-               kfree(new_cert);
-               return ret;
-       }
 
-       kfree(setting->certificate);
-       setting->certificate = new_cert;
-       return count;
+       return ret ?: count;
 }
 
 static struct kobj_attribute auth_certificate = __ATTR_WO(certificate);
@@ -1194,6 +1181,10 @@ static void tlmi_release_attr(void)
 
        kset_unregister(tlmi_priv.attribute_kset);
 
+       /* Free up any saved signatures */
+       kfree(tlmi_priv.pwd_admin->signature);
+       kfree(tlmi_priv.pwd_admin->save_signature);
+
        /* Authentication structures */
        sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
        kobject_put(&tlmi_priv.pwd_admin->kobj);
@@ -1210,11 +1201,6 @@ static void tlmi_release_attr(void)
        }
 
        kset_unregister(tlmi_priv.authentication_kset);
-
-       /* Free up any saved certificates/signatures */
-       kfree(tlmi_priv.pwd_admin->certificate);
-       kfree(tlmi_priv.pwd_admin->signature);
-       kfree(tlmi_priv.pwd_admin->save_signature);
 }
 
 static int tlmi_sysfs_init(void)
index 4f69df6..4daba61 100644 (file)
@@ -63,7 +63,6 @@ struct tlmi_pwd_setting {
        int index; /*Used for HDD and NVME auth */
        enum level_option level;
        bool cert_installed;
-       char *certificate;
        char *signature;
        char *save_signature;
 };
index ea02c8d..d925cb1 100644 (file)
@@ -604,6 +604,12 @@ int power_supply_get_battery_info(struct power_supply *psy,
        err = samsung_sdi_battery_get_info(&psy->dev, value, &info);
        if (!err)
                goto out_ret_pointer;
+       else if (err == -ENODEV)
+               /*
+                * Device does not have a static battery.
+                * Proceed to look for a simple battery.
+                */
+               err = 0;
 
        if (strcmp("simple-battery", value)) {
                err = -ENODEV;
index 9d59f27..b33daab 100644 (file)
@@ -824,6 +824,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
                        .constant_charge_current_max_ua = 900000,
                        .constant_charge_voltage_max_uv = 4200000,
                        .charge_term_current_ua = 200000,
+                       .charge_restart_voltage_uv = 4170000,
                        .maintenance_charge = samsung_maint_charge_table,
                        .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
                        .alert_low_temp_charge_current_ua = 300000,
@@ -867,6 +868,7 @@ static struct samsung_sdi_battery samsung_sdi_batteries[] = {
                        .constant_charge_current_max_ua = 1500000,
                        .constant_charge_voltage_max_uv = 4350000,
                        .charge_term_current_ua = 120000,
+                       .charge_restart_voltage_uv = 4300000,
                        .maintenance_charge = samsung_maint_charge_table,
                        .maintenance_charge_size = ARRAY_SIZE(samsung_maint_charge_table),
                        .alert_low_temp_charge_current_ua = 300000,
index 05147d2..485e58b 100644 (file)
@@ -292,6 +292,7 @@ enum atc2603c_reg_ids {
        .bypass_mask = BIT(5), \
        .active_discharge_reg = ATC2603C_PMU_SWITCH_CTL, \
        .active_discharge_mask = BIT(1), \
+       .active_discharge_on = BIT(1), \
        .owner = THIS_MODULE, \
 }
 
index f21e3f8..8e13dea 100644 (file)
@@ -285,6 +285,7 @@ static const unsigned int rtq2134_buck_ramp_delay_table[] = {
                .enable_mask = RTQ2134_VOUTEN_MASK, \
                .active_discharge_reg = RTQ2134_REG_BUCK##_id##_CFG0, \
                .active_discharge_mask = RTQ2134_ACTDISCHG_MASK, \
+               .active_discharge_on = RTQ2134_ACTDISCHG_MASK, \
                .ramp_reg = RTQ2134_REG_BUCK##_id##_RSPCFG, \
                .ramp_mask = RTQ2134_RSPUP_MASK, \
                .ramp_delay_table = rtq2134_buck_ramp_delay_table, \
index cadea03..40befdd 100644 (file)
@@ -82,6 +82,35 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
                .min_uV = 2400000,
                .uV_step = 100000,
                .enable_time = 3000,
+               .off_on_delay = 36000,
+               .owner = THIS_MODULE,
+       },
+       {
+               .name = "LDO2",
+               .id = 2,
+               .type = REGULATOR_VOLTAGE,
+               .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+               .vsel_reg = WM8994_LDO_2,
+               .vsel_mask = WM8994_LDO2_VSEL_MASK,
+               .ops = &wm8994_ldo2_ops,
+               .enable_time = 3000,
+               .off_on_delay = 36000,
+               .owner = THIS_MODULE,
+       },
+};
+
+static const struct regulator_desc wm8958_ldo_desc[] = {
+       {
+               .name = "LDO1",
+               .id = 1,
+               .type = REGULATOR_VOLTAGE,
+               .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+               .vsel_reg = WM8994_LDO_1,
+               .vsel_mask = WM8994_LDO1_VSEL_MASK,
+               .ops = &wm8994_ldo1_ops,
+               .min_uV = 2400000,
+               .uV_step = 100000,
+               .enable_time = 3000,
                .owner = THIS_MODULE,
        },
        {
@@ -172,9 +201,16 @@ static int wm8994_ldo_probe(struct platform_device *pdev)
         * regulator core and we need not worry about it on the
         * error path.
         */
-       ldo->regulator = devm_regulator_register(&pdev->dev,
-                                                &wm8994_ldo_desc[id],
-                                                &config);
+       if (ldo->wm8994->type == WM8994) {
+               ldo->regulator = devm_regulator_register(&pdev->dev,
+                                                        &wm8994_ldo_desc[id],
+                                                        &config);
+       } else {
+               ldo->regulator = devm_regulator_register(&pdev->dev,
+                                                        &wm8958_ldo_desc[id],
+                                                        &config);
+       }
+
        if (IS_ERR(ldo->regulator)) {
                ret = PTR_ERR(ldo->regulator);
                dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
index b496028..e0fc80e 100644 (file)
@@ -183,7 +183,7 @@ config RESET_RASPBERRYPI
 
 config RESET_RZG2L_USBPHY_CTRL
        tristate "Renesas RZ/G2L USBPHY control driver"
-       depends on ARCH_R9A07G044 || COMPILE_TEST
+       depends on ARCH_RZG2L || COMPILE_TEST
        help
          Support for USBPHY Control found on RZ/G2L family. It mainly
          controls reset and power down of the USB/PHY.
index 61e6888..f0a076e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kref.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/acpi.h>
 #include <linux/reset.h>
 #include <linux/reset-controller.h>
 #include <linux/slab.h>
@@ -1100,13 +1101,25 @@ EXPORT_SYMBOL_GPL(__devm_reset_control_bulk_get);
  *
  * Convenience wrapper for __reset_control_get() and reset_control_reset().
  * This is useful for the common case of devices with single, dedicated reset
- * lines.
+ * lines. _RST firmware method will be called for devices with ACPI.
  */
 int __device_reset(struct device *dev, bool optional)
 {
        struct reset_control *rstc;
        int ret;
 
+#ifdef CONFIG_ACPI
+       acpi_handle handle = ACPI_HANDLE(dev);
+
+       if (handle) {
+               if (!acpi_has_method(handle, "_RST"))
+                       return optional ? 0 : -ENOENT;
+               if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL,
+                                                     NULL)))
+                       return -EIO;
+       }
+#endif
+
        rstc = __reset_control_get(dev, NULL, 0, 0, optional, true);
        if (IS_ERR(rstc))
                return PTR_ERR(rstc);
index c9bc325..26dc547 100644 (file)
@@ -98,11 +98,17 @@ static const struct meson_reset_param meson_a1_param = {
        .level_offset   = 0x40,
 };
 
+static const struct meson_reset_param meson_s4_param = {
+       .reg_count      = 6,
+       .level_offset   = 0x40,
+};
+
 static const struct of_device_id meson_reset_dt_ids[] = {
         { .compatible = "amlogic,meson8b-reset",    .data = &meson8b_param},
         { .compatible = "amlogic,meson-gxbb-reset", .data = &meson8b_param},
         { .compatible = "amlogic,meson-axg-reset",  .data = &meson8b_param},
         { .compatible = "amlogic,meson-a1-reset",   .data = &meson_a1_param},
+        { .compatible = "amlogic,meson-s4-reset",   .data = &meson_s4_param},
         { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
index 1e83150..a8dde46 100644 (file)
@@ -121,7 +121,9 @@ static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
                return dev_err_probe(dev, PTR_ERR(priv->rstc),
                                     "failed to get reset\n");
 
-       reset_control_deassert(priv->rstc);
+       error = reset_control_deassert(priv->rstc);
+       if (error)
+               return error;
 
        priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
        priv->rcdev.of_reset_n_cells = 1;
index 4dda0da..361a683 100644 (file)
@@ -144,6 +144,7 @@ static const struct of_device_id reset_simple_dt_ids[] = {
                .data = &reset_simple_active_low },
        { .compatible = "aspeed,ast2400-lpc-reset" },
        { .compatible = "aspeed,ast2500-lpc-reset" },
+       { .compatible = "aspeed,ast2600-lpc-reset" },
        { .compatible = "bitmain,bm1880-reset",
                .data = &reset_simple_active_low },
        { .compatible = "brcm,bcm4908-misc-pcie-reset",
index 908c1d5..146fd5d 100644 (file)
@@ -23,19 +23,32 @@ struct uniphier_glue_reset_soc_data {
 
 struct uniphier_glue_reset_priv {
        struct clk_bulk_data clk[MAX_CLKS];
-       struct reset_control *rst[MAX_RSTS];
+       struct reset_control_bulk_data rst[MAX_RSTS];
        struct reset_simple_data rdata;
        const struct uniphier_glue_reset_soc_data *data;
 };
 
+static void uniphier_clk_disable(void *_priv)
+{
+       struct uniphier_glue_reset_priv *priv = _priv;
+
+       clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
+}
+
+static void uniphier_rst_assert(void *_priv)
+{
+       struct uniphier_glue_reset_priv *priv = _priv;
+
+       reset_control_bulk_assert(priv->data->nrsts, priv->rst);
+}
+
 static int uniphier_glue_reset_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct uniphier_glue_reset_priv *priv;
        struct resource *res;
        resource_size_t size;
-       const char *name;
-       int i, ret, nr;
+       int i, ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -58,22 +71,28 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       for (i = 0; i < priv->data->nrsts; i++) {
-               name = priv->data->reset_names[i];
-               priv->rst[i] = devm_reset_control_get_shared(dev, name);
-               if (IS_ERR(priv->rst[i]))
-                       return PTR_ERR(priv->rst[i]);
-       }
+       for (i = 0; i < priv->data->nrsts; i++)
+               priv->rst[i].id = priv->data->reset_names[i];
+       ret = devm_reset_control_bulk_get_shared(dev, priv->data->nrsts,
+                                                priv->rst);
+       if (ret)
+               return ret;
 
        ret = clk_bulk_prepare_enable(priv->data->nclks, priv->clk);
        if (ret)
                return ret;
 
-       for (nr = 0; nr < priv->data->nrsts; nr++) {
-               ret = reset_control_deassert(priv->rst[nr]);
-               if (ret)
-                       goto out_rst_assert;
-       }
+       ret = devm_add_action_or_reset(dev, uniphier_clk_disable, priv);
+       if (ret)
+               return ret;
+
+       ret = reset_control_bulk_deassert(priv->data->nrsts, priv->rst);
+       if (ret)
+               return ret;
+
+       ret = devm_add_action_or_reset(dev, uniphier_rst_assert, priv);
+       if (ret)
+               return ret;
 
        spin_lock_init(&priv->rdata.lock);
        priv->rdata.rcdev.owner = THIS_MODULE;
@@ -84,32 +103,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
-       ret = devm_reset_controller_register(dev, &priv->rdata.rcdev);
-       if (ret)
-               goto out_rst_assert;
-
-       return 0;
-
-out_rst_assert:
-       while (nr--)
-               reset_control_assert(priv->rst[nr]);
-
-       clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
-
-       return ret;
-}
-
-static int uniphier_glue_reset_remove(struct platform_device *pdev)
-{
-       struct uniphier_glue_reset_priv *priv = platform_get_drvdata(pdev);
-       int i;
-
-       for (i = 0; i < priv->data->nrsts; i++)
-               reset_control_assert(priv->rst[i]);
-
-       clk_bulk_disable_unprepare(priv->data->nclks, priv->clk);
-
-       return 0;
+       return devm_reset_controller_register(dev, &priv->rdata.rcdev);
 }
 
 static const char * const uniphier_pro4_clock_reset_names[] = {
@@ -177,7 +171,6 @@ MODULE_DEVICE_TABLE(of, uniphier_glue_reset_match);
 
 static struct platform_driver uniphier_glue_reset_driver = {
        .probe = uniphier_glue_reset_probe,
-       .remove = uniphier_glue_reset_remove,
        .driver = {
                .name = "uniphier-glue-reset",
                .of_match_table = uniphier_glue_reset_match,
index 24d3395..4c5bba5 100644 (file)
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
        struct mrq_reset_request request;
        struct tegra_bpmp_message msg;
+       int err;
 
        memset(&request, 0, sizeof(request));
        request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
        msg.tx.data = &request;
        msg.tx.size = sizeof(request);
 
-       return tegra_bpmp_transfer(bpmp, &msg);
+       err = tegra_bpmp_transfer(bpmp, &msg);
+       if (err)
+               return err;
+       if (msg.rx.ret)
+               return -EINVAL;
+
+       return 0;
 }
 
 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
index 5f554a3..caeebfb 100644 (file)
@@ -317,14 +317,18 @@ enum {
 };
 
 struct aha152x_cmd_priv {
-       struct scsi_pointer scsi_pointer;
+       char *ptr;
+       int this_residual;
+       struct scatterlist *buffer;
+       int status;
+       int message;
+       int sent_command;
+       int phase;
 };
 
-static struct scsi_pointer *aha152x_scsi_pointer(struct scsi_cmnd *cmd)
+static struct aha152x_cmd_priv *aha152x_priv(struct scsi_cmnd *cmd)
 {
-       struct aha152x_cmd_priv *acmd = scsi_cmd_priv(cmd);
-
-       return &acmd->scsi_pointer;
+       return scsi_cmd_priv(cmd);
 }
 
 MODULE_AUTHOR("Jürgen Fischer");
@@ -890,17 +894,16 @@ void aha152x_release(struct Scsi_Host *shpnt)
 static int setup_expected_interrupts(struct Scsi_Host *shpnt)
 {
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
-               scsi_pointer->phase |= 1 << 16;
+               acp->phase |= 1 << 16;
 
-               if (scsi_pointer->phase & selecting) {
+               if (acp->phase & selecting) {
                        SETPORT(SSTAT1, SELTO);
                        SETPORT(SIMODE0, ENSELDO | (DISCONNECTED_SC ? ENSELDI : 0));
                        SETPORT(SIMODE1, ENSELTIMO);
                } else {
-                       SETPORT(SIMODE0, (scsi_pointer->phase & spiordy) ? ENSPIORDY : 0);
+                       SETPORT(SIMODE0, (acp->phase & spiordy) ? ENSPIORDY : 0);
                        SETPORT(SIMODE1, ENPHASEMIS | ENSCSIRST | ENSCSIPERR | ENBUSFREE);
                }
        } else if(STATE==seldi) {
@@ -924,17 +927,16 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
 static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
                                  struct completion *complete, int phase)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(SCpnt);
+       struct aha152x_cmd_priv *acp = aha152x_priv(SCpnt);
        struct Scsi_Host *shpnt = SCpnt->device->host;
        unsigned long flags;
 
-       scsi_pointer->phase        = not_issued | phase;
-       scsi_pointer->Status       = 0x1; /* Ilegal status by SCSI standard */
-       scsi_pointer->Message      = 0;
-       scsi_pointer->have_data_in = 0;
-       scsi_pointer->sent_command = 0;
+       acp->phase        = not_issued | phase;
+       acp->status       = 0x1; /* Illegal status by SCSI standard */
+       acp->message      = 0;
+       acp->sent_command = 0;
 
-       if (scsi_pointer->phase & (resetting | check_condition)) {
+       if (acp->phase & (resetting | check_condition)) {
                if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
                        scmd_printk(KERN_ERR, SCpnt, "cannot reuse command\n");
                        return FAILED;
@@ -957,15 +959,15 @@ static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
           SCp.phase            : current state of the command */
 
        if ((phase & resetting) || !scsi_sglist(SCpnt)) {
-               scsi_pointer->ptr           = NULL;
-               scsi_pointer->this_residual = 0;
+               acp->ptr           = NULL;
+               acp->this_residual = 0;
                scsi_set_resid(SCpnt, 0);
-               scsi_pointer->buffer        = NULL;
+               acp->buffer        = NULL;
        } else {
                scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
-               scsi_pointer->buffer        = scsi_sglist(SCpnt);
-               scsi_pointer->ptr           = SG_ADDRESS(scsi_pointer->buffer);
-               scsi_pointer->this_residual = scsi_pointer->buffer->length;
+               acp->buffer        = scsi_sglist(SCpnt);
+               acp->ptr           = SG_ADDRESS(acp->buffer);
+               acp->this_residual = acp->buffer->length;
        }
 
        DO_LOCK(flags);
@@ -1015,7 +1017,7 @@ static void reset_done(struct scsi_cmnd *SCpnt)
 
 static void aha152x_scsi_done(struct scsi_cmnd *SCpnt)
 {
-       if (aha152x_scsi_pointer(SCpnt)->phase & resetting)
+       if (aha152x_priv(SCpnt)->phase & resetting)
                reset_done(SCpnt);
        else
                scsi_done(SCpnt);
@@ -1101,7 +1103,7 @@ static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
 
        DO_LOCK(flags);
 
-       if (aha152x_scsi_pointer(SCpnt)->phase & resetted) {
+       if (aha152x_priv(SCpnt)->phase & resetted) {
                HOSTDATA(shpnt)->commands--;
                if (!HOSTDATA(shpnt)->commands)
                        SETPORT(PORTA, 0);
@@ -1395,31 +1397,30 @@ static void busfree_run(struct Scsi_Host *shpnt)
        SETPORT(SSTAT1, CLRBUSFREE);
 
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
 #if defined(AHA152X_STAT)
                action++;
 #endif
-               scsi_pointer->phase &= ~syncneg;
+               acp->phase &= ~syncneg;
 
-               if (scsi_pointer->phase & completed) {
+               if (acp->phase & completed) {
                        /* target sent COMMAND COMPLETE */
-                       done(shpnt, scsi_pointer->Status, DID_OK);
+                       done(shpnt, acp->status, DID_OK);
 
-               } else if (scsi_pointer->phase & aborted) {
-                       done(shpnt, scsi_pointer->Status, DID_ABORT);
+               } else if (acp->phase & aborted) {
+                       done(shpnt, acp->status, DID_ABORT);
 
-               } else if (scsi_pointer->phase & resetted) {
-                       done(shpnt, scsi_pointer->Status, DID_RESET);
+               } else if (acp->phase & resetted) {
+                       done(shpnt, acp->status, DID_RESET);
 
-               } else if (scsi_pointer->phase & disconnected) {
+               } else if (acp->phase & disconnected) {
                        /* target sent DISCONNECT */
 #if defined(AHA152X_STAT)
                        HOSTDATA(shpnt)->disconnections++;
 #endif
                        append_SC(&DISCONNECTED_SC, CURRENT_SC);
-                       scsi_pointer->phase |= 1 << 16;
+                       acp->phase |= 1 << 16;
                        CURRENT_SC = NULL;
 
                } else {
@@ -1438,24 +1439,23 @@ static void busfree_run(struct Scsi_Host *shpnt)
                action++;
 #endif
 
-               if (aha152x_scsi_pointer(DONE_SC)->phase & check_condition) {
+               if (aha152x_priv(DONE_SC)->phase & check_condition) {
                        struct scsi_cmnd *cmd = HOSTDATA(shpnt)->done_SC;
                        struct aha152x_scdata *sc = SCDATA(cmd);
 
                        scsi_eh_restore_cmnd(cmd, &sc->ses);
 
-                       aha152x_scsi_pointer(cmd)->Status = SAM_STAT_CHECK_CONDITION;
+                       aha152x_priv(cmd)->status = SAM_STAT_CHECK_CONDITION;
 
                        HOSTDATA(shpnt)->commands--;
                        if (!HOSTDATA(shpnt)->commands)
                                SETPORT(PORTA, 0);      /* turn led off */
-               } else if (aha152x_scsi_pointer(DONE_SC)->Status ==
-                          SAM_STAT_CHECK_CONDITION) {
+               } else if (aha152x_priv(DONE_SC)->status == SAM_STAT_CHECK_CONDITION) {
 #if defined(AHA152X_STAT)
                        HOSTDATA(shpnt)->busfree_with_check_condition++;
 #endif
 
-                       if(!(aha152x_scsi_pointer(DONE_SC)->phase & not_issued)) {
+                       if (!(aha152x_priv(DONE_SC)->phase & not_issued)) {
                                struct aha152x_scdata *sc;
                                struct scsi_cmnd *ptr = DONE_SC;
                                DONE_SC=NULL;
@@ -1480,7 +1480,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
                        if (!HOSTDATA(shpnt)->commands)
                                SETPORT(PORTA, 0);      /* turn led off */
 
-                       if (!(aha152x_scsi_pointer(ptr)->phase & resetting)) {
+                       if (!(aha152x_priv(ptr)->phase & resetting)) {
                                kfree(ptr->host_scribble);
                                ptr->host_scribble=NULL;
                        }
@@ -1503,13 +1503,12 @@ static void busfree_run(struct Scsi_Host *shpnt)
        DO_UNLOCK(flags);
 
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
 #if defined(AHA152X_STAT)
                action++;
 #endif
-               scsi_pointer->phase |= selecting;
+               acp->phase |= selecting;
 
                /* clear selection timeout */
                SETPORT(SSTAT1, SELTO);
@@ -1537,13 +1536,13 @@ static void busfree_run(struct Scsi_Host *shpnt)
  */
 static void seldo_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
        SETPORT(SCSISIG, 0);
        SETPORT(SSTAT1, CLRBUSFREE);
        SETPORT(SSTAT1, CLRPHASECHG);
 
-       scsi_pointer->phase &= ~(selecting | not_issued);
+       acp->phase &= ~(selecting | not_issued);
 
        SETPORT(SCSISEQ, 0);
 
@@ -1558,12 +1557,12 @@ static void seldo_run(struct Scsi_Host *shpnt)
 
        ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
 
-       if (scsi_pointer->phase & aborting) {
+       if (acp->phase & aborting) {
                ADDMSGO(ABORT);
-       } else if (scsi_pointer->phase & resetting) {
+       } else if (acp->phase & resetting) {
                ADDMSGO(BUS_DEVICE_RESET);
        } else if (SYNCNEG==0 && SYNCHRONOUS) {
-               scsi_pointer->phase |= syncneg;
+               acp->phase |= syncneg;
                MSGOLEN += spi_populate_sync_msg(&MSGO(MSGOLEN), 50, 8);
                SYNCNEG=1;              /* negotiation in progress */
        }
@@ -1578,7 +1577,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
  */
 static void selto_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp;
 
        SETPORT(SCSISEQ, 0);
        SETPORT(SSTAT1, CLRSELTIMO);
@@ -1586,9 +1585,10 @@ static void selto_run(struct Scsi_Host *shpnt)
        if (!CURRENT_SC)
                return;
 
-       scsi_pointer->phase &= ~selecting;
+       acp = aha152x_priv(CURRENT_SC);
+       acp->phase &= ~selecting;
 
-       if (scsi_pointer->phase & aborted)
+       if (acp->phase & aborted)
                done(shpnt, SAM_STAT_GOOD, DID_ABORT);
        else if (TESTLO(SSTAT0, SELINGO))
                done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
@@ -1616,10 +1616,9 @@ static void seldi_run(struct Scsi_Host *shpnt)
        SETPORT(SSTAT1, CLRPHASECHG);
 
        if(CURRENT_SC) {
-               struct scsi_pointer *scsi_pointer =
-                       aha152x_scsi_pointer(CURRENT_SC);
+               struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
-               if (!(scsi_pointer->phase & not_issued))
+               if (!(acp->phase & not_issued))
                        scmd_printk(KERN_ERR, CURRENT_SC,
                                    "command should not have been issued yet\n");
 
@@ -1676,7 +1675,7 @@ static void seldi_run(struct Scsi_Host *shpnt)
 static void msgi_run(struct Scsi_Host *shpnt)
 {
        for(;;) {
-               struct scsi_pointer *scsi_pointer;
+               struct aha152x_cmd_priv *acp;
                int sstat1 = GETPORT(SSTAT1);
 
                if(sstat1 & (PHASECHG|PHASEMIS|BUSFREE) || !(sstat1 & REQINIT))
@@ -1714,9 +1713,9 @@ static void msgi_run(struct Scsi_Host *shpnt)
                                continue;
                        }
 
-                       scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-                       scsi_pointer->Message = MSGI(0);
-                       scsi_pointer->phase &= ~disconnected;
+                       acp = aha152x_priv(CURRENT_SC);
+                       acp->message = MSGI(0);
+                       acp->phase &= ~disconnected;
 
                        MSGILEN=0;
 
@@ -1724,8 +1723,8 @@ static void msgi_run(struct Scsi_Host *shpnt)
                        continue;
                }
 
-               scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-               scsi_pointer->Message = MSGI(0);
+               acp = aha152x_priv(CURRENT_SC);
+               acp->message = MSGI(0);
 
                switch (MSGI(0)) {
                case DISCONNECT:
@@ -1733,11 +1732,11 @@ static void msgi_run(struct Scsi_Host *shpnt)
                                scmd_printk(KERN_WARNING, CURRENT_SC,
                                            "target was not allowed to disconnect\n");
 
-                       scsi_pointer->phase |= disconnected;
+                       acp->phase |= disconnected;
                        break;
 
                case COMMAND_COMPLETE:
-                       scsi_pointer->phase |= completed;
+                       acp->phase |= completed;
                        break;
 
                case MESSAGE_REJECT:
@@ -1867,11 +1866,9 @@ static void msgi_end(struct Scsi_Host *shpnt)
  */
 static void msgo_init(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-
        if(MSGOLEN==0) {
-               if ((scsi_pointer->phase & syncneg) && SYNCNEG==2 &&
-                   SYNCRATE==0) {
+               if ((aha152x_priv(CURRENT_SC)->phase & syncneg) &&
+                   SYNCNEG == 2 && SYNCRATE == 0) {
                        ADDMSGO(IDENTIFY(RECONNECT, CURRENT_SC->device->lun));
                } else {
                        scmd_printk(KERN_INFO, CURRENT_SC,
@@ -1888,7 +1885,7 @@ static void msgo_init(struct Scsi_Host *shpnt)
  */
 static void msgo_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
        while(MSGO_I<MSGOLEN) {
                if (TESTLO(SSTAT0, SPIORDY))
@@ -1901,13 +1898,13 @@ static void msgo_run(struct Scsi_Host *shpnt)
 
 
                if (MSGO(MSGO_I) & IDENTIFY_BASE)
-                       scsi_pointer->phase |= identified;
+                       acp->phase |= identified;
 
                if (MSGO(MSGO_I)==ABORT)
-                       scsi_pointer->phase |= aborted;
+                       acp->phase |= aborted;
 
                if (MSGO(MSGO_I)==BUS_DEVICE_RESET)
-                       scsi_pointer->phase |= resetted;
+                       acp->phase |= resetted;
 
                SETPORT(SCSIDAT, MSGO(MSGO_I++));
        }
@@ -1936,7 +1933,7 @@ static void msgo_end(struct Scsi_Host *shpnt)
  */
 static void cmd_init(struct Scsi_Host *shpnt)
 {
-       if (aha152x_scsi_pointer(CURRENT_SC)->sent_command) {
+       if (aha152x_priv(CURRENT_SC)->sent_command) {
                scmd_printk(KERN_ERR, CURRENT_SC,
                            "command already sent\n");
                done(shpnt, SAM_STAT_GOOD, DID_ERROR);
@@ -1967,7 +1964,7 @@ static void cmd_end(struct Scsi_Host *shpnt)
                            "command sent incompletely (%d/%d)\n",
                            CMD_I, CURRENT_SC->cmd_len);
        else
-               aha152x_scsi_pointer(CURRENT_SC)->sent_command++;
+               aha152x_priv(CURRENT_SC)->sent_command++;
 }
 
 /*
@@ -1979,7 +1976,7 @@ static void status_run(struct Scsi_Host *shpnt)
        if (TESTLO(SSTAT0, SPIORDY))
                return;
 
-       aha152x_scsi_pointer(CURRENT_SC)->Status = GETPORT(SCSIDAT);
+       aha152x_priv(CURRENT_SC)->status = GETPORT(SCSIDAT);
 
 }
 
@@ -2003,7 +2000,7 @@ static void datai_init(struct Scsi_Host *shpnt)
 
 static void datai_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer;
+       struct aha152x_cmd_priv *acp;
        unsigned long the_time;
        int fifodata, data_count;
 
@@ -2041,36 +2038,35 @@ static void datai_run(struct Scsi_Host *shpnt)
                        fifodata = GETPORT(FIFOSTAT);
                }
 
-               scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
-               if (scsi_pointer->this_residual > 0) {
-                       while (fifodata > 0 && scsi_pointer->this_residual > 0) {
-                               data_count = fifodata > scsi_pointer->this_residual ?
-                                               scsi_pointer->this_residual :
-                                               fifodata;
+               acp = aha152x_priv(CURRENT_SC);
+               if (acp->this_residual > 0) {
+                       while (fifodata > 0 && acp->this_residual > 0) {
+                               data_count = fifodata > acp->this_residual ?
+                                               acp->this_residual : fifodata;
                                fifodata -= data_count;
 
                                if (data_count & 1) {
                                        SETPORT(DMACNTRL0, ENDMA|_8BIT);
-                                       *scsi_pointer->ptr++ = GETPORT(DATAPORT);
-                                       scsi_pointer->this_residual--;
+                                       *acp->ptr++ = GETPORT(DATAPORT);
+                                       acp->this_residual--;
                                        DATA_LEN++;
                                        SETPORT(DMACNTRL0, ENDMA);
                                }
 
                                if (data_count > 1) {
                                        data_count >>= 1;
-                                       insw(DATAPORT, scsi_pointer->ptr, data_count);
-                                       scsi_pointer->ptr += 2 * data_count;
-                                       scsi_pointer->this_residual -= 2 * data_count;
+                                       insw(DATAPORT, acp->ptr, data_count);
+                                       acp->ptr += 2 * data_count;
+                                       acp->this_residual -= 2 * data_count;
                                        DATA_LEN += 2 * data_count;
                                }
 
-                               if (scsi_pointer->this_residual == 0 &&
-                                   !sg_is_last(scsi_pointer->buffer)) {
+                               if (acp->this_residual == 0 &&
+                                   !sg_is_last(acp->buffer)) {
                                        /* advance to next buffer */
-                                       scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
-                                       scsi_pointer->ptr           = SG_ADDRESS(scsi_pointer->buffer);
-                                       scsi_pointer->this_residual = scsi_pointer->buffer->length;
+                                       acp->buffer = sg_next(acp->buffer);
+                                       acp->ptr = SG_ADDRESS(acp->buffer);
+                                       acp->this_residual = acp->buffer->length;
                                }
                        }
                } else if (fifodata > 0) {
@@ -2138,15 +2134,15 @@ static void datao_init(struct Scsi_Host *shpnt)
 
 static void datao_run(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
        unsigned long the_time;
        int data_count;
 
        /* until phase changes or all data sent */
-       while (TESTLO(DMASTAT, INTSTAT) && scsi_pointer->this_residual > 0) {
+       while (TESTLO(DMASTAT, INTSTAT) && acp->this_residual > 0) {
                data_count = 128;
-               if (data_count > scsi_pointer->this_residual)
-                       data_count = scsi_pointer->this_residual;
+               if (data_count > acp->this_residual)
+                       data_count = acp->this_residual;
 
                if(TESTLO(DMASTAT, DFIFOEMP)) {
                        scmd_printk(KERN_ERR, CURRENT_SC,
@@ -2157,26 +2153,25 @@ static void datao_run(struct Scsi_Host *shpnt)
 
                if(data_count & 1) {
                        SETPORT(DMACNTRL0,WRITE_READ|ENDMA|_8BIT);
-                       SETPORT(DATAPORT, *scsi_pointer->ptr++);
-                       scsi_pointer->this_residual--;
+                       SETPORT(DATAPORT, *acp->ptr++);
+                       acp->this_residual--;
                        CMD_INC_RESID(CURRENT_SC, -1);
                        SETPORT(DMACNTRL0,WRITE_READ|ENDMA);
                }
 
                if(data_count > 1) {
                        data_count >>= 1;
-                       outsw(DATAPORT, scsi_pointer->ptr, data_count);
-                       scsi_pointer->ptr           += 2 * data_count;
-                       scsi_pointer->this_residual -= 2 * data_count;
+                       outsw(DATAPORT, acp->ptr, data_count);
+                       acp->ptr += 2 * data_count;
+                       acp->this_residual -= 2 * data_count;
                        CMD_INC_RESID(CURRENT_SC, -2 * data_count);
                }
 
-               if (scsi_pointer->this_residual == 0 &&
-                   !sg_is_last(scsi_pointer->buffer)) {
+               if (acp->this_residual == 0 && !sg_is_last(acp->buffer)) {
                        /* advance to next buffer */
-                       scsi_pointer->buffer = sg_next(scsi_pointer->buffer);
-                       scsi_pointer->ptr           = SG_ADDRESS(scsi_pointer->buffer);
-                       scsi_pointer->this_residual = scsi_pointer->buffer->length;
+                       acp->buffer = sg_next(acp->buffer);
+                       acp->ptr = SG_ADDRESS(acp->buffer);
+                       acp->this_residual = acp->buffer->length;
                }
 
                the_time=jiffies + 100*HZ;
@@ -2192,7 +2187,7 @@ static void datao_run(struct Scsi_Host *shpnt)
 
 static void datao_end(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
+       struct aha152x_cmd_priv *acp = aha152x_priv(CURRENT_SC);
 
        if(TESTLO(DMASTAT, DFIFOEMP)) {
                u32 datao_cnt = GETSTCNT();
@@ -2211,10 +2206,9 @@ static void datao_end(struct Scsi_Host *shpnt)
                        sg = sg_next(sg);
                }
 
-               scsi_pointer->buffer = sg;
-               scsi_pointer->ptr = SG_ADDRESS(scsi_pointer->buffer) + done;
-               scsi_pointer->this_residual = scsi_pointer->buffer->length -
-                       done;
+               acp->buffer = sg;
+               acp->ptr = SG_ADDRESS(acp->buffer) + done;
+               acp->this_residual = acp->buffer->length - done;
        }
 
        SETPORT(SXFRCTL0, CH1|CLRCH1|CLRSTCNT);
@@ -2229,7 +2223,6 @@ static void datao_end(struct Scsi_Host *shpnt)
  */
 static int update_state(struct Scsi_Host *shpnt)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(CURRENT_SC);
        int dataphase=0;
        unsigned int stat0 = GETPORT(SSTAT0);
        unsigned int stat1 = GETPORT(SSTAT1);
@@ -2244,7 +2237,7 @@ static int update_state(struct Scsi_Host *shpnt)
        } else if (stat0 & SELDI && PREVSTATE == busfree) {
                STATE=seldi;
        } else if (stat0 & SELDO && CURRENT_SC &&
-                  (scsi_pointer->phase & selecting)) {
+                  (aha152x_priv(CURRENT_SC)->phase & selecting)) {
                STATE=seldo;
        } else if(stat1 & SELTO) {
                STATE=selto;
@@ -2376,8 +2369,7 @@ static void is_complete(struct Scsi_Host *shpnt)
                        SETPORT(SXFRCTL0, CH1);
                        SETPORT(DMACNTRL0, 0);
                        if(CURRENT_SC)
-                               aha152x_scsi_pointer(CURRENT_SC)->phase &=
-                                       ~spiordy;
+                               aha152x_priv(CURRENT_SC)->phase &= ~spiordy;
                }
 
                /*
@@ -2399,8 +2391,7 @@ static void is_complete(struct Scsi_Host *shpnt)
                        SETPORT(DMACNTRL0, 0);
                        SETPORT(SXFRCTL0, CH1|SPIOEN);
                        if(CURRENT_SC)
-                               aha152x_scsi_pointer(CURRENT_SC)->phase |=
-                                       spiordy;
+                               aha152x_priv(CURRENT_SC)->phase |= spiordy;
                }
 
                /*
@@ -2490,7 +2481,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
  */
 static void show_command(struct scsi_cmnd *ptr)
 {
-       const int phase = aha152x_scsi_pointer(ptr)->phase;
+       const int phase = aha152x_priv(ptr)->phase;
 
        scsi_print_command(ptr);
        scmd_printk(KERN_DEBUG, ptr,
@@ -2538,8 +2529,8 @@ static void show_queues(struct Scsi_Host *shpnt)
 
 static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
 {
-       struct scsi_pointer *scsi_pointer = aha152x_scsi_pointer(ptr);
-       const int phase = scsi_pointer->phase;
+       struct aha152x_cmd_priv *acp = aha152x_priv(ptr);
+       const int phase = acp->phase;
        int i;
 
        seq_printf(m, "%p: target=%d; lun=%d; cmnd=( ",
@@ -2549,8 +2540,8 @@ static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
                seq_printf(m, "0x%02x ", ptr->cmnd[i]);
 
        seq_printf(m, "); resid=%d; residual=%d; buffers=%d; phase |",
-               scsi_get_resid(ptr), scsi_pointer->this_residual,
-               sg_nents(scsi_pointer->buffer) - 1);
+               scsi_get_resid(ptr), acp->this_residual,
+               sg_nents(acp->buffer) - 1);
 
        if (phase & not_issued)
                seq_puts(m, "not issued|");
index 679a4fd..793fe19 100644 (file)
@@ -420,8 +420,6 @@ ahd_unlock(struct ahd_softc *ahd, unsigned long *flags)
 
 /* config registers for header type 0 devices */
 #define PCIR_MAPS      0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0  0x2e
 
 /****************************** PCI-X definitions *****************************/
 #define PCIXR_COMMAND  0x96
index 2f0bdb9..5fad41b 100644 (file)
@@ -260,8 +260,8 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
 
        vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
        device = ahd_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
-       subvendor = ahd_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
-       subdevice = ahd_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+       subvendor = ahd_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+       subdevice = ahd_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
        full_id = ahd_compose_id(device,
                                 vendor,
                                 subdevice,
@@ -298,7 +298,7 @@ ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
         * Record if this is an HP board.
         */
        subvendor = ahd_pci_read_config(ahd->dev_softc,
-                                       PCIR_SUBVEND_0, /*bytes*/2);
+                                       PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
        if (subvendor == SUBID_HP)
                ahd->flags |= AHD_HP_BOARD;
 
index 4782a30..51d9f4d 100644 (file)
@@ -433,8 +433,6 @@ ahc_unlock(struct ahc_softc *ahc, unsigned long *flags)
 
 /* config registers for header type 0 devices */
 #define PCIR_MAPS      0x10
-#define PCIR_SUBVEND_0 0x2c
-#define PCIR_SUBDEV_0  0x2e
 
 typedef enum
 {
index dab3a6d..2d4c854 100644 (file)
@@ -673,8 +673,8 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
 
        vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
        device = ahc_pci_read_config(pci, PCIR_DEVICE, /*bytes*/2);
-       subvendor = ahc_pci_read_config(pci, PCIR_SUBVEND_0, /*bytes*/2);
-       subdevice = ahc_pci_read_config(pci, PCIR_SUBDEV_0, /*bytes*/2);
+       subvendor = ahc_pci_read_config(pci, PCI_SUBSYSTEM_VENDOR_ID, /*bytes*/2);
+       subdevice = ahc_pci_read_config(pci, PCI_SUBSYSTEM_ID, /*bytes*/2);
        full_id = ahc_compose_id(device, vendor, subdevice, subvendor);
 
        /*
index 0103f81..7765443 100644 (file)
@@ -1169,7 +1169,7 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                ofld_kcqe->fcoe_conn_context_id);
        interface = tgt->port->priv;
        if (hba != interface->hba) {
-               printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
+               printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n");
                goto ofld_cmpl_err;
        }
        /*
@@ -1226,12 +1226,12 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
         * and enable
         */
        if (tgt->context_id != context_id) {
-               printk(KERN_ERR PFX "context id mis-match\n");
+               printk(KERN_ERR PFX "context id mismatch\n");
                return;
        }
        interface = tgt->port->priv;
        if (hba != interface->hba) {
-               printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
+               printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n");
                goto enbl_cmpl_err;
        }
        if (!ofld_kcqe->completion_status)
index 5521469..6c864b0 100644 (file)
@@ -1977,7 +1977,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
                if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
                        break;
 
-               if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx))) {
+               if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                        if (nopin->op_code == ISCSI_OP_NOOP_IN &&
                            nopin->itt == (u16) RESERVED_ITT) {
                                printk(KERN_ALERT "bnx2i: Unsolicited "
@@ -2398,7 +2398,7 @@ static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
        }
 
        if (hba != ep->hba) {
-               printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+               printk(KERN_ALERT "conn destroy- error hba mismatch\n");
                return;
        }
 
@@ -2432,7 +2432,7 @@ static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
        }
 
        if (hba != ep->hba) {
-               printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+               printk(KERN_ALERT "ofld_cmpl: error hba mismatch\n");
                return;
        }
 
index fe86fd6..15fbd09 100644 (file)
@@ -1721,7 +1721,7 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
                        struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;
 
                        /* Must suspend all rx queue activity for this ep */
-                       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+                       set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
                }
                /* CONN_DISCONNECT timeout may or may not be an issue depending
                 * on what transcribed in TCP layer, different targets behave
index 8c7d4dd..4365d52 100644 (file)
@@ -1634,11 +1634,11 @@ void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, conn 0x%p.\n", csk, conn);
 
-       if (unlikely(!conn || conn->suspend_rx)) {
+       if (unlikely(!conn || test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
-                       "csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
+                       "csk 0x%p, conn 0x%p, id %d, conn flags 0x%lx!\n",
                        csk, conn, conn ? conn->id : 0xFF,
-                       conn ? conn->suspend_rx : 0xFF);
+                       conn ? conn->flags : 0xFF);
                return;
        }
 
index 461ef8a..4bda2f6 100644 (file)
@@ -442,7 +442,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
        case SAS_PROTOCOL_INTERNAL_ABORT:
                hisi_sas_task_prep_abort(hisi_hba, slot);
                break;
-       fallthrough;
        default:
                return;
        }
index 80238e6..eee1a24 100644 (file)
@@ -36,7 +36,7 @@
 
 #define IBMVSCSIS_VERSION      "v0.2"
 
-#define        INITIAL_SRP_LIMIT       800
+#define        INITIAL_SRP_LIMIT       1024
 #define        DEFAULT_MAX_SECTORS     256
 #define MAX_TXU                        1024 * 1024
 
index d690d9c..35589b6 100644 (file)
@@ -413,7 +413,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
                                dev_warn(&ihost->pdev->dev,
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for io request object "
-                                        "that doesnt exist.\n",
+                                        "that doesn't exist.\n",
                                         __func__,
                                         ihost,
                                         ent);
@@ -428,7 +428,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
                                dev_warn(&ihost->pdev->dev,
                                         "%s: SCIC Controller 0x%p received "
                                         "event 0x%x for remote device object "
-                                        "that doesnt exist.\n",
+                                        "that doesn't exist.\n",
                                         __func__,
                                         ihost,
                                         ent);
@@ -462,7 +462,7 @@ static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
                } else
                        dev_err(&ihost->pdev->dev,
                                "%s: SCIC Controller 0x%p received event 0x%x "
-                               "for remote device object 0x%0x that doesnt "
+                               "for remote device object 0x%0x that doesn't "
                                "exist.\n",
                                __func__,
                                ihost,
index d09926e..797abf4 100644 (file)
@@ -678,7 +678,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        struct iscsi_task *task;
        itt_t itt;
 
-       if (session->state == ISCSI_STATE_TERMINATE)
+       if (session->state == ISCSI_STATE_TERMINATE ||
+           !test_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags))
                return NULL;
 
        if (opcode == ISCSI_OP_LOGIN || opcode == ISCSI_OP_TEXT) {
@@ -1392,8 +1393,8 @@ static bool iscsi_set_conn_failed(struct iscsi_conn *conn)
        if (conn->stop_stage == 0)
                session->state = ISCSI_STATE_FAILED;
 
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
        return true;
 }
 
@@ -1454,7 +1455,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
         * Do this after dropping the extra ref because if this was a requeue
         * it's removed from that list and cleanup_queued_task would miss it.
         */
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                /*
                 * Save the task and ref in case we weren't cleaning up this
                 * task and get woken up again.
@@ -1532,7 +1533,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
        int rc = 0;
 
        spin_lock_bh(&conn->session->frwd_lock);
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
                spin_unlock_bh(&conn->session->frwd_lock);
                return -ENODATA;
@@ -1746,7 +1747,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
                goto fault;
        }
 
-       if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+       if (test_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags)) {
                reason = FAILURE_SESSION_IN_RECOVERY;
                sc->result = DID_REQUEUE << 16;
                goto fault;
@@ -1935,7 +1936,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
 void iscsi_suspend_queue(struct iscsi_conn *conn)
 {
        spin_lock_bh(&conn->session->frwd_lock);
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        spin_unlock_bh(&conn->session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
@@ -1953,7 +1954,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
        struct Scsi_Host *shost = conn->session->host;
        struct iscsi_host *ihost = shost_priv(shost);
 
-       set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       set_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        if (ihost->workq)
                flush_workqueue(ihost->workq);
 }
@@ -1961,7 +1962,7 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        iscsi_conn_queue_work(conn);
 }
 
@@ -2214,6 +2215,8 @@ void iscsi_conn_unbind(struct iscsi_cls_conn *cls_conn, bool is_active)
        iscsi_suspend_tx(conn);
 
        spin_lock_bh(&session->frwd_lock);
+       clear_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
+
        if (!is_active) {
                /*
                 * if logout timed out before userspace could even send a PDU
@@ -3045,7 +3048,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
        if (!cls_conn)
                return NULL;
        conn = cls_conn->dd_data;
-       memset(conn, 0, sizeof(*conn) + dd_size);
 
        conn->dd_data = cls_conn->dd_data + sizeof(*conn);
        conn->session = session;
@@ -3318,6 +3320,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
        spin_lock_bh(&session->frwd_lock);
        if (is_leading)
                session->leadconn = conn;
+
+       set_bit(ISCSI_CONN_FLAG_BOUND, &conn->flags);
        spin_unlock_bh(&session->frwd_lock);
 
        /*
@@ -3330,8 +3334,8 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
        /*
         * Unblock xmitworker(), Login Phase will pass through.
         */
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
-       clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags);
+       clear_bit(ISCSI_CONN_FLAG_SUSPEND_TX, &conn->flags);
        return 0;
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
index 2e9ffe3..8830057 100644 (file)
@@ -927,7 +927,7 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
         */
        conn->last_recv = jiffies;
 
-       if (unlikely(conn->suspend_rx)) {
+       if (unlikely(test_bit(ISCSI_CONN_FLAG_SUSPEND_RX, &conn->flags))) {
                ISCSI_DBG_TCP(conn, "Rx suspended!\n");
                *status = ISCSI_TCP_SUSPENDED;
                return 0;
index f0cf8ff..0025760 100644 (file)
@@ -897,6 +897,11 @@ enum lpfc_irq_chann_mode {
        NHT_MODE,
 };
 
+enum lpfc_hba_bit_flags {
+       FABRIC_COMANDS_BLOCKED,
+       HBA_PCI_ERR,
+};
+
 struct lpfc_hba {
        /* SCSI interface function jump table entries */
        struct lpfc_io_buf * (*lpfc_get_scsi_buf)
@@ -1043,7 +1048,6 @@ struct lpfc_hba {
                                         * Firmware supports Forced Link Speed
                                         * capability
                                         */
-#define HBA_PCI_ERR            0x80000 /* The PCI slot is offline */
 #define HBA_FLOGI_ISSUED       0x100000 /* FLOGI was issued */
 #define HBA_SHORT_CMF          0x200000 /* shorter CMF timer routine */
 #define HBA_CGN_DAY_WRAP       0x400000 /* HBA Congestion info day wraps */
@@ -1350,7 +1354,6 @@ struct lpfc_hba {
        atomic_t fabric_iocb_count;
        struct timer_list fabric_block_timer;
        unsigned long bit_flags;
-#define        FABRIC_COMANDS_BLOCKED  0
        atomic_t num_rsrc_err;
        atomic_t num_cmd_success;
        unsigned long last_rsrc_error_time;
index 96408cd..9897a1a 100644 (file)
@@ -670,3 +670,6 @@ struct lpfc_vmid *lpfc_get_vmid_from_hashtable(struct lpfc_vport *vport,
                                              uint32_t hash, uint8_t *buf);
 void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
 int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+
+void lpfc_sli_rpi_release(struct lpfc_vport *vport,
+                         struct lpfc_nodelist *ndlp);
index 0144da3..2b877df 100644 (file)
@@ -109,8 +109,8 @@ lpfc_rport_invalid(struct fc_rport *rport)
 
        ndlp = rdata->pnode;
        if (!rdata->pnode) {
-               pr_err("**** %s: NULL ndlp on rport x%px SID x%x\n",
-                      __func__, rport, rport->scsi_target_id);
+               pr_info("**** %s: NULL ndlp on rport x%px SID x%x\n",
+                       __func__, rport, rport->scsi_target_id);
                return -EINVAL;
        }
 
@@ -169,9 +169,10 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
 
        lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
                         "3181 dev_loss_callbk x%06x, rport x%px flg x%x "
-                        "load_flag x%x refcnt %d\n",
+                        "load_flag x%x refcnt %d state %d xpt x%x\n",
                         ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag,
-                        vport->load_flag, kref_read(&ndlp->kref));
+                        vport->load_flag, kref_read(&ndlp->kref),
+                        ndlp->nlp_state, ndlp->fc4_xpt_flags);
 
        /* Don't schedule a worker thread event if the vport is going down.
         * The teardown process cleans up the node via lpfc_drop_node.
@@ -181,6 +182,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
                ndlp->rport = NULL;
 
                ndlp->fc4_xpt_flags &= ~SCSI_XPT_REGD;
+               /* clear the NLP_XPT_REGD if the node is not registered
+                * with nvme-fc
+                */
+               if (ndlp->fc4_xpt_flags == NLP_XPT_REGD)
+                       ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD;
 
                /* Remove the node reference from remote_port_add now.
                 * The driver will not call remote_port_delete.
@@ -225,18 +231,36 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
        ndlp->rport = NULL;
        spin_unlock_irqrestore(&ndlp->lock, iflags);
 
-       /* We need to hold the node by incrementing the reference
-        * count until this queued work is done
-        */
-       evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+       if (phba->worker_thread) {
+               /* We need to hold the node by incrementing the reference
+                * count until this queued work is done
+                */
+               evtp->evt_arg1 = lpfc_nlp_get(ndlp);
+
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               if (evtp->evt_arg1) {
+                       evtp->evt = LPFC_EVT_DEV_LOSS;
+                       list_add_tail(&evtp->evt_listp, &phba->work_list);
+                       lpfc_worker_wake_up(phba);
+               }
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+       } else {
+               lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
+                                "3188 worker thread is stopped %s x%06x, "
+                                " rport x%px flg x%x load_flag x%x refcnt "
+                                "%d\n", __func__, ndlp->nlp_DID,
+                                ndlp->rport, ndlp->nlp_flag,
+                                vport->load_flag, kref_read(&ndlp->kref));
+               if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) {
+                       spin_lock_irqsave(&ndlp->lock, iflags);
+                       /* Node is in dev loss.  No further transaction. */
+                       ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
+                       spin_unlock_irqrestore(&ndlp->lock, iflags);
+                       lpfc_disc_state_machine(vport, ndlp, NULL,
+                                               NLP_EVT_DEVICE_RM);
+               }
 
-       spin_lock_irqsave(&phba->hbalock, iflags);
-       if (evtp->evt_arg1) {
-               evtp->evt = LPFC_EVT_DEV_LOSS;
-               list_add_tail(&evtp->evt_listp, &phba->work_list);
-               lpfc_worker_wake_up(phba);
        }
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
 
        return;
 }
@@ -503,11 +527,12 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
                lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
                                 "0203 Devloss timeout on "
                                 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
-                                "NPort x%06x Data: x%x x%x x%x\n",
+                                "NPort x%06x Data: x%x x%x x%x refcnt %d\n",
                                 *name, *(name+1), *(name+2), *(name+3),
                                 *(name+4), *(name+5), *(name+6), *(name+7),
                                 ndlp->nlp_DID, ndlp->nlp_flag,
-                                ndlp->nlp_state, ndlp->nlp_rpi);
+                                ndlp->nlp_state, ndlp->nlp_rpi,
+                                kref_read(&ndlp->kref));
        } else {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
                                 "0204 Devloss timeout on "
@@ -755,18 +780,22 @@ lpfc_work_list_done(struct lpfc_hba *phba)
        int free_evt;
        int fcf_inuse;
        uint32_t nlp_did;
+       bool hba_pci_err;
 
        spin_lock_irq(&phba->hbalock);
        while (!list_empty(&phba->work_list)) {
                list_remove_head((&phba->work_list), evtp, typeof(*evtp),
                                 evt_listp);
                spin_unlock_irq(&phba->hbalock);
+               hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
                free_evt = 1;
                switch (evtp->evt) {
                case LPFC_EVT_ELS_RETRY:
                        ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
-                       lpfc_els_retry_delay_handler(ndlp);
-                       free_evt = 0; /* evt is part of ndlp */
+                       if (!hba_pci_err) {
+                               lpfc_els_retry_delay_handler(ndlp);
+                               free_evt = 0; /* evt is part of ndlp */
+                       }
                        /* decrement the node reference count held
                         * for this queued work
                         */
@@ -788,8 +817,10 @@ lpfc_work_list_done(struct lpfc_hba *phba)
                        break;
                case LPFC_EVT_RECOVER_PORT:
                        ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-                       lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
-                       free_evt = 0;
+                       if (!hba_pci_err) {
+                               lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
+                               free_evt = 0;
+                       }
                        /* decrement the node reference count held for
                         * this queued work
                         */
@@ -859,14 +890,18 @@ lpfc_work_done(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        struct lpfc_vport *vport;
        int i;
+       bool hba_pci_err;
 
+       hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
        spin_lock_irq(&phba->hbalock);
        ha_copy = phba->work_ha;
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);
+       if (hba_pci_err)
+               ha_copy = 0;
 
        /* First, try to post the next mailbox command to SLI4 device */
-       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC && !hba_pci_err)
                lpfc_sli4_post_async_mbox(phba);
 
        if (ha_copy & HA_ERATT) {
@@ -886,7 +921,7 @@ lpfc_work_done(struct lpfc_hba *phba)
                lpfc_handle_latt(phba);
 
        /* Handle VMID Events */
-       if (lpfc_is_vmid_enabled(phba)) {
+       if (lpfc_is_vmid_enabled(phba) && !hba_pci_err) {
                if (phba->pport->work_port_events &
                    WORKER_CHECK_VMID_ISSUE_QFPA) {
                        lpfc_check_vmid_qfpa_issue(phba);
@@ -936,6 +971,8 @@ lpfc_work_done(struct lpfc_hba *phba)
                        work_port_events = vport->work_port_events;
                        vport->work_port_events &= ~work_port_events;
                        spin_unlock_irq(&vport->work_port_lock);
+                       if (hba_pci_err)
+                               continue;
                        if (work_port_events & WORKER_DISC_TMO)
                                lpfc_disc_timeout_handler(vport);
                        if (work_port_events & WORKER_ELS_TMO)
@@ -1173,12 +1210,14 @@ lpfc_linkdown(struct lpfc_hba *phba)
        struct lpfc_vport **vports;
        LPFC_MBOXQ_t          *mb;
        int i;
+       int offline;
 
        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;
 
        /* Block all SCSI stack I/Os */
        lpfc_scsi_dev_block(phba);
+       offline = pci_channel_offline(phba->pcidev);
 
        phba->defer_flogi_acc_flag = false;
 
@@ -1219,7 +1258,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
        lpfc_destroy_vport_work_array(phba, vports);
 
        /* Clean up any SLI3 firmware default rpi's */
-       if (phba->sli_rev > LPFC_SLI_REV3)
+       if (phba->sli_rev > LPFC_SLI_REV3 || offline)
                goto skip_unreg_did;
 
        mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -4712,6 +4751,11 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        spin_lock_irqsave(&ndlp->lock, iflags);
        if (!(ndlp->fc4_xpt_flags & NLP_XPT_REGD)) {
                spin_unlock_irqrestore(&ndlp->lock, iflags);
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "0999 %s Not regd: ndlp x%px rport x%px DID "
+                                "x%x FLG x%x XPT x%x\n",
+                                 __func__, ndlp, ndlp->rport, ndlp->nlp_DID,
+                                 ndlp->nlp_flag, ndlp->fc4_xpt_flags);
                return;
        }
 
@@ -4722,6 +4766,13 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
            ndlp->fc4_xpt_flags & SCSI_XPT_REGD) {
                vport->phba->nport_event_cnt++;
                lpfc_unregister_remote_port(ndlp);
+       } else if (!ndlp->rport) {
+               lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+                                "1999 %s NDLP in devloss x%px DID x%x FLG x%x"
+                                " XPT x%x refcnt %d\n",
+                                __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag,
+                                ndlp->fc4_xpt_flags,
+                                kref_read(&ndlp->kref));
        }
 
        if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) {
@@ -5371,6 +5422,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                                ndlp->nlp_flag &= ~NLP_UNREG_INP;
                                mempool_free(mbox, phba->mbox_mem_pool);
                                acc_plogi = 1;
+                               lpfc_nlp_put(ndlp);
                        }
                } else {
                        lpfc_printf_vlog(vport, KERN_INFO,
@@ -6097,12 +6149,34 @@ lpfc_disc_flush_list(struct lpfc_vport *vport)
        }
 }
 
+/*
+ * lpfc_notify_xport_npr - notifies xport of node disappearance
+ * @vport: Pointer to Virtual Port object.
+ *
+ * Transitions all ndlps to NPR state.  When lpfc_nlp_set_state
+ * calls lpfc_nlp_state_cleanup, the ndlp->rport is unregistered
+ * and transport notified that the node is gone.
+ * Return Code:
+ *     none
+ */
+static void
+lpfc_notify_xport_npr(struct lpfc_vport *vport)
+{
+       struct lpfc_nodelist *ndlp, *next_ndlp;
+
+       list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
+                                nlp_listp) {
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
+       }
+}
 void
 lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
 {
        lpfc_els_flush_rscn(vport);
        lpfc_els_flush_cmd(vport);
        lpfc_disc_flush_list(vport);
+       if (pci_channel_offline(vport->phba->pcidev))
+               lpfc_notify_xport_npr(vport);
 }
 
 /*****************************************************************************/
index eed6464..461d333 100644 (file)
@@ -95,6 +95,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
 static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
 static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
 static int lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *);
+static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1642,7 +1643,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
 {
        spin_lock_irq(&phba->hbalock);
        if (phba->link_state == LPFC_HBA_ERROR &&
-           phba->hba_flag & HBA_PCI_ERR) {
+               test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
                spin_unlock_irq(&phba->hbalock);
                return;
        }
@@ -1985,6 +1986,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
        if (pci_channel_offline(phba->pcidev)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "3166 pci channel is offline\n");
+               lpfc_sli_flush_io_rings(phba);
                return;
        }
 
@@ -2973,6 +2975,22 @@ lpfc_cleanup(struct lpfc_vport *vport)
                                        NLP_EVT_DEVICE_RM);
        }
 
+       /* This is a special case flush to return all
+        * IOs before entering this loop. There are
+        * two points in the code where a flush is
+        * avoided if the FC_UNLOADING flag is set.
+        * one is in the multipool destroy,
+        * (this prevents a crash) and the other is
+        * in the nvme abort handler, ( also prevents
+        * a crash). Both of these exceptions are
+        * cases where the slot is still accessible.
+        * The flush here is only when the pci slot
+        * is offline.
+        */
+       if (vport->load_flag & FC_UNLOADING &&
+           pci_channel_offline(phba->pcidev))
+               lpfc_sli_flush_io_rings(vport->phba);
+
        /* At this point, ALL ndlp's should be gone
         * because of the previous NLP_EVT_DEVICE_RM.
         * Lets wait for this to happen, if needed.
@@ -2985,7 +3003,7 @@ lpfc_cleanup(struct lpfc_vport *vport)
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                &vport->fc_nodes, nlp_listp) {
                                lpfc_printf_vlog(ndlp->vport, KERN_ERR,
-                                                LOG_TRACE_EVENT,
+                                                LOG_DISCOVERY,
                                                 "0282 did:x%x ndlp:x%px "
                                                 "refcnt:%d xflags x%x nflag x%x\n",
                                                 ndlp->nlp_DID, (void *)ndlp,
@@ -3682,7 +3700,8 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
        struct lpfc_vport **vports;
        struct Scsi_Host *shost;
        int i;
-       int offline = 0;
+       int offline;
+       bool hba_pci_err;
 
        if (vport->fc_flag & FC_OFFLINE_MODE)
                return;
@@ -3692,6 +3711,7 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
        lpfc_linkdown(phba);
 
        offline =  pci_channel_offline(phba->pcidev);
+       hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
 
        /* Issue an unreg_login to all nodes on all vports */
        vports = lpfc_create_vport_work_array(phba);
@@ -3715,11 +3735,14 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
                                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                                spin_unlock_irq(&ndlp->lock);
 
-                               if (offline) {
+                               if (offline || hba_pci_err) {
                                        spin_lock_irq(&ndlp->lock);
                                        ndlp->nlp_flag &= ~(NLP_UNREG_INP |
                                                            NLP_RPI_REGISTERED);
                                        spin_unlock_irq(&ndlp->lock);
+                                       if (phba->sli_rev == LPFC_SLI_REV4)
+                                               lpfc_sli_rpi_release(vports[i],
+                                                                    ndlp);
                                } else {
                                        lpfc_unreg_rpi(vports[i], ndlp);
                                }
@@ -13354,8 +13377,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        /* Abort all iocbs associated with the hba */
        lpfc_sli_hba_iocb_abort(phba);
 
-       /* Wait for completion of device XRI exchange busy */
-       lpfc_sli4_xri_exchange_busy_wait(phba);
+       if (!pci_channel_offline(phba->pcidev))
+               /* Wait for completion of device XRI exchange busy */
+               lpfc_sli4_xri_exchange_busy_wait(phba);
 
        /* per-phba callback de-registration for hotplug event */
        if (phba->pport)
@@ -13374,15 +13398,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        /* Disable FW logging to host memory */
        lpfc_ras_stop_fwlog(phba);
 
-       /* Unset the queues shared with the hardware then release all
-        * allocated resources.
-        */
-       lpfc_sli4_queue_unset(phba);
-       lpfc_sli4_queue_destroy(phba);
-
        /* Reset SLI4 HBA FCoE function */
        lpfc_pci_function_reset(phba);
 
+       /* release all queue allocated resources. */
+       lpfc_sli4_queue_destroy(phba);
+
        /* Free RAS DMA memory */
        if (phba->ras_fwlog.ras_enabled)
                lpfc_sli4_ras_dma_free(phba);
@@ -14262,6 +14283,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
                        "2711 PCI channel permanent disable for failure\n");
        /* Block all SCSI devices' I/Os on the host */
        lpfc_scsi_dev_block(phba);
+       lpfc_sli4_prep_dev_for_reset(phba);
 
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
@@ -15057,24 +15079,28 @@ lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
 static void
 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
 {
-       lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
-                       "2826 PCI channel disable preparing for reset\n");
+       int offline =  pci_channel_offline(phba->pcidev);
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2826 PCI channel disable preparing for reset offline"
+                       " %d\n", offline);
 
        /* Block any management I/Os to the device */
        lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
 
-       /* Block all SCSI devices' I/Os on the host */
-       lpfc_scsi_dev_block(phba);
 
+       /* HBA_PCI_ERR was set in io_error_detect */
+       lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
        /* Flush all driver's outstanding I/Os as we are to reset */
        lpfc_sli_flush_io_rings(phba);
+       lpfc_offline(phba);
 
        /* stop all timers */
        lpfc_stop_hba_timers(phba);
 
+       lpfc_sli4_queue_destroy(phba);
        /* Disable interrupt and pci device */
        lpfc_sli4_disable_intr(phba);
-       lpfc_sli4_queue_destroy(phba);
        pci_disable_device(phba->pcidev);
 }
 
@@ -15123,6 +15149,7 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       bool hba_pci_err;
 
        switch (state) {
        case pci_channel_io_normal:
@@ -15130,17 +15157,24 @@ lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
                lpfc_sli4_prep_dev_for_recover(phba);
                return PCI_ERS_RESULT_CAN_RECOVER;
        case pci_channel_io_frozen:
-               phba->hba_flag |= HBA_PCI_ERR;
+               hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
                /* Fatal error, prepare for slot reset */
-               lpfc_sli4_prep_dev_for_reset(phba);
+               if (!hba_pci_err)
+                       lpfc_sli4_prep_dev_for_reset(phba);
+               else
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2832  Already handling PCI error "
+                                       "state: x%x\n", state);
                return PCI_ERS_RESULT_NEED_RESET;
        case pci_channel_io_perm_failure:
-               phba->hba_flag |= HBA_PCI_ERR;
+               set_bit(HBA_PCI_ERR, &phba->bit_flags);
                /* Permanent failure, prepare for device down */
                lpfc_sli4_prep_dev_for_perm_failure(phba);
                return PCI_ERS_RESULT_DISCONNECT;
        default:
-               phba->hba_flag |= HBA_PCI_ERR;
+               hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
+               if (!hba_pci_err)
+                       lpfc_sli4_prep_dev_for_reset(phba);
                /* Unknown state, prepare and request slot reset */
                lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
                                "2825 Unknown PCI error state: x%x\n", state);
@@ -15174,17 +15208,21 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
        struct lpfc_sli *psli = &phba->sli;
        uint32_t intr_mode;
+       bool hba_pci_err;
 
        dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
        if (pci_enable_device_mem(pdev)) {
                printk(KERN_ERR "lpfc: Cannot re-enable "
-                       "PCI device after reset.\n");
+                      "PCI device after reset.\n");
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
        pci_restore_state(pdev);
 
-       phba->hba_flag &= ~HBA_PCI_ERR;
+       hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
+       if (!hba_pci_err)
+               dev_info(&pdev->dev,
+                        "hba_pci_err was not set, recovering slot reset.\n");
        /*
         * As the new kernel behavior of pci_restore_state() API call clears
         * device saved_state flag, need to save the restored state again.
@@ -15198,6 +15236,8 @@ lpfc_io_slot_reset_s4(struct pci_dev *pdev)
        psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
 
+       /* Init cpu_map array */
+       lpfc_cpu_map_array_init(phba);
        /* Configure and enable interrupt */
        intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
@@ -15239,8 +15279,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
         */
        if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
                /* Perform device reset */
-               lpfc_offline_prep(phba, LPFC_MBX_WAIT);
-               lpfc_offline(phba);
                lpfc_sli_brdrestart(phba);
                /* Bring the device back online */
                lpfc_online(phba);
index 1213a29..8d26f20 100644 (file)
@@ -93,6 +93,11 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
 
        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
+
+       if (!vport || vport->load_flag & FC_UNLOADING ||
+           vport->phba->hba_flag & HBA_IOQ_FLUSH)
+               return -ENODEV;
+
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;
@@ -267,7 +272,8 @@ lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
                return -EINVAL;
 
        remoteport = lpfc_rport->remoteport;
-       if (!vport->localport)
+       if (!vport->localport ||
+           vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -EINVAL;
 
        lport = vport->localport->private;
@@ -559,6 +565,8 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                                 ndlp->nlp_DID, ntype, nstate);
                return -ENODEV;
        }
+       if (vport->phba->hba_flag & HBA_IOQ_FLUSH)
+               return -ENODEV;
 
        if (!vport->phba->sli4_hba.nvmels_wq)
                return -ENOMEM;
@@ -662,7 +670,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                return -EINVAL;
 
        vport = lport->vport;
-       if (vport->load_flag & FC_UNLOADING)
+       if (vport->load_flag & FC_UNLOADING ||
+           vport->phba->hba_flag & HBA_IOQ_FLUSH)
                return -ENODEV;
 
        atomic_inc(&lport->fc4NvmeLsRequests);
@@ -1516,7 +1525,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 
        phba = vport->phba;
 
-       if (unlikely(vport->load_flag & FC_UNLOADING)) {
+       if ((unlikely(vport->load_flag & FC_UNLOADING)) ||
+           phba->hba_flag & HBA_IOQ_FLUSH) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
                                 "6124 Fail IO, Driver unload\n");
                atomic_inc(&lport->xmt_fcp_err);
@@ -2169,8 +2179,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
                        abts_nvme = 0;
                        for (i = 0; i < phba->cfg_hdw_queue; i++) {
                                qp = &phba->sli4_hba.hdwq[i];
-                               if (!vport || !vport->localport ||
-                                   !qp || !qp->io_wq)
+                               if (!vport->localport || !qp || !qp->io_wq)
                                        return;
 
                                pring = qp->io_wq->pring;
@@ -2180,8 +2189,9 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
                                abts_scsi += qp->abts_scsi_io_bufs;
                                abts_nvme += qp->abts_nvme_io_bufs;
                        }
-                       if (!vport || !vport->localport ||
-                           vport->phba->hba_flag & HBA_PCI_ERR)
+                       if (!vport->localport ||
+                           test_bit(HBA_PCI_ERR, &vport->phba->bit_flags) ||
+                           vport->load_flag & FC_UNLOADING)
                                return;
 
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -2541,8 +2551,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                 * return values is ignored.  The upcall is a courtesy to the
                 * transport.
                 */
-               if (vport->load_flag & FC_UNLOADING ||
-                   unlikely(vport->phba->hba_flag & HBA_PCI_ERR))
+               if (vport->load_flag & FC_UNLOADING)
                        (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
 
                ret = nvme_fc_unregister_remoteport(remoteport);
index 3c13260..ba9dbb5 100644 (file)
@@ -5929,13 +5929,15 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        }
 
        lpfc_cmd->waitq = &waitq;
-       if (phba->sli_rev == LPFC_SLI_REV4)
+       if (phba->sli_rev == LPFC_SLI_REV4) {
                spin_unlock(&pring_s4->ring_lock);
-       else
+               ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
+                                                     lpfc_sli_abort_fcp_cmpl);
+       } else {
                pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
-
-       ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
-                                            lpfc_sli_abort_fcp_cmpl);
+               ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
+                                                    lpfc_sli_abort_fcp_cmpl);
+       }
 
        /* Make sure HBA is alive */
        lpfc_issue_hb_tmo(phba);
index 20d4095..bda2a7b 100644 (file)
@@ -2828,6 +2828,12 @@ __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        ndlp->nlp_flag &= ~NLP_UNREG_INP;
 }
 
+void
+lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+       __lpfc_sli_rpi_release(vport, ndlp);
+}
+
 /**
  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
  * @phba: Pointer to HBA context object.
@@ -3715,7 +3721,15 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        unsigned long iflag;
        u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
 
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               spin_lock_irqsave(&pring->ring_lock, iflag);
+       else
+               spin_lock_irqsave(&phba->hbalock, iflag);
        cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               spin_unlock_irqrestore(&pring->ring_lock, iflag);
+       else
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
 
        ulp_command = get_job_cmnd(phba, saveq);
        ulp_status = get_job_ulpstatus(phba, saveq);
@@ -4052,10 +4066,8 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                                break;
                        }
 
-                       spin_unlock_irqrestore(&phba->hbalock, iflag);
                        cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
                                                         &rspiocbq);
-                       spin_lock_irqsave(&phba->hbalock, iflag);
                        if (unlikely(!cmdiocbq))
                                break;
                        if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
@@ -4536,42 +4548,62 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
 void
 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 {
-       LIST_HEAD(completions);
+       LIST_HEAD(tx_completions);
+       LIST_HEAD(txcmplq_completions);
        struct lpfc_iocbq *iocb, *next_iocb;
+       int offline;
 
        if (pring->ringno == LPFC_ELS_RING) {
                lpfc_fabric_abort_hba(phba);
        }
+       offline = pci_channel_offline(phba->pcidev);
 
        /* Error everything on txq and txcmplq
         * First do the txq.
         */
        if (phba->sli_rev >= LPFC_SLI_REV4) {
                spin_lock_irq(&pring->ring_lock);
-               list_splice_init(&pring->txq, &completions);
+               list_splice_init(&pring->txq, &tx_completions);
                pring->txq_cnt = 0;
-               spin_unlock_irq(&pring->ring_lock);
 
-               spin_lock_irq(&phba->hbalock);
-               /* Next issue ABTS for everything on the txcmplq */
-               list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-                       lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
-               spin_unlock_irq(&phba->hbalock);
+               if (offline) {
+                       list_splice_init(&pring->txcmplq,
+                                        &txcmplq_completions);
+               } else {
+                       /* Next issue ABTS for everything on the txcmplq */
+                       list_for_each_entry_safe(iocb, next_iocb,
+                                                &pring->txcmplq, list)
+                               lpfc_sli_issue_abort_iotag(phba, pring,
+                                                          iocb, NULL);
+               }
+               spin_unlock_irq(&pring->ring_lock);
        } else {
                spin_lock_irq(&phba->hbalock);
-               list_splice_init(&pring->txq, &completions);
+               list_splice_init(&pring->txq, &tx_completions);
                pring->txq_cnt = 0;
 
-               /* Next issue ABTS for everything on the txcmplq */
-               list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-                       lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
+               if (offline) {
+                       list_splice_init(&pring->txcmplq, &txcmplq_completions);
+               } else {
+                       /* Next issue ABTS for everything on the txcmplq */
+                       list_for_each_entry_safe(iocb, next_iocb,
+                                                &pring->txcmplq, list)
+                               lpfc_sli_issue_abort_iotag(phba, pring,
+                                                          iocb, NULL);
+               }
                spin_unlock_irq(&phba->hbalock);
        }
-       /* Make sure HBA is alive */
-       lpfc_issue_hb_tmo(phba);
 
+       if (offline) {
+               /* Cancel all the IOCBs from the completions list */
+               lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
+                                     IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+       } else {
+               /* Make sure HBA is alive */
+               lpfc_issue_hb_tmo(phba);
+       }
        /* Cancel all the IOCBs from the completions list */
-       lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+       lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
 }
 
@@ -4624,11 +4656,6 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
        struct lpfc_iocbq *piocb, *next_iocb;
 
        spin_lock_irq(&phba->hbalock);
-       if (phba->hba_flag & HBA_IOQ_FLUSH ||
-           !phba->sli4_hba.hdwq) {
-               spin_unlock_irq(&phba->hbalock);
-               return;
-       }
        /* Indicate the I/O queues are flushed */
        phba->hba_flag |= HBA_IOQ_FLUSH;
        spin_unlock_irq(&phba->hbalock);
@@ -10997,6 +11024,10 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
        unsigned long iflags;
        int rc;
 
+       /* If the PCI channel is in offline state, do not post iocbs. */
+       if (unlikely(pci_channel_offline(phba->pcidev)))
+               return IOCB_ERROR;
+
        if (phba->sli_rev == LPFC_SLI_REV4) {
                lpfc_sli_prep_wqe(phba, piocb);
 
index e52f37e..a4d3259 100644 (file)
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "14.2.0.0"
+#define LPFC_DRIVER_VERSION "14.2.0.1"
 #define LPFC_DRIVER_NAME               "lpfc"
 
 /* Used for SLI 2/3 */
index 611871e..4919ea5 100644 (file)
@@ -2560,6 +2560,9 @@ struct megasas_instance_template {
 #define MEGASAS_IS_LOGICAL(sdev)                                       \
        ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 
+#define MEGASAS_IS_LUN_VALID(sdev)                                     \
+       (((sdev)->lun == 0) ? 1 : 0)
+
 #define MEGASAS_DEV_INDEX(scp)                                         \
        (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +   \
        scp->device->id)
index 8bf72db..db67936 100644 (file)
@@ -2126,6 +2126,9 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
                        goto scan_target;
                }
                return -ENXIO;
+       } else if (!MEGASAS_IS_LUN_VALID(sdev)) {
+               sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+               return -ENXIO;
        }
 
 scan_target:
@@ -2156,6 +2159,10 @@ static void megasas_slave_destroy(struct scsi_device *sdev)
        instance = megasas_lookup_instance(sdev->host->host_no);
 
        if (MEGASAS_IS_LOGICAL(sdev)) {
+               if (!MEGASAS_IS_LUN_VALID(sdev)) {
+                       sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
+                       return;
+               }
                ld_tgt_id = MEGASAS_TARGET_ID(sdev);
                instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
                if (megasas_dbg_lvl & LD_PD_DEBUG)
index b57f180..538d2c0 100644 (file)
@@ -5716,13 +5716,12 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 /**
  * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
  *     having same upper 32bits in their base memory address.
- * @reply_pool_start_address: Base address of a reply queue set
+ * @start_address: Base address of a reply queue set
  * @pool_sz: Size of single Reply Descriptor Post Queues pool size
  *
  * Return: 1 if reply queues in a set have a same upper 32bits in their base
  * memory address, else 0.
  */
-
 static int
 mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
 {
index 0563078..a8dd14c 100644 (file)
@@ -394,10 +394,13 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
                retry_count++;
                if (ioc->config_cmds.smid == smid)
                        mpt3sas_base_free_smid(ioc, smid);
-               if ((ioc->shost_recovery) || (ioc->config_cmds.status &
-                   MPT3_CMD_RESET) || ioc->pci_error_recovery)
+               if (ioc->config_cmds.status & MPT3_CMD_RESET)
                        goto retry_config;
-               issue_host_reset = 1;
+               if (ioc->shost_recovery || ioc->pci_error_recovery) {
+                       issue_host_reset = 0;
+                       r = -EFAULT;
+               } else
+                       issue_host_reset = 1;
                goto free_mem;
        }
 
index 0079276..7e476f5 100644 (file)
@@ -11035,6 +11035,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
 {
        struct _sas_port *mpt3sas_port, *next;
        unsigned long flags;
+       int port_id;
 
        /* remove sibling ports attached to this expander */
        list_for_each_entry_safe(mpt3sas_port, next,
@@ -11055,6 +11056,8 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
                            mpt3sas_port->hba_port);
        }
 
+       port_id = sas_expander->port->port_id;
+
        mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
            sas_expander->sas_address_parent, sas_expander->port);
 
@@ -11062,7 +11065,7 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
            "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
            sas_expander->handle, (unsigned long long)
            sas_expander->sas_address,
-           sas_expander->port->port_id);
+           port_id);
 
        spin_lock_irqsave(&ioc->sas_node_lock, flags);
        list_del(&sas_expander->list);
index 7ac63eb..2fde496 100644 (file)
@@ -647,6 +647,7 @@ static struct pci_device_id mvs_pci_table[] = {
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
        { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
+       { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
        { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
index c4a8386..5d7dfef 100644 (file)
@@ -192,10 +192,11 @@ struct sym53c500_data {
        int fast_pio;
 };
 
-static struct scsi_pointer *sym53c500_scsi_pointer(struct scsi_cmnd *cmd)
-{
-       return scsi_cmd_priv(cmd);
-}
+struct sym53c500_cmd_priv {
+       int status;
+       int message;
+       int phase;
+};
 
 enum Phase {
     idle,
@@ -356,7 +357,7 @@ SYM53C500_intr(int irq, void *dev_id)
        struct sym53c500_data *data =
            (struct sym53c500_data *)dev->hostdata;
        struct scsi_cmnd *curSC = data->current_SC;
-       struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(curSC);
+       struct sym53c500_cmd_priv *scp = scsi_cmd_priv(curSC);
        int fast_pio = data->fast_pio;
 
        spin_lock_irqsave(dev->host_lock, flags);
@@ -403,12 +404,11 @@ SYM53C500_intr(int irq, void *dev_id)
 
        if (int_reg & 0x20) {           /* Disconnect */
                DEB(printk("SYM53C500: disconnect intr received\n"));
-               if (scsi_pointer->phase != message_in) {        /* Unexpected disconnect */
+               if (scp->phase != message_in) { /* Unexpected disconnect */
                        curSC->result = DID_NO_CONNECT << 16;
                } else {        /* Command complete, return status and message */
-                       curSC->result = (scsi_pointer->Status & 0xff) |
-                               ((scsi_pointer->Message & 0xff) << 8) |
-                               (DID_OK << 16);
+                       curSC->result = (scp->status & 0xff) |
+                               ((scp->message & 0xff) << 8) | (DID_OK << 16);
                }
                goto idle_out;
        }
@@ -419,7 +419,7 @@ SYM53C500_intr(int irq, void *dev_id)
                        struct scatterlist *sg;
                        int i;
 
-                       scsi_pointer->phase = data_out;
+                       scp->phase = data_out;
                        VDEB(printk("SYM53C500: Data-Out phase\n"));
                        outb(FLUSH_FIFO, port_base + CMD_REG);
                        LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -438,7 +438,7 @@ SYM53C500_intr(int irq, void *dev_id)
                        struct scatterlist *sg;
                        int i;
 
-                       scsi_pointer->phase = data_in;
+                       scp->phase = data_in;
                        VDEB(printk("SYM53C500: Data-In phase\n"));
                        outb(FLUSH_FIFO, port_base + CMD_REG);
                        LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
@@ -453,12 +453,12 @@ SYM53C500_intr(int irq, void *dev_id)
                break;
 
        case 0x02:              /* COMMAND */
-               scsi_pointer->phase = command_ph;
+               scp->phase = command_ph;
                printk("SYM53C500: Warning: Unknown interrupt occurred in command phase!\n");
                break;
 
        case 0x03:              /* STATUS */
-               scsi_pointer->phase = status_ph;
+               scp->phase = status_ph;
                VDEB(printk("SYM53C500: Status phase\n"));
                outb(FLUSH_FIFO, port_base + CMD_REG);
                outb(INIT_CMD_COMPLETE, port_base + CMD_REG);
@@ -471,24 +471,22 @@ SYM53C500_intr(int irq, void *dev_id)
 
        case 0x06:              /* MESSAGE-OUT */
                DEB(printk("SYM53C500: Message-Out phase\n"));
-               scsi_pointer->phase = message_out;
+               scp->phase = message_out;
                outb(SET_ATN, port_base + CMD_REG);     /* Reject the message */
                outb(MSG_ACCEPT, port_base + CMD_REG);
                break;
 
        case 0x07:              /* MESSAGE-IN */
                VDEB(printk("SYM53C500: Message-In phase\n"));
-               scsi_pointer->phase = message_in;
+               scp->phase = message_in;
 
-               scsi_pointer->Status = inb(port_base + SCSI_FIFO);
-               scsi_pointer->Message = inb(port_base + SCSI_FIFO);
+               scp->status = inb(port_base + SCSI_FIFO);
+               scp->message = inb(port_base + SCSI_FIFO);
 
                VDEB(printk("SCSI FIFO size=%d\n", inb(port_base + FIFO_FLAGS) & 0x1f));
-               DEB(printk("Status = %02x  Message = %02x\n",
-                          scsi_pointer->Status, scsi_pointer->Message));
+               DEB(printk("Status = %02x  Message = %02x\n", scp->status, scp->message));
 
-               if (scsi_pointer->Message == SAVE_POINTERS ||
-                   scsi_pointer->Message == DISCONNECT) {
+               if (scp->message == SAVE_POINTERS || scp->message == DISCONNECT) {
                        outb(SET_ATN, port_base + CMD_REG);     /* Reject message */
                        DEB(printk("Discarding SAVE_POINTERS message\n"));
                }
@@ -500,7 +498,7 @@ out:
        return IRQ_HANDLED;
 
 idle_out:
-       scsi_pointer->phase = idle;
+       scp->phase = idle;
        scsi_done(curSC);
        goto out;
 }
@@ -548,7 +546,7 @@ SYM53C500_info(struct Scsi_Host *SChost)
 
 static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
 {
-       struct scsi_pointer *scsi_pointer = sym53c500_scsi_pointer(SCpnt);
+       struct sym53c500_cmd_priv *scp = scsi_cmd_priv(SCpnt);
        int i;
        int port_base = SCpnt->device->host->io_port;
        struct sym53c500_data *data =
@@ -565,9 +563,9 @@ static int SYM53C500_queue_lck(struct scsi_cmnd *SCpnt)
        VDEB(printk("\n"));
 
        data->current_SC = SCpnt;
-       scsi_pointer->phase = command_ph;
-       scsi_pointer->Status = 0;
-       scsi_pointer->Message = 0;
+       scp->phase = command_ph;
+       scp->status = 0;
+       scp->message = 0;
 
        /* We are locked here already by the mid layer */
        REG0(port_base);
@@ -682,7 +680,7 @@ static struct scsi_host_template sym53c500_driver_template = {
      .this_id                  = 7,
      .sg_tablesize             = 32,
      .shost_groups             = SYM53C500_shost_groups,
-     .cmd_size                 = sizeof(struct scsi_pointer),
+     .cmd_size                 = sizeof(struct sym53c500_cmd_priv),
 };
 
 static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data)
index f90b707..01c5e8f 100644 (file)
@@ -766,6 +766,10 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity       = 0x01;
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt          = 0x01;
 
+       /* Enable higher IQs and OQs, 32 to 63, bit 16 */
+       if (pm8001_ha->max_q_num > 32)
+               pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+                                                       1 << 16;
        /* Disable end to end CRC checking */
        pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16);
 
@@ -1027,6 +1031,13 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
        if (0x0000 != gst_len_mpistate)
                return -EBUSY;
 
+       /*
+        *  As per controller datasheet, after successful MPI
+        *  initialization minimum 500ms delay is required before
+        *  issuing commands.
+        */
+       msleep(500);
+
        return 0;
 }
 
@@ -1727,10 +1738,11 @@ static void
 pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
-       u32 mask;
-       mask = (u32)(1 << vec);
-
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF));
+       if (vec < 32)
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, 1U << vec);
+       else
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR_U,
+                           1U << (vec - 32));
        return;
 #endif
        pm80xx_chip_intx_interrupt_enable(pm8001_ha);
@@ -1746,12 +1758,15 @@ static void
 pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec)
 {
 #ifdef PM8001_USE_MSIX
-       u32 mask;
-       if (vec == 0xFF)
-               mask = 0xFFFFFFFF;
+       if (vec == 0xFF) {
+               /* disable all vectors 0-31, 32-63 */
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 0xFFFFFFFF);
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U, 0xFFFFFFFF);
+       } else if (vec < 32)
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, 1U << vec);
        else
-               mask = (u32)(1 << vec);
-       pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF));
+               pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_U,
+                           1U << (vec - 32));
        return;
 #endif
        pm80xx_chip_intx_interrupt_disable(pm8001_ha);
index 9285321..fd674ed 100644 (file)
@@ -3182,124 +3182,6 @@ static int pmcraid_build_ioadl(
 }
 
 /**
- * pmcraid_free_sglist - Frees an allocated SG buffer list
- * @sglist: scatter/gather list pointer
- *
- * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist
- *
- * Return value:
- *     none
- */
-static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
-{
-       sgl_free_order(sglist->scatterlist, sglist->order);
-       kfree(sglist);
-}
-
-/**
- * pmcraid_alloc_sglist - Allocates memory for a SG list
- * @buflen: buffer length
- *
- * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
- * list.
- *
- * Return value
- *     pointer to sglist / NULL on failure
- */
-static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
-{
-       struct pmcraid_sglist *sglist;
-       int sg_size;
-       int order;
-
-       sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
-       order = (sg_size > 0) ? get_order(sg_size) : 0;
-
-       /* Allocate a scatter/gather list for the DMA */
-       sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
-       if (sglist == NULL)
-               return NULL;
-
-       sglist->order = order;
-       sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
-                       &sglist->num_sg);
-
-       return sglist;
-}
-
-/**
- * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
- * @sglist: scatter/gather list pointer
- * @buffer: buffer pointer
- * @len: buffer length
- * @direction: data transfer direction
- *
- * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
- *
- * Return value:
- * 0 on success / other on failure
- */
-static int pmcraid_copy_sglist(
-       struct pmcraid_sglist *sglist,
-       void __user *buffer,
-       u32 len,
-       int direction
-)
-{
-       struct scatterlist *sg;
-       void *kaddr;
-       int bsize_elem;
-       int i;
-       int rc = 0;
-
-       /* Determine the actual number of bytes per element */
-       bsize_elem = PAGE_SIZE * (1 << sglist->order);
-
-       sg = sglist->scatterlist;
-
-       for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
-               struct page *page = sg_page(sg);
-
-               kaddr = kmap(page);
-               if (direction == DMA_TO_DEVICE)
-                       rc = copy_from_user(kaddr, buffer, bsize_elem);
-               else
-                       rc = copy_to_user(buffer, kaddr, bsize_elem);
-
-               kunmap(page);
-
-               if (rc) {
-                       pmcraid_err("failed to copy user data into sg list\n");
-                       return -EFAULT;
-               }
-
-               sg->length = bsize_elem;
-       }
-
-       if (len % bsize_elem) {
-               struct page *page = sg_page(sg);
-
-               kaddr = kmap(page);
-
-               if (direction == DMA_TO_DEVICE)
-                       rc = copy_from_user(kaddr, buffer, len % bsize_elem);
-               else
-                       rc = copy_to_user(buffer, kaddr, len % bsize_elem);
-
-               kunmap(page);
-
-               sg->length = len % bsize_elem;
-       }
-
-       if (rc) {
-               pmcraid_err("failed to copy user data into sg list\n");
-               rc = -EFAULT;
-       }
-
-       return rc;
-}
-
-/**
  * pmcraid_queuecommand_lck - Queue a mid-layer request
  * @scsi_cmd: scsi command struct
  *
@@ -3454,365 +3336,6 @@ static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
        return rc;
 }
 
-
-/**
- * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
- * commands sent over IOCTL interface
- *
- * @cmd       : pointer to struct pmcraid_cmd
- * @buflen    : length of the request buffer
- * @direction : data transfer direction
- *
- * Return value
- *  0 on success, non-zero error code on failure
- */
-static int pmcraid_build_passthrough_ioadls(
-       struct pmcraid_cmd *cmd,
-       int buflen,
-       int direction
-)
-{
-       struct pmcraid_sglist *sglist = NULL;
-       struct scatterlist *sg = NULL;
-       struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
-       struct pmcraid_ioadl_desc *ioadl;
-       int i;
-
-       sglist = pmcraid_alloc_sglist(buflen);
-
-       if (!sglist) {
-               pmcraid_err("can't allocate memory for passthrough SGls\n");
-               return -ENOMEM;
-       }
-
-       sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
-                                       sglist->scatterlist,
-                                       sglist->num_sg, direction);
-
-       if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
-               dev_err(&cmd->drv_inst->pdev->dev,
-                       "Failed to map passthrough buffer!\n");
-               pmcraid_free_sglist(sglist);
-               return -EIO;
-       }
-
-       cmd->sglist = sglist;
-       ioarcb->request_flags0 |= NO_LINK_DESCS;
-
-       ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
-
-       /* Initialize IOADL descriptor addresses */
-       for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
-               ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
-               ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
-               ioadl[i].flags = 0;
-       }
-
-       /* setup the last descriptor */
-       ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
-
-       return 0;
-}
-
-
-/**
- * pmcraid_release_passthrough_ioadls - release passthrough ioadls
- *
- * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
- * @buflen: size of the request buffer
- * @direction: data transfer direction
- *
- * Return value
- *  0 on success, non-zero error code on failure
- */
-static void pmcraid_release_passthrough_ioadls(
-       struct pmcraid_cmd *cmd,
-       int buflen,
-       int direction
-)
-{
-       struct pmcraid_sglist *sglist = cmd->sglist;
-
-       if (buflen > 0) {
-               dma_unmap_sg(&cmd->drv_inst->pdev->dev,
-                            sglist->scatterlist,
-                            sglist->num_sg,
-                            direction);
-               pmcraid_free_sglist(sglist);
-               cmd->sglist = NULL;
-       }
-}
-
-/**
- * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
- *
- * @pinstance: pointer to adapter instance structure
- * @ioctl_cmd: ioctl code
- * @buflen: unused
- * @arg: pointer to pmcraid_passthrough_buffer user buffer
- *
- * Return value
- *  0 on success, non-zero error code on failure
- */
-static long pmcraid_ioctl_passthrough(
-       struct pmcraid_instance *pinstance,
-       unsigned int ioctl_cmd,
-       unsigned int buflen,
-       void __user *arg
-)
-{
-       struct pmcraid_passthrough_ioctl_buffer *buffer;
-       struct pmcraid_ioarcb *ioarcb;
-       struct pmcraid_cmd *cmd;
-       struct pmcraid_cmd *cancel_cmd;
-       void __user *request_buffer;
-       unsigned long request_offset;
-       unsigned long lock_flags;
-       void __user *ioasa;
-       u32 ioasc;
-       int request_size;
-       int buffer_size;
-       u8 direction;
-       int rc = 0;
-
-       /* If IOA reset is in progress, wait 10 secs for reset to complete */
-       if (pinstance->ioa_reset_in_progress) {
-               rc = wait_event_interruptible_timeout(
-                               pinstance->reset_wait_q,
-                               !pinstance->ioa_reset_in_progress,
-                               msecs_to_jiffies(10000));
-
-               if (!rc)
-                       return -ETIMEDOUT;
-               else if (rc < 0)
-                       return -ERESTARTSYS;
-       }
-
-       /* If adapter is not in operational state, return error */
-       if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
-               pmcraid_err("IOA is not operational\n");
-               return -ENOTTY;
-       }
-
-       buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
-       buffer = kmalloc(buffer_size, GFP_KERNEL);
-
-       if (!buffer) {
-               pmcraid_err("no memory for passthrough buffer\n");
-               return -ENOMEM;
-       }
-
-       request_offset =
-           offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
-
-       request_buffer = arg + request_offset;
-
-       rc = copy_from_user(buffer, arg,
-                            sizeof(struct pmcraid_passthrough_ioctl_buffer));
-
-       ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
-
-       if (rc) {
-               pmcraid_err("ioctl: can't copy passthrough buffer\n");
-               rc = -EFAULT;
-               goto out_free_buffer;
-       }
-
-       request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
-
-       if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
-               direction = DMA_TO_DEVICE;
-       } else {
-               direction = DMA_FROM_DEVICE;
-       }
-
-       if (request_size < 0) {
-               rc = -EINVAL;
-               goto out_free_buffer;
-       }
-
-       /* check if we have any additional command parameters */
-       if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
-            > PMCRAID_ADD_CMD_PARAM_LEN) {
-               rc = -EINVAL;
-               goto out_free_buffer;
-       }
-
-       cmd = pmcraid_get_free_cmd(pinstance);
-
-       if (!cmd) {
-               pmcraid_err("free command block is not available\n");
-               rc = -ENOMEM;
-               goto out_free_buffer;
-       }
-
-       cmd->scsi_cmd = NULL;
-       ioarcb = &(cmd->ioa_cb->ioarcb);
-
-       /* Copy the user-provided IOARCB stuff field by field */
-       ioarcb->resource_handle = buffer->ioarcb.resource_handle;
-       ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
-       ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
-       ioarcb->request_type = buffer->ioarcb.request_type;
-       ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
-       ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
-       memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
-
-       if (buffer->ioarcb.add_cmd_param_length) {
-               ioarcb->add_cmd_param_length =
-                       buffer->ioarcb.add_cmd_param_length;
-               ioarcb->add_cmd_param_offset =
-                       buffer->ioarcb.add_cmd_param_offset;
-               memcpy(ioarcb->add_data.u.add_cmd_params,
-                       buffer->ioarcb.add_data.u.add_cmd_params,
-                       le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
-       }
-
-       /* set hrrq number where the IOA should respond to. Note that all cmds
-        * generated internally uses hrrq_id 0, exception to this is the cmd
-        * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
-        * hrrq_id assigned here in queuecommand
-        */
-       ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
-                         pinstance->num_hrrq;
-
-       if (request_size) {
-               rc = pmcraid_build_passthrough_ioadls(cmd,
-                                                     request_size,
-                                                     direction);
-               if (rc) {
-                       pmcraid_err("couldn't build passthrough ioadls\n");
-                       goto out_free_cmd;
-               }
-       }
-
-       /* If data is being written into the device, copy the data from user
-        * buffers
-        */
-       if (direction == DMA_TO_DEVICE && request_size > 0) {
-               rc = pmcraid_copy_sglist(cmd->sglist,
-                                        request_buffer,
-                                        request_size,
-                                        direction);
-               if (rc) {
-                       pmcraid_err("failed to copy user buffer\n");
-                       goto out_free_sglist;
-               }
-       }
-
-       /* passthrough ioctl is a blocking command so, put the user to sleep
-        * until timeout. Note that a timeout value of 0 means, do timeout.
-        */
-       cmd->cmd_done = pmcraid_internal_done;
-       init_completion(&cmd->wait_for_completion);
-       cmd->completion_req = 1;
-
-       pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
-                    le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
-                    cmd->ioa_cb->ioarcb.cdb[0],
-                    le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
-
-       spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
-       _pmcraid_fire_command(cmd);
-       spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
-       /* NOTE ! Remove the below line once abort_task is implemented
-        * in firmware. This line disables ioctl command timeout handling logic
-        * similar to IO command timeout handling, making ioctl commands to wait
-        * until the command completion regardless of timeout value specified in
-        * ioarcb
-        */
-       buffer->ioarcb.cmd_timeout = 0;
-
-       /* If command timeout is specified put caller to wait till that time,
-        * otherwise it would be blocking wait. If command gets timed out, it
-        * will be aborted.
-        */
-       if (buffer->ioarcb.cmd_timeout == 0) {
-               wait_for_completion(&cmd->wait_for_completion);
-       } else if (!wait_for_completion_timeout(
-                       &cmd->wait_for_completion,
-                       msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
-
-               pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
-                       le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
-                       cmd->ioa_cb->ioarcb.cdb[0]);
-
-               spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
-               cancel_cmd = pmcraid_abort_cmd(cmd);
-               spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
-
-               if (cancel_cmd) {
-                       wait_for_completion(&cancel_cmd->wait_for_completion);
-                       ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
-                       pmcraid_return_cmd(cancel_cmd);
-
-                       /* if abort task couldn't find the command i.e it got
-                        * completed prior to aborting, return good completion.
-                        * if command got aborted successfully or there was IOA
-                        * reset due to abort task itself getting timedout then
-                        * return -ETIMEDOUT
-                        */
-                       if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
-                           PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
-                               if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
-                                       rc = -ETIMEDOUT;
-                               goto out_handle_response;
-                       }
-               }
-
-               /* no command block for abort task or abort task failed to abort
-                * the IOARCB, then wait for 150 more seconds and initiate reset
-                * sequence after timeout
-                */
-               if (!wait_for_completion_timeout(
-                       &cmd->wait_for_completion,
-                       msecs_to_jiffies(150 * 1000))) {
-                       pmcraid_reset_bringup(cmd->drv_inst);
-                       rc = -ETIMEDOUT;
-               }
-       }
-
-out_handle_response:
-       /* copy entire IOASA buffer and return IOCTL success.
-        * If copying IOASA to user-buffer fails, return
-        * EFAULT
-        */
-       if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
-               sizeof(struct pmcraid_ioasa))) {
-               pmcraid_err("failed to copy ioasa buffer to user\n");
-               rc = -EFAULT;
-       }
-
-       /* If the data transfer was from device, copy the data onto user
-        * buffers
-        */
-       else if (direction == DMA_FROM_DEVICE && request_size > 0) {
-               rc = pmcraid_copy_sglist(cmd->sglist,
-                                        request_buffer,
-                                        request_size,
-                                        direction);
-               if (rc) {
-                       pmcraid_err("failed to copy user buffer\n");
-                       rc = -EFAULT;
-               }
-       }
-
-out_free_sglist:
-       pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
-
-out_free_cmd:
-       pmcraid_return_cmd(cmd);
-
-out_free_buffer:
-       kfree(buffer);
-
-       return rc;
-}
-
-
-
-
 /**
  * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
  *
@@ -3922,20 +3445,6 @@ static long pmcraid_chr_ioctl(
 
        switch (_IOC_TYPE(cmd)) {
 
-       case PMCRAID_PASSTHROUGH_IOCTL:
-               /* If ioctl code is to download microcode, we need to block
-                * mid-layer requests.
-                */
-               if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
-                       scsi_block_requests(pinstance->host);
-
-               retval = pmcraid_ioctl_passthrough(pinstance, cmd,
-                                                  hdr->buffer_length, argp);
-
-               if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
-                       scsi_unblock_requests(pinstance->host);
-               break;
-
        case PMCRAID_DRIVER_IOCTL:
                arg += sizeof(struct pmcraid_ioctl_header);
                retval = pmcraid_ioctl_driver(pinstance, cmd,
index bbb7531..9f59930 100644 (file)
@@ -1023,40 +1023,15 @@ struct pmcraid_ioctl_header {
 #define PMCRAID_IOCTL_SIGNATURE      "PMCRAID"
 
 /*
- * pmcraid_passthrough_ioctl_buffer - structure given as argument to
- * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires
- * 32-byte alignment so, it is necessary to pack this structure to avoid any
- * holes between ioctl_header and passthrough buffer
- *
- * .ioactl_header : ioctl header
- * .ioarcb        : filled-up ioarcb buffer, driver always reads this buffer
- * .ioasa         : buffer for ioasa, driver fills this with IOASA from firmware
- * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
- *                  the transfer directions passed in ioarcb.flags0. Contents
- *                  of this buffer are valid only when ioarcb.data_transfer_len
- *                  is not zero.
- */
-struct pmcraid_passthrough_ioctl_buffer {
-       struct pmcraid_ioctl_header ioctl_header;
-       struct pmcraid_ioarcb ioarcb;
-       struct pmcraid_ioasa  ioasa;
-       u8  request_buffer[];
-} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
-
-/*
  * keys to differentiate between driver handled IOCTLs and passthrough
  * IOCTLs passed to IOA. driver determines the ioctl type using macro
  * _IOC_TYPE
  */
 #define PMCRAID_DRIVER_IOCTL         'D'
-#define PMCRAID_PASSTHROUGH_IOCTL    'F'
 
 #define DRV_IOCTL(n, size) \
        _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
 
-#define FMW_IOCTL(n, size) \
-       _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
-
 /*
  * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
  * This is to facilitate applications avoiding un-necessary memory allocations.
@@ -1069,12 +1044,4 @@ struct pmcraid_passthrough_ioctl_buffer {
 #define PMCRAID_IOCTL_RESET_ADAPTER          \
        DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
 
-/* passthrough/firmware handled commands */
-#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND         \
-       FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE     \
-       FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
-
-
 #endif /* _PMCRAID_H */
index 8196f89..31ec429 100644 (file)
@@ -860,6 +860,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
        return qedi_iscsi_send_ioreq(task);
 }
 
+static void qedi_offload_work(struct work_struct *work)
+{
+       struct qedi_endpoint *qedi_ep =
+               container_of(work, struct qedi_endpoint, offload_work);
+       struct qedi_ctx *qedi;
+       int wait_delay = 5 * HZ;
+       int ret;
+
+       qedi = qedi_ep->qedi;
+
+       ret = qedi_iscsi_offload_conn(qedi_ep);
+       if (ret) {
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
+                        qedi_ep->iscsi_cid, qedi_ep, ret);
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               return;
+       }
+
+       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
+                                              (qedi_ep->state ==
+                                              EP_STATE_OFLDCONN_COMPL),
+                                              wait_delay);
+       if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
+               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
+               QEDI_ERR(&qedi->dbg_ctx,
+                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
+                        qedi_ep->iscsi_cid, qedi_ep);
+       }
+}
+
 static struct iscsi_endpoint *
 qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
                int non_blocking)
@@ -908,6 +939,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
        }
        qedi_ep = ep->dd_data;
        memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
+       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
        qedi_ep->state = EP_STATE_IDLE;
        qedi_ep->iscsi_cid = (u32)-1;
        qedi_ep->qedi = qedi;
@@ -1056,12 +1088,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
        qedi_ep = ep->dd_data;
        qedi = qedi_ep->qedi;
 
+       flush_work(&qedi_ep->offload_work);
+
        if (qedi_ep->state == EP_STATE_OFLDCONN_START)
                goto ep_exit_recover;
 
-       if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
-               flush_work(&qedi_ep->offload_work);
-
        if (qedi_ep->conn) {
                qedi_conn = qedi_ep->conn;
                abrt_conn = qedi_conn->abrt_conn;
@@ -1235,37 +1266,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
        return rc;
 }
 
-static void qedi_offload_work(struct work_struct *work)
-{
-       struct qedi_endpoint *qedi_ep =
-               container_of(work, struct qedi_endpoint, offload_work);
-       struct qedi_ctx *qedi;
-       int wait_delay = 5 * HZ;
-       int ret;
-
-       qedi = qedi_ep->qedi;
-
-       ret = qedi_iscsi_offload_conn(qedi_ep);
-       if (ret) {
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
-                        qedi_ep->iscsi_cid, qedi_ep, ret);
-               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-               return;
-       }
-
-       ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
-                                              (qedi_ep->state ==
-                                              EP_STATE_OFLDCONN_COMPL),
-                                              wait_delay);
-       if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
-               qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
-               QEDI_ERR(&qedi->dbg_ctx,
-                        "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
-                        qedi_ep->iscsi_cid, qedi_ep);
-       }
-}
-
 static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
 {
        struct qedi_ctx *qedi;
@@ -1381,7 +1381,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
                          qedi_ep->dst_addr, qedi_ep->dst_port);
        }
 
-       INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
        queue_work(qedi->offload_thread, &qedi_ep->offload_work);
 
        ret = 0;
index c607755..592a290 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/blkdev.h>
 #include <linux/crc-t10dif.h>
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/atomic.h>
 #include <linux/hrtimer.h>
@@ -732,9 +731,7 @@ static const struct opcode_info_t opcode_info_arr[SDEB_I_LAST_ELEM_P1 + 1] = {
            {0,  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} },
 };
 
-static atomic_t sdebug_num_hosts;
-static DEFINE_MUTEX(add_host_mutex);
-
+static int sdebug_num_hosts;
 static int sdebug_add_host = DEF_NUM_HOST;  /* in sysfs this is relative */
 static int sdebug_ato = DEF_ATO;
 static int sdebug_cdb_len = DEF_CDB_LEN;
@@ -781,7 +778,6 @@ static int sdebug_uuid_ctl = DEF_UUID_CTL;
 static bool sdebug_random = DEF_RANDOM;
 static bool sdebug_per_host_store = DEF_PER_HOST_STORE;
 static bool sdebug_removable = DEF_REMOVABLE;
-static bool sdebug_deflect_incoming;
 static bool sdebug_clustering;
 static bool sdebug_host_lock = DEF_HOST_LOCK;
 static bool sdebug_strict = DEF_STRICT;
@@ -5122,10 +5118,6 @@ static int scsi_debug_slave_configure(struct scsi_device *sdp)
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
        if (sdp->host->max_cmd_len != SDEBUG_MAX_CMD_LEN)
                sdp->host->max_cmd_len = SDEBUG_MAX_CMD_LEN;
-       if (smp_load_acquire(&sdebug_deflect_incoming)) {
-               pr_info("Exit early due to deflect_incoming\n");
-               return 1;
-       }
        if (devip == NULL) {
                devip = find_build_dev_info(sdp);
                if (devip == NULL)
@@ -5211,7 +5203,7 @@ static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
 }
 
 /* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(bool done_with_no_conn)
+static void stop_all_queued(void)
 {
        unsigned long iflags;
        int j, k;
@@ -5220,15 +5212,13 @@ static void stop_all_queued(bool done_with_no_conn)
        struct sdebug_queued_cmd *sqcp;
        struct sdebug_dev_info *devip;
        struct sdebug_defer *sd_dp;
-       struct scsi_cmnd *scp;
 
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
                spin_lock_irqsave(&sqp->qc_lock, iflags);
                for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
                        if (test_bit(k, sqp->in_use_bm)) {
                                sqcp = &sqp->qc_arr[k];
-                               scp = sqcp->a_cmnd;
-                               if (!scp)
+                               if (sqcp->a_cmnd == NULL)
                                        continue;
                                devip = (struct sdebug_dev_info *)
                                        sqcp->a_cmnd->device->hostdata;
@@ -5243,10 +5233,6 @@ static void stop_all_queued(bool done_with_no_conn)
                                        l_defer_t = SDEB_DEFER_NONE;
                                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
                                stop_qc_helper(sd_dp, l_defer_t);
-                               if (done_with_no_conn && l_defer_t != SDEB_DEFER_NONE) {
-                                       scp->result = DID_NO_CONNECT << 16;
-                                       scsi_done(scp);
-                               }
                                clear_bit(k, sqp->in_use_bm);
                                spin_lock_irqsave(&sqp->qc_lock, iflags);
                        }
@@ -5389,7 +5375,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
                }
        }
        spin_unlock(&sdebug_host_list_lock);
-       stop_all_queued(false);
+       stop_all_queued();
        if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
                sdev_printk(KERN_INFO, SCpnt->device,
                            "%s: %d device(s) found\n", __func__, k);
@@ -5449,50 +5435,13 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
        }
 }
 
-static void sdeb_block_all_queues(void)
-{
-       int j;
-       struct sdebug_queue *sqp;
-
-       for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
-               atomic_set(&sqp->blocked, (int)true);
-}
-
-static void sdeb_unblock_all_queues(void)
+static void block_unblock_all_queues(bool block)
 {
        int j;
        struct sdebug_queue *sqp;
 
        for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
-               atomic_set(&sqp->blocked, (int)false);
-}
-
-static void
-sdeb_add_n_hosts(int num_hosts)
-{
-       if (num_hosts < 1)
-               return;
-       do {
-               bool found;
-               unsigned long idx;
-               struct sdeb_store_info *sip;
-               bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
-
-               found = false;
-               if (want_phs) {
-                       xa_for_each_marked(per_store_ap, idx, sip, SDEB_XA_NOT_IN_USE) {
-                               sdeb_most_recent_idx = (int)idx;
-                               found = true;
-                               break;
-                       }
-                       if (found)      /* re-use case */
-                               sdebug_add_host_helper((int)idx);
-                       else
-                               sdebug_do_add_host(true /* make new store */);
-               } else {
-                       sdebug_do_add_host(false);
-               }
-       } while (--num_hosts);
+               atomic_set(&sqp->blocked, (int)block);
 }
 
 /* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5505,10 +5454,10 @@ static void tweak_cmnd_count(void)
        modulo = abs(sdebug_every_nth);
        if (modulo < 2)
                return;
-       sdeb_block_all_queues();
+       block_unblock_all_queues(true);
        count = atomic_read(&sdebug_cmnd_count);
        atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
-       sdeb_unblock_all_queues();
+       block_unblock_all_queues(false);
 }
 
 static void clear_queue_stats(void)
@@ -5526,15 +5475,6 @@ static bool inject_on_this_cmd(void)
        return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
 }
 
-static int process_deflect_incoming(struct scsi_cmnd *scp)
-{
-       u8 opcode = scp->cmnd[0];
-
-       if (opcode == SYNCHRONIZE_CACHE || opcode == SYNCHRONIZE_CACHE_16)
-               return 0;
-       return DID_NO_CONNECT << 16;
-}
-
 #define INCLUSIVE_TIMING_MAX_NS 1000000                /* 1 millisecond */
 
 /* Complete the processing of the thread that queued a SCSI command to this
@@ -5544,7 +5484,8 @@ static int process_deflect_incoming(struct scsi_cmnd *scp)
  */
 static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
                         int scsi_result,
-                        int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *),
+                        int (*pfp)(struct scsi_cmnd *,
+                                   struct sdebug_dev_info *),
                         int delta_jiff, int ndelay)
 {
        bool new_sd_dp;
@@ -5565,27 +5506,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
        }
        sdp = cmnd->device;
 
-       if (delta_jiff == 0) {
-               sqp = get_queue(cmnd);
-               if (atomic_read(&sqp->blocked)) {
-                       if (smp_load_acquire(&sdebug_deflect_incoming))
-                               return process_deflect_incoming(cmnd);
-                       else
-                               return SCSI_MLQUEUE_HOST_BUSY;
-               }
+       if (delta_jiff == 0)
                goto respond_in_thread;
-       }
 
        sqp = get_queue(cmnd);
        spin_lock_irqsave(&sqp->qc_lock, iflags);
        if (unlikely(atomic_read(&sqp->blocked))) {
                spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-               if (smp_load_acquire(&sdebug_deflect_incoming)) {
-                       scsi_result = process_deflect_incoming(cmnd);
-                       goto respond_in_thread;
-               }
-               if (sdebug_verbose)
-                       pr_info("blocked --> SCSI_MLQUEUE_HOST_BUSY\n");
                return SCSI_MLQUEUE_HOST_BUSY;
        }
        num_in_q = atomic_read(&devip->num_in_q);
@@ -5774,12 +5701,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
 respond_in_thread:     /* call back to mid-layer using invocation thread */
        cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0;
        cmnd->result &= ~SDEG_RES_IMMED_MASK;
-       if (cmnd->result == 0 && scsi_result != 0) {
+       if (cmnd->result == 0 && scsi_result != 0)
                cmnd->result = scsi_result;
-               if (sdebug_verbose)
-                       pr_info("respond_in_thread: tag=0x%x, scp->result=0x%x\n",
-                               blk_mq_unique_tag(scsi_cmd_to_rq(cmnd)), scsi_result);
-       }
        scsi_done(cmnd);
        return 0;
 }
@@ -6064,7 +5987,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
                        int j, k;
                        struct sdebug_queue *sqp;
 
-                       sdeb_block_all_queues();
+                       block_unblock_all_queues(true);
                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                             ++j, ++sqp) {
                                k = find_first_bit(sqp->in_use_bm,
@@ -6078,7 +6001,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
                                sdebug_jdelay = jdelay;
                                sdebug_ndelay = 0;
                        }
-                       sdeb_unblock_all_queues();
+                       block_unblock_all_queues(false);
                }
                return res;
        }
@@ -6104,7 +6027,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
                        int j, k;
                        struct sdebug_queue *sqp;
 
-                       sdeb_block_all_queues();
+                       block_unblock_all_queues(true);
                        for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                             ++j, ++sqp) {
                                k = find_first_bit(sqp->in_use_bm,
@@ -6119,7 +6042,7 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
                                sdebug_jdelay = ndelay  ? JDELAY_OVERRIDDEN
                                                        : DEF_JDELAY;
                        }
-                       sdeb_unblock_all_queues();
+                       block_unblock_all_queues(false);
                }
                return res;
        }
@@ -6433,7 +6356,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
        if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
            (n <= SDEBUG_CANQUEUE) &&
            (sdebug_host_max_queue == 0)) {
-               sdeb_block_all_queues();
+               block_unblock_all_queues(true);
                k = 0;
                for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
                     ++j, ++sqp) {
@@ -6448,7 +6371,7 @@ static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
                        atomic_set(&retired_max_queue, k + 1);
                else
                        atomic_set(&retired_max_queue, 0);
-               sdeb_unblock_all_queues();
+               block_unblock_all_queues(false);
                return count;
        }
        return -EINVAL;
@@ -6537,48 +6460,43 @@ static DRIVER_ATTR_RW(virtual_gb);
 static ssize_t add_host_show(struct device_driver *ddp, char *buf)
 {
        /* absolute number of hosts currently active is what is shown */
-       return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&sdebug_num_hosts));
+       return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_num_hosts);
 }
 
-/*
- * Accept positive and negative values. Hex values (only positive) may be prefixed by '0x'.
- * To remove all hosts use a large negative number (e.g. -9999). The value 0 does nothing.
- * Returns -EBUSY if another add_host sysfs invocation is active.
- */
 static ssize_t add_host_store(struct device_driver *ddp, const char *buf,
                              size_t count)
 {
+       bool found;
+       unsigned long idx;
+       struct sdeb_store_info *sip;
+       bool want_phs = (sdebug_fake_rw == 0) && sdebug_per_host_store;
        int delta_hosts;
 
-       if (count == 0 || kstrtoint(buf, 0, &delta_hosts))
+       if (sscanf(buf, "%d", &delta_hosts) != 1)
                return -EINVAL;
-       if (sdebug_verbose)
-               pr_info("prior num_hosts=%d, num_to_add=%d\n",
-                       atomic_read(&sdebug_num_hosts), delta_hosts);
-       if (delta_hosts == 0)
-               return count;
-       if (mutex_trylock(&add_host_mutex) == 0)
-               return -EBUSY;
        if (delta_hosts > 0) {
-               sdeb_add_n_hosts(delta_hosts);
-       } else if (delta_hosts < 0) {
-               smp_store_release(&sdebug_deflect_incoming, true);
-               sdeb_block_all_queues();
-               if (delta_hosts >= atomic_read(&sdebug_num_hosts))
-                       stop_all_queued(true);
                do {
-                       if (atomic_read(&sdebug_num_hosts) < 1) {
-                               free_all_queued();
-                               break;
+                       found = false;
+                       if (want_phs) {
+                               xa_for_each_marked(per_store_ap, idx, sip,
+                                                  SDEB_XA_NOT_IN_USE) {
+                                       sdeb_most_recent_idx = (int)idx;
+                                       found = true;
+                                       break;
+                               }
+                               if (found)      /* re-use case */
+                                       sdebug_add_host_helper((int)idx);
+                               else
+                                       sdebug_do_add_host(true);
+                       } else {
+                               sdebug_do_add_host(false);
                        }
+               } while (--delta_hosts);
+       } else if (delta_hosts < 0) {
+               do {
                        sdebug_do_remove_host(false);
                } while (++delta_hosts);
-               sdeb_unblock_all_queues();
-               smp_store_release(&sdebug_deflect_incoming, false);
        }
-       mutex_unlock(&add_host_mutex);
-       if (sdebug_verbose)
-               pr_info("post num_hosts=%d\n", atomic_read(&sdebug_num_hosts));
        return count;
 }
 static DRIVER_ATTR_RW(add_host);
@@ -7089,10 +7007,6 @@ static int __init scsi_debug_init(void)
        sdebug_add_host = 0;
 
        for (k = 0; k < hosts_to_add; k++) {
-               if (smp_load_acquire(&sdebug_deflect_incoming)) {
-                       pr_info("exit early as sdebug_deflect_incoming is set\n");
-                       return 0;
-               }
                if (want_store && k == 0) {
                        ret = sdebug_add_host_helper(idx);
                        if (ret < 0) {
@@ -7110,12 +7024,8 @@ static int __init scsi_debug_init(void)
                }
        }
        if (sdebug_verbose)
-               pr_info("built %d host(s)\n", atomic_read(&sdebug_num_hosts));
+               pr_info("built %d host(s)\n", sdebug_num_hosts);
 
-       /*
-        * Even though all the hosts have been established, due to async device (LU) scanning
-        * by the scsi mid-level, there may still be devices (LUs) being set up.
-        */
        return 0;
 
 bus_unreg:
@@ -7131,17 +7041,12 @@ free_q_arr:
 
 static void __exit scsi_debug_exit(void)
 {
-       int k;
+       int k = sdebug_num_hosts;
 
-       /* Possible race with LUs still being set up; stop them asap */
-       sdeb_block_all_queues();
-       smp_store_release(&sdebug_deflect_incoming, true);
-       stop_all_queued(false);
-       for (k = 0; atomic_read(&sdebug_num_hosts) > 0; k++)
+       stop_all_queued();
+       for (; k; k--)
                sdebug_do_remove_host(true);
        free_all_queued();
-       if (sdebug_verbose)
-               pr_info("removed %d hosts\n", k);
        driver_unregister(&sdebug_driverfs_driver);
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
@@ -7311,13 +7216,13 @@ static int sdebug_add_host_helper(int per_host_idx)
        sdbg_host->dev.bus = &pseudo_lld_bus;
        sdbg_host->dev.parent = pseudo_primary;
        sdbg_host->dev.release = &sdebug_release_adapter;
-       dev_set_name(&sdbg_host->dev, "adapter%d", atomic_read(&sdebug_num_hosts));
+       dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts);
 
        error = device_register(&sdbg_host->dev);
        if (error)
                goto clean;
 
-       atomic_inc(&sdebug_num_hosts);
+       ++sdebug_num_hosts;
        return 0;
 
 clean:
@@ -7381,7 +7286,7 @@ static void sdebug_do_remove_host(bool the_end)
                return;
 
        device_unregister(&sdbg_host->dev);
-       atomic_dec(&sdebug_num_hosts);
+       --sdebug_num_hosts;
 }
 
 static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
@@ -7389,10 +7294,10 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
        int num_in_q = 0;
        struct sdebug_dev_info *devip;
 
-       sdeb_block_all_queues();
+       block_unblock_all_queues(true);
        devip = (struct sdebug_dev_info *)sdev->hostdata;
        if (NULL == devip) {
-               sdeb_unblock_all_queues();
+               block_unblock_all_queues(false);
                return  -ENODEV;
        }
        num_in_q = atomic_read(&devip->num_in_q);
@@ -7411,7 +7316,7 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
                sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d, num_in_q=%d\n",
                            __func__, qdepth, num_in_q);
        }
-       sdeb_unblock_all_queues();
+       block_unblock_all_queues(false);
        return sdev->queue_depth;
 }
 
@@ -7519,12 +7424,13 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
        struct sdebug_defer *sd_dp;
 
        sqp = sdebug_q_arr + queue_num;
-       qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
-       if (qc_idx >= sdebug_max_queue)
-               return 0;
 
        spin_lock_irqsave(&sqp->qc_lock, iflags);
 
+       qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
+       if (qc_idx >= sdebug_max_queue)
+               goto unlock;
+
        for (first = true; first || qc_idx + 1 < sdebug_max_queue; )   {
                if (first) {
                        first = false;
@@ -7589,6 +7495,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
                        break;
        }
 
+unlock:
        spin_unlock_irqrestore(&sqp->qc_lock, iflags);
 
        if (num_entries > 0)
index ff89de8..b02af34 100644 (file)
@@ -30,7 +30,7 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
 {
        struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
 
-       if (!rq->q->disk)
+       if (!rq->q || !rq->q->disk)
                return NULL;
        return rq->q->disk->disk_name;
 }
index f4e6c68..2ef7808 100644 (file)
@@ -223,6 +223,8 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
        int ret;
        struct sbitmap sb_backup;
 
+       depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
+
        /*
         * realloc if new shift is calculated, which is caused by setting
         * up one new default queue depth after calling ->slave_configure
@@ -245,6 +247,9 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
                                scsi_device_max_queue_depth(sdev),
                                new_shift, GFP_KERNEL,
                                sdev->request_queue->node, false, true);
+       if (!ret)
+               sbitmap_resize(&sdev->budget_map, depth);
+
        if (need_free) {
                if (ret)
                        sdev->budget_map = sb_backup;
index 226a509..dc6872e 100644 (file)
@@ -1384,10 +1384,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
        if (IS_ENABLED(CONFIG_BLK_DEV_BSG)) {
                sdev->bsg_dev = scsi_bsg_register_queue(sdev);
                if (IS_ERR(sdev->bsg_dev)) {
-                       /*
-                        * We're treating error on bsg register as non-fatal, so
-                        * pretend nothing went wrong.
-                        */
                        error = PTR_ERR(sdev->bsg_dev);
                        sdev_printk(KERN_INFO, sdev,
                                    "Failed to register bsg queue, errno=%d\n",
index 27951ea..2c0dd64 100644 (file)
@@ -86,6 +86,9 @@ struct iscsi_internal {
        struct transport_container session_cont;
 };
 
+static DEFINE_IDR(iscsi_ep_idr);
+static DEFINE_MUTEX(iscsi_ep_idr_mutex);
+
 static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
 
 static struct workqueue_struct *iscsi_conn_cleanup_workq;
@@ -168,6 +171,11 @@ struct device_attribute dev_attr_##_prefix##_##_name =     \
 static void iscsi_endpoint_release(struct device *dev)
 {
        struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
+
+       mutex_lock(&iscsi_ep_idr_mutex);
+       idr_remove(&iscsi_ep_idr, ep->id);
+       mutex_unlock(&iscsi_ep_idr_mutex);
+
        kfree(ep);
 }
 
@@ -180,7 +188,7 @@ static ssize_t
 show_ep_handle(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-       return sysfs_emit(buf, "%llu\n", (unsigned long long) ep->id);
+       return sysfs_emit(buf, "%d\n", ep->id);
 }
 static ISCSI_ATTR(ep, handle, S_IRUGO, show_ep_handle, NULL);
 
@@ -193,48 +201,32 @@ static struct attribute_group iscsi_endpoint_group = {
        .attrs = iscsi_endpoint_attrs,
 };
 
-#define ISCSI_MAX_EPID -1
-
-static int iscsi_match_epid(struct device *dev, const void *data)
-{
-       struct iscsi_endpoint *ep = iscsi_dev_to_endpoint(dev);
-       const uint64_t *epid = data;
-
-       return *epid == ep->id;
-}
-
 struct iscsi_endpoint *
 iscsi_create_endpoint(int dd_size)
 {
-       struct device *dev;
        struct iscsi_endpoint *ep;
-       uint64_t id;
-       int err;
-
-       for (id = 1; id < ISCSI_MAX_EPID; id++) {
-               dev = class_find_device(&iscsi_endpoint_class, NULL, &id,
-                                       iscsi_match_epid);
-               if (!dev)
-                       break;
-               else
-                       put_device(dev);
-       }
-       if (id == ISCSI_MAX_EPID) {
-               printk(KERN_ERR "Too many connections. Max supported %u\n",
-                      ISCSI_MAX_EPID - 1);
-               return NULL;
-       }
+       int err, id;
 
        ep = kzalloc(sizeof(*ep) + dd_size, GFP_KERNEL);
        if (!ep)
                return NULL;
 
+       mutex_lock(&iscsi_ep_idr_mutex);
+       id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO);
+       if (id < 0) {
+               mutex_unlock(&iscsi_ep_idr_mutex);
+               printk(KERN_ERR "Could not allocate endpoint ID. Error %d.\n",
+                      id);
+               goto free_ep;
+       }
+       mutex_unlock(&iscsi_ep_idr_mutex);
+
        ep->id = id;
        ep->dev.class = &iscsi_endpoint_class;
-       dev_set_name(&ep->dev, "ep-%llu", (unsigned long long) id);
+       dev_set_name(&ep->dev, "ep-%d", id);
        err = device_register(&ep->dev);
         if (err)
-                goto free_ep;
+               goto free_id;
 
        err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group);
        if (err)
@@ -248,6 +240,10 @@ unregister_dev:
        device_unregister(&ep->dev);
        return NULL;
 
+free_id:
+       mutex_lock(&iscsi_ep_idr_mutex);
+       idr_remove(&iscsi_ep_idr, id);
+       mutex_unlock(&iscsi_ep_idr_mutex);
 free_ep:
        kfree(ep);
        return NULL;
@@ -275,14 +271,17 @@ EXPORT_SYMBOL_GPL(iscsi_put_endpoint);
  */
 struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
 {
-       struct device *dev;
+       struct iscsi_endpoint *ep;
 
-       dev = class_find_device(&iscsi_endpoint_class, NULL, &handle,
-                               iscsi_match_epid);
-       if (!dev)
-               return NULL;
+       mutex_lock(&iscsi_ep_idr_mutex);
+       ep = idr_find(&iscsi_ep_idr, handle);
+       if (!ep)
+               goto unlock;
 
-       return iscsi_dev_to_endpoint(dev);
+       get_device(&ep->dev);
+unlock:
+       mutex_unlock(&iscsi_ep_idr_mutex);
+       return ep;
 }
 EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
 
@@ -2202,10 +2201,10 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
 
        switch (flag) {
        case STOP_CONN_RECOVER:
-               conn->state = ISCSI_CONN_FAILED;
+               WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
                break;
        case STOP_CONN_TERM:
-               conn->state = ISCSI_CONN_DOWN;
+               WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
                break;
        default:
                iscsi_cls_conn_printk(KERN_ERR, conn, "invalid stop flag %d\n",
@@ -2217,6 +2216,49 @@ static void iscsi_stop_conn(struct iscsi_cls_conn *conn, int flag)
        ISCSI_DBG_TRANS_CONN(conn, "Stopping conn done.\n");
 }
 
+static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
+{
+       struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
+       struct iscsi_endpoint *ep;
+
+       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
+       WRITE_ONCE(conn->state, ISCSI_CONN_FAILED);
+
+       if (!conn->ep || !session->transport->ep_disconnect)
+               return;
+
+       ep = conn->ep;
+       conn->ep = NULL;
+
+       session->transport->unbind_conn(conn, is_active);
+       session->transport->ep_disconnect(ep);
+       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
+}
+
+static void iscsi_if_disconnect_bound_ep(struct iscsi_cls_conn *conn,
+                                        struct iscsi_endpoint *ep,
+                                        bool is_active)
+{
+       /* Check if this was a conn error and the kernel took ownership */
+       spin_lock_irq(&conn->lock);
+       if (!test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+               spin_unlock_irq(&conn->lock);
+               iscsi_ep_disconnect(conn, is_active);
+       } else {
+               spin_unlock_irq(&conn->lock);
+               ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
+               mutex_unlock(&conn->ep_mutex);
+
+               flush_work(&conn->cleanup_work);
+               /*
+                * Userspace is now done with the EP so we can release the ref
+                * iscsi_cleanup_conn_work_fn took.
+                */
+               iscsi_put_endpoint(ep);
+               mutex_lock(&conn->ep_mutex);
+       }
+}
+
 static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                              struct iscsi_uevent *ev)
 {
@@ -2238,11 +2280,24 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                iscsi_stop_conn(conn, flag);
        } else {
                /*
+                * For offload, when iscsid is restarted it won't know about
+                * existing endpoints so it can't do a ep_disconnect. We clean
+                * it up here for userspace.
+                */
+               mutex_lock(&conn->ep_mutex);
+               if (conn->ep)
+                       iscsi_if_disconnect_bound_ep(conn, conn->ep, true);
+               mutex_unlock(&conn->ep_mutex);
+
+               /*
                 * Figure out if it was the kernel or userspace initiating this.
                 */
+               spin_lock_irq(&conn->lock);
                if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+                       spin_unlock_irq(&conn->lock);
                        iscsi_stop_conn(conn, flag);
                } else {
+                       spin_unlock_irq(&conn->lock);
                        ISCSI_DBG_TRANS_CONN(conn,
                                             "flush kernel conn cleanup.\n");
                        flush_work(&conn->cleanup_work);
@@ -2251,31 +2306,14 @@ static int iscsi_if_stop_conn(struct iscsi_transport *transport,
                 * Only clear for recovery to avoid extra cleanup runs during
                 * termination.
                 */
+               spin_lock_irq(&conn->lock);
                clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
+               spin_unlock_irq(&conn->lock);
        }
        ISCSI_DBG_TRANS_CONN(conn, "iscsi if conn stop done.\n");
        return 0;
 }
 
-static void iscsi_ep_disconnect(struct iscsi_cls_conn *conn, bool is_active)
-{
-       struct iscsi_cls_session *session = iscsi_conn_to_session(conn);
-       struct iscsi_endpoint *ep;
-
-       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep.\n");
-       conn->state = ISCSI_CONN_FAILED;
-
-       if (!conn->ep || !session->transport->ep_disconnect)
-               return;
-
-       ep = conn->ep;
-       conn->ep = NULL;
-
-       session->transport->unbind_conn(conn, is_active);
-       session->transport->ep_disconnect(ep);
-       ISCSI_DBG_TRANS_CONN(conn, "disconnect ep done.\n");
-}
-
 static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
 {
        struct iscsi_cls_conn *conn = container_of(work, struct iscsi_cls_conn,
@@ -2284,18 +2322,11 @@ static void iscsi_cleanup_conn_work_fn(struct work_struct *work)
 
        mutex_lock(&conn->ep_mutex);
        /*
-        * If we are not at least bound there is nothing for us to do. Userspace
-        * will do a ep_disconnect call if offload is used, but will not be
-        * doing a stop since there is nothing to clean up, so we have to clear
-        * the cleanup bit here.
+        * Get a ref to the ep, so we don't release its ID until after
+        * userspace is done referencing it in iscsi_if_disconnect_bound_ep.
         */
-       if (conn->state != ISCSI_CONN_BOUND && conn->state != ISCSI_CONN_UP) {
-               ISCSI_DBG_TRANS_CONN(conn, "Got error while conn is already failed. Ignoring.\n");
-               clear_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags);
-               mutex_unlock(&conn->ep_mutex);
-               return;
-       }
-
+       if (conn->ep)
+               get_device(&conn->ep->dev);
        iscsi_ep_disconnect(conn, false);
 
        if (system_state != SYSTEM_RUNNING) {
@@ -2340,11 +2371,12 @@ iscsi_alloc_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid)
                conn->dd_data = &conn[1];
 
        mutex_init(&conn->ep_mutex);
+       spin_lock_init(&conn->lock);
        INIT_LIST_HEAD(&conn->conn_list);
        INIT_WORK(&conn->cleanup_work, iscsi_cleanup_conn_work_fn);
        conn->transport = transport;
        conn->cid = cid;
-       conn->state = ISCSI_CONN_DOWN;
+       WRITE_ONCE(conn->state, ISCSI_CONN_DOWN);
 
        /* this is released in the dev's release function */
        if (!get_device(&session->dev))
@@ -2542,9 +2574,32 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
        struct iscsi_uevent *ev;
        struct iscsi_internal *priv;
        int len = nlmsg_total_size(sizeof(*ev));
+       unsigned long flags;
+       int state;
 
-       if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags))
-               queue_work(iscsi_conn_cleanup_workq, &conn->cleanup_work);
+       spin_lock_irqsave(&conn->lock, flags);
+       /*
+        * Userspace will only do a stop call if we are at least bound. And, we
+        * only need to do the in kernel cleanup if in the UP state so cmds can
+        * be released to upper layers. If in other states just wait for
+        * userspace to avoid races that can leave the cleanup_work queued.
+        */
+       state = READ_ONCE(conn->state);
+       switch (state) {
+       case ISCSI_CONN_BOUND:
+       case ISCSI_CONN_UP:
+               if (!test_and_set_bit(ISCSI_CLS_CONN_BIT_CLEANUP,
+                                     &conn->flags)) {
+                       queue_work(iscsi_conn_cleanup_workq,
+                                  &conn->cleanup_work);
+               }
+               break;
+       default:
+               ISCSI_DBG_TRANS_CONN(conn, "Got conn error in state %d\n",
+                                    state);
+               break;
+       }
+       spin_unlock_irqrestore(&conn->lock, flags);
 
        priv = iscsi_if_transport_lookup(conn->transport);
        if (!priv)
@@ -2894,7 +2949,7 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        char *data = (char*)ev + sizeof(*ev);
        struct iscsi_cls_conn *conn;
        struct iscsi_cls_session *session;
-       int err = 0, value = 0;
+       int err = 0, value = 0, state;
 
        if (ev->u.set_param.len > PAGE_SIZE)
                return -EINVAL;
@@ -2911,8 +2966,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
                        session->recovery_tmo = value;
                break;
        default:
-               if ((conn->state == ISCSI_CONN_BOUND) ||
-                       (conn->state == ISCSI_CONN_UP)) {
+               state = READ_ONCE(conn->state);
+               if (state == ISCSI_CONN_BOUND || state == ISCSI_CONN_UP) {
                        err = transport->set_param(conn, ev->u.set_param.param,
                                        data, ev->u.set_param.len);
                } else {
@@ -2984,16 +3039,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
        }
 
        mutex_lock(&conn->ep_mutex);
-       /* Check if this was a conn error and the kernel took ownership */
-       if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
-               ISCSI_DBG_TRANS_CONN(conn, "flush kernel conn cleanup.\n");
-               mutex_unlock(&conn->ep_mutex);
-
-               flush_work(&conn->cleanup_work);
-               goto put_ep;
-       }
-
-       iscsi_ep_disconnect(conn, false);
+       iscsi_if_disconnect_bound_ep(conn, ep, false);
        mutex_unlock(&conn->ep_mutex);
 put_ep:
        iscsi_put_endpoint(ep);
@@ -3696,24 +3742,17 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
                return -EINVAL;
 
        mutex_lock(&conn->ep_mutex);
+       spin_lock_irq(&conn->lock);
        if (test_bit(ISCSI_CLS_CONN_BIT_CLEANUP, &conn->flags)) {
+               spin_unlock_irq(&conn->lock);
                mutex_unlock(&conn->ep_mutex);
                ev->r.retcode = -ENOTCONN;
                return 0;
        }
+       spin_unlock_irq(&conn->lock);
 
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_BIND_CONN:
-               if (conn->ep) {
-                       /*
-                        * For offload boot support where iscsid is restarted
-                        * during the pivot root stage, the ep will be intact
-                        * here when the new iscsid instance starts up and
-                        * reconnects.
-                        */
-                       iscsi_ep_disconnect(conn, true);
-               }
-
                session = iscsi_session_lookup(ev->u.b_conn.sid);
                if (!session) {
                        err = -EINVAL;
@@ -3724,7 +3763,7 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
                if (!ev->r.retcode)
-                       conn->state = ISCSI_CONN_BOUND;
+                       WRITE_ONCE(conn->state, ISCSI_CONN_BOUND);
 
                if (ev->r.retcode || !transport->ep_connect)
                        break;
@@ -3743,7 +3782,8 @@ static int iscsi_if_transport_conn(struct iscsi_transport *transport,
        case ISCSI_UEVENT_START_CONN:
                ev->r.retcode = transport->start_conn(conn);
                if (!ev->r.retcode)
-                       conn->state = ISCSI_CONN_UP;
+                       WRITE_ONCE(conn->state, ISCSI_CONN_UP);
+
                break;
        case ISCSI_UEVENT_SEND_PDU:
                pdu_len = nlh->nlmsg_len - sizeof(*nlh) - sizeof(*ev);
@@ -4050,10 +4090,11 @@ static ssize_t show_conn_state(struct device *dev,
 {
        struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev->parent);
        const char *state = "unknown";
+       int conn_state = READ_ONCE(conn->state);
 
-       if (conn->state >= 0 &&
-           conn->state < ARRAY_SIZE(connection_state_names))
-               state = connection_state_names[conn->state];
+       if (conn_state >= 0 &&
+           conn_state < ARRAY_SIZE(connection_state_names))
+               state = connection_state_names[conn_state];
 
        return sysfs_emit(buf, "%s\n", state);
 }
index a390679..dc6e557 100644 (file)
@@ -3216,6 +3216,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
                        sd_read_block_limits(sdkp);
                        sd_read_block_characteristics(sdkp);
                        sd_zbc_read_zones(sdkp, buffer);
+                       sd_read_cpr(sdkp);
                }
 
                sd_print_capacity(sdkp, old_capacity);
@@ -3225,7 +3226,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_app_tag_own(sdkp, buffer);
                sd_read_write_same(sdkp, buffer);
                sd_read_security(sdkp, buffer);
-               sd_read_cpr(sdkp);
        }
 
        /*
@@ -3475,6 +3475,7 @@ static int sd_probe(struct device *dev)
        error = device_add_disk(dev, gd, NULL);
        if (error) {
                put_device(&sdkp->disk_dev);
+               blk_cleanup_disk(gd);
                goto out;
        }
 
index 5ba9df3..cbd9289 100644 (file)
@@ -535,7 +535,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 
        scsi_autopm_get_device(sdev);
 
-       if (ret != CDROMCLOSETRAY && ret != CDROMEJECT) {
+       if (cmd != CDROMCLOSETRAY && cmd != CDROMEJECT) {
                ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
                if (ret != -ENOSYS)
                        goto put;
index 0d2e950..586c0e5 100644 (file)
@@ -957,18 +957,6 @@ static const struct reset_control_ops ufs_qcom_reset_ops = {
        .deassert = ufs_qcom_reset_deassert,
 };
 
-#define        ANDROID_BOOT_DEV_MAX    30
-static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
-
-#ifndef MODULE
-static int __init get_android_boot_dev(char *str)
-{
-       strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
-       return 1;
-}
-__setup("androidboot.bootdevice=", get_android_boot_dev);
-#endif
-
 /**
  * ufs_qcom_init - bind phy with controller
  * @hba: host controller instance
@@ -988,9 +976,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
        struct resource *res;
        struct ufs_clk_info *clki;
 
-       if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
-               return -ENODEV;
-
        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
index f766920..e892b9f 100644 (file)
@@ -428,6 +428,12 @@ static int ufs_intel_adl_init(struct ufs_hba *hba)
        return ufs_intel_common_init(hba);
 }
 
+static int ufs_intel_mtl_init(struct ufs_hba *hba)
+{
+       hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
+       return ufs_intel_common_init(hba);
+}
+
 static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .name                   = "intel-pci",
        .init                   = ufs_intel_common_init,
@@ -465,6 +471,16 @@ static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
        .device_reset           = ufs_intel_device_reset,
 };
 
+static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
+       .name                   = "intel-pci",
+       .init                   = ufs_intel_mtl_init,
+       .exit                   = ufs_intel_common_exit,
+       .hce_enable_notify      = ufs_intel_hce_enable_notify,
+       .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
+       .device_reset           = ufs_intel_device_reset,
+};
+
 #ifdef CONFIG_PM_SLEEP
 static int ufshcd_pci_restore(struct device *dev)
 {
@@ -579,6 +595,7 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
        { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
        { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+       { PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
        { }     /* terminate list */
 };
 
index 88c20f3..94f545b 100644 (file)
@@ -820,8 +820,6 @@ struct ufs_hba {
        enum ufs_pm_level rpm_lvl;
        /* Desired UFS power management level during system PM */
        enum ufs_pm_level spm_lvl;
-       struct device_attribute rpm_lvl_attr;
-       struct device_attribute spm_lvl_attr;
        int pm_op_in_progress;
 
        /* Auto-Hibernate Idle Timer register value */
index b2bec19..81099b6 100644 (file)
@@ -867,12 +867,6 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
        struct ufshpb_region *rgn, *victim_rgn = NULL;
 
        list_for_each_entry(rgn, &lru_info->lh_lru_rgn, list_lru_rgn) {
-               if (!rgn) {
-                       dev_err(&hpb->sdev_ufs_lu->sdev_dev,
-                               "%s: no region allocated\n",
-                               __func__);
-                       return NULL;
-               }
                if (ufshpb_check_srgns_issue_state(hpb, rgn))
                        continue;
 
@@ -888,6 +882,11 @@ static struct ufshpb_region *ufshpb_victim_lru_info(struct ufshpb_lu *hpb)
                break;
        }
 
+       if (!victim_rgn)
+               dev_err(&hpb->sdev_ufs_lu->sdev_dev,
+                       "%s: no region allocated\n",
+                       __func__);
+
        return victim_rgn;
 }
 
index 0e6110d..578c4b6 100644 (file)
@@ -988,7 +988,7 @@ static struct virtio_driver virtio_scsi_driver = {
        .remove = virtscsi_remove,
 };
 
-static int __init init(void)
+static int __init virtio_scsi_init(void)
 {
        int ret = -ENOMEM;
 
@@ -1020,14 +1020,14 @@ error:
        return ret;
 }
 
-static void __exit fini(void)
+static void __exit virtio_scsi_fini(void)
 {
        unregister_virtio_driver(&virtio_scsi_driver);
        mempool_destroy(virtscsi_cmd_pool);
        kmem_cache_destroy(virtscsi_cmd_cache);
 }
-module_init(init);
-module_exit(fini);
+module_init(virtio_scsi_init);
+module_exit(virtio_scsi_fini);
 
 MODULE_DEVICE_TABLE(virtio, id_table);
 MODULE_DESCRIPTION("Virtio SCSI HBA driver");
index 27b9e2b..7acf919 100644 (file)
@@ -159,6 +159,8 @@ static void zorro7xx_remove_one(struct zorro_dev *z)
        scsi_remove_host(host);
 
        NCR_700_release(host);
+       if (host->base > 0x01000000)
+               iounmap(hostdata->base);
        kfree(hostdata);
        free_irq(host->irq, host);
        zorro_release_device(z);
index 904eec2..6ad4177 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_ARCH_ACTIONS)     += actions/
-obj-$(CONFIG_ARCH_APPLE)       += apple/
+obj-y                          += apple/
 obj-y                          += aspeed/
 obj-$(CONFIG_ARCH_AT91)                += atmel/
 obj-y                          += bcm/
index 9b8de31..a1596fe 100644 (file)
@@ -17,6 +17,30 @@ config APPLE_PMGR_PWRSTATE
          controls for SoC devices. This driver manages them through the
          generic power domain framework, and also provides reset support.
 
+config APPLE_RTKIT
+       tristate "Apple RTKit co-processor IPC protocol"
+       depends on MAILBOX
+       depends on ARCH_APPLE || COMPILE_TEST
+       default ARCH_APPLE
+       help
+         Apple SoCs such as the M1 come with various co-processors running
+         their proprietary RTKit operating system. This option enables support
+         for the protocol library used to communicate with those. It is used
+         by various client drivers.
+
+         Say 'y' here if you have an Apple SoC.
+
+config APPLE_SART
+       tristate "Apple SART DMA address filter"
+       depends on ARCH_APPLE || COMPILE_TEST
+       default ARCH_APPLE
+       help
+         Apple SART is a simple DMA address filter used on Apple SoCs such
+         as the M1. It is usually required for the NVMe coprocessor which does
+         not use a proper IOMMU.
+
+         Say 'y' here if you have an Apple SoC.
+
 endmenu
 
 endif
index c114e84..e293770 100644 (file)
@@ -1,2 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_APPLE_PMGR_PWRSTATE)      += apple-pmgr-pwrstate.o
+
+obj-$(CONFIG_APPLE_RTKIT) += apple-rtkit.o
+apple-rtkit-y = rtkit.o rtkit-crashlog.o
+
+obj-$(CONFIG_APPLE_SART) += apple-sart.o
+apple-sart-y = sart.o
diff --git a/drivers/soc/apple/rtkit-crashlog.c b/drivers/soc/apple/rtkit-crashlog.c
new file mode 100644 (file)
index 0000000..732deed
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+#include "rtkit-internal.h"
+
+#define FOURCC(a, b, c, d) \
+       (((u32)(a) << 24) | ((u32)(b) << 16) | ((u32)(c) << 8) | ((u32)(d)))
+
+#define APPLE_RTKIT_CRASHLOG_HEADER FOURCC('C', 'L', 'H', 'E')
+#define APPLE_RTKIT_CRASHLOG_STR FOURCC('C', 's', 't', 'r')
+#define APPLE_RTKIT_CRASHLOG_VERSION FOURCC('C', 'v', 'e', 'r')
+#define APPLE_RTKIT_CRASHLOG_MBOX FOURCC('C', 'm', 'b', 'x')
+#define APPLE_RTKIT_CRASHLOG_TIME FOURCC('C', 't', 'i', 'm')
+
+struct apple_rtkit_crashlog_header {
+       u32 fourcc;
+       u32 version;
+       u32 size;
+       u32 flags;
+       u8 _unk[16];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_header) == 0x20);
+
+struct apple_rtkit_crashlog_mbox_entry {
+       u64 msg0;
+       u64 msg1;
+       u32 timestamp;
+       u8 _unk[4];
+};
+static_assert(sizeof(struct apple_rtkit_crashlog_mbox_entry) == 0x18);
+
+static void apple_rtkit_crashlog_dump_str(struct apple_rtkit *rtk, u8 *bfr,
+                                         size_t size)
+{
+       u32 idx;
+       u8 *ptr, *end;
+
+       memcpy(&idx, bfr, 4);
+
+       ptr = bfr + 4;
+       end = bfr + size;
+       while (ptr < end) {
+               u8 *newline = memchr(ptr, '\n', end - ptr);
+
+               if (newline) {
+                       u8 tmp = *newline;
+                       *newline = '\0';
+                       dev_warn(rtk->dev, "RTKit: Message (id=%x): %s\n", idx,
+                                ptr);
+                       *newline = tmp;
+                       ptr = newline + 1;
+               } else {
+                       dev_warn(rtk->dev, "RTKit: Message (id=%x): %s", idx,
+                                ptr);
+                       break;
+               }
+       }
+}
+
+static void apple_rtkit_crashlog_dump_version(struct apple_rtkit *rtk, u8 *bfr,
+                                             size_t size)
+{
+       dev_warn(rtk->dev, "RTKit: Version: %s", bfr + 16);
+}
+
+static void apple_rtkit_crashlog_dump_time(struct apple_rtkit *rtk, u8 *bfr,
+                                          size_t size)
+{
+       u64 crash_time;
+
+       memcpy(&crash_time, bfr, 8);
+       dev_warn(rtk->dev, "RTKit: Crash time: %lld", crash_time);
+}
+
+static void apple_rtkit_crashlog_dump_mailbox(struct apple_rtkit *rtk, u8 *bfr,
+                                             size_t size)
+{
+       u32 type, index, i;
+       size_t n_messages;
+       struct apple_rtkit_crashlog_mbox_entry entry;
+
+       memcpy(&type, bfr + 16, 4);
+       memcpy(&index, bfr + 24, 4);
+       n_messages = (size - 28) / sizeof(entry);
+
+       dev_warn(rtk->dev, "RTKit: Mailbox history (type = %d, index = %d)",
+                type, index);
+       for (i = 0; i < n_messages; ++i) {
+               memcpy(&entry, bfr + 28 + i * sizeof(entry), sizeof(entry));
+               dev_warn(rtk->dev, "RTKit:  #%03d@%08x: %016llx %016llx", i,
+                        entry.timestamp, entry.msg0, entry.msg1);
+       }
+}
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size)
+{
+       size_t offset;
+       u32 section_fourcc, section_size;
+       struct apple_rtkit_crashlog_header header;
+
+       memcpy(&header, bfr, sizeof(header));
+       if (header.fourcc != APPLE_RTKIT_CRASHLOG_HEADER) {
+               dev_warn(rtk->dev, "RTKit: Expected crashlog header but got %x",
+                        header.fourcc);
+               return;
+       }
+
+       if (header.size > size) {
+               dev_warn(rtk->dev, "RTKit: Crashlog size (%x) is too large",
+                        header.size);
+               return;
+       }
+
+       size = header.size;
+       offset = sizeof(header);
+
+       while (offset < size) {
+               memcpy(&section_fourcc, bfr + offset, 4);
+               memcpy(&section_size, bfr + offset + 12, 4);
+
+               switch (section_fourcc) {
+               case APPLE_RTKIT_CRASHLOG_HEADER:
+                       dev_dbg(rtk->dev, "RTKit: End of crashlog reached");
+                       return;
+               case APPLE_RTKIT_CRASHLOG_STR:
+                       apple_rtkit_crashlog_dump_str(rtk, bfr + offset + 16,
+                                                     section_size);
+                       break;
+               case APPLE_RTKIT_CRASHLOG_VERSION:
+                       apple_rtkit_crashlog_dump_version(
+                               rtk, bfr + offset + 16, section_size);
+                       break;
+               case APPLE_RTKIT_CRASHLOG_MBOX:
+                       apple_rtkit_crashlog_dump_mailbox(
+                               rtk, bfr + offset + 16, section_size);
+                       break;
+               case APPLE_RTKIT_CRASHLOG_TIME:
+                       apple_rtkit_crashlog_dump_time(rtk, bfr + offset + 16,
+                                                      section_size);
+                       break;
+               default:
+                       dev_warn(rtk->dev,
+                                "RTKit: Unknown crashlog section: %x",
+                                section_fourcc);
+               }
+
+               offset += section_size;
+       }
+
+       dev_warn(rtk->dev,
+                "RTKit: End of crashlog reached but no footer present");
+}
diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h
new file mode 100644 (file)
index 0000000..24bd619
--- /dev/null
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#ifndef _APPLE_RTKIT_INTERAL_H
+#define _APPLE_RTKIT_INTERAL_H
+
+#include <linux/apple-mailbox.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/apple/rtkit.h>
+#include <linux/workqueue.h>
+
+#define APPLE_RTKIT_APP_ENDPOINT_START 0x20
+#define APPLE_RTKIT_MAX_ENDPOINTS 0x100
+
+struct apple_rtkit {
+       void *cookie;
+       const struct apple_rtkit_ops *ops;
+       struct device *dev;
+
+       const char *mbox_name;
+       int mbox_idx;
+       struct mbox_client mbox_cl;
+       struct mbox_chan *mbox_chan;
+
+       struct completion epmap_completion;
+       struct completion iop_pwr_ack_completion;
+       struct completion ap_pwr_ack_completion;
+
+       int boot_result;
+       int version;
+
+       unsigned int iop_power_state;
+       unsigned int ap_power_state;
+       bool crashed;
+
+       DECLARE_BITMAP(endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+
+       struct apple_rtkit_shmem ioreport_buffer;
+       struct apple_rtkit_shmem crashlog_buffer;
+
+       struct apple_rtkit_shmem syslog_buffer;
+       char *syslog_msg_buffer;
+       size_t syslog_n_entries;
+       size_t syslog_msg_size;
+
+       struct workqueue_struct *wq;
+};
+
+void apple_rtkit_crashlog_dump(struct apple_rtkit *rtk, u8 *bfr, size_t size);
+
+#endif
diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c
new file mode 100644 (file)
index 0000000..cf1129e
--- /dev/null
@@ -0,0 +1,958 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple RTKit IPC library
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include "rtkit-internal.h"
+
+enum {
+       APPLE_RTKIT_PWR_STATE_OFF = 0x00, /* power off, cannot be restarted */
+       APPLE_RTKIT_PWR_STATE_SLEEP = 0x01, /* sleeping, can be restarted */
+       APPLE_RTKIT_PWR_STATE_QUIESCED = 0x10, /* running but no communication */
+       APPLE_RTKIT_PWR_STATE_ON = 0x20, /* normal operating state */
+};
+
+enum {
+       APPLE_RTKIT_EP_MGMT = 0,
+       APPLE_RTKIT_EP_CRASHLOG = 1,
+       APPLE_RTKIT_EP_SYSLOG = 2,
+       APPLE_RTKIT_EP_DEBUG = 3,
+       APPLE_RTKIT_EP_IOREPORT = 4,
+       APPLE_RTKIT_EP_OSLOG = 8,
+};
+
+#define APPLE_RTKIT_MGMT_TYPE GENMASK_ULL(59, 52)
+
+enum {
+       APPLE_RTKIT_MGMT_HELLO = 1,
+       APPLE_RTKIT_MGMT_HELLO_REPLY = 2,
+       APPLE_RTKIT_MGMT_STARTEP = 5,
+       APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE = 6,
+       APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK = 7,
+       APPLE_RTKIT_MGMT_EPMAP = 8,
+       APPLE_RTKIT_MGMT_EPMAP_REPLY = 8,
+       APPLE_RTKIT_MGMT_SET_AP_PWR_STATE = 0xb,
+       APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK = 0xb,
+};
+
+#define APPLE_RTKIT_MGMT_HELLO_MINVER GENMASK_ULL(15, 0)
+#define APPLE_RTKIT_MGMT_HELLO_MAXVER GENMASK_ULL(31, 16)
+
+#define APPLE_RTKIT_MGMT_EPMAP_LAST   BIT_ULL(51)
+#define APPLE_RTKIT_MGMT_EPMAP_BASE   GENMASK_ULL(34, 32)
+#define APPLE_RTKIT_MGMT_EPMAP_BITMAP GENMASK_ULL(31, 0)
+
+#define APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE BIT_ULL(0)
+
+#define APPLE_RTKIT_MGMT_STARTEP_EP   GENMASK_ULL(39, 32)
+#define APPLE_RTKIT_MGMT_STARTEP_FLAG BIT_ULL(1)
+
+#define APPLE_RTKIT_MGMT_PWR_STATE GENMASK_ULL(15, 0)
+
+#define APPLE_RTKIT_CRASHLOG_CRASH 1
+
+#define APPLE_RTKIT_BUFFER_REQUEST     1
+#define APPLE_RTKIT_BUFFER_REQUEST_SIZE GENMASK_ULL(51, 44)
+#define APPLE_RTKIT_BUFFER_REQUEST_IOVA GENMASK_ULL(41, 0)
+
+#define APPLE_RTKIT_SYSLOG_TYPE GENMASK_ULL(59, 52)
+
+#define APPLE_RTKIT_SYSLOG_LOG 5
+
+#define APPLE_RTKIT_SYSLOG_INIT             8
+#define APPLE_RTKIT_SYSLOG_N_ENTRIES GENMASK_ULL(7, 0)
+#define APPLE_RTKIT_SYSLOG_MSG_SIZE  GENMASK_ULL(31, 24)
+
+#define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56)
+#define APPLE_RTKIT_OSLOG_INIT 1
+#define APPLE_RTKIT_OSLOG_ACK  3
+
+#define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11
+#define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12
+
+struct apple_rtkit_msg {
+       struct completion *completion;
+       struct apple_mbox_msg mbox_msg;
+};
+
+struct apple_rtkit_rx_work {
+       struct apple_rtkit *rtk;
+       u8 ep;
+       u64 msg;
+       struct work_struct work;
+};
+
+bool apple_rtkit_is_running(struct apple_rtkit *rtk)
+{
+       if (rtk->crashed)
+               return false;
+       if ((rtk->iop_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+               return false;
+       if ((rtk->ap_power_state & 0xff) != APPLE_RTKIT_PWR_STATE_ON)
+               return false;
+       return true;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_running);
+
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk)
+{
+       return rtk->crashed;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_is_crashed);
+
+static void apple_rtkit_management_send(struct apple_rtkit *rtk, u8 type,
+                                       u64 msg)
+{
+       msg &= ~APPLE_RTKIT_MGMT_TYPE;
+       msg |= FIELD_PREP(APPLE_RTKIT_MGMT_TYPE, type);
+       apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_MGMT, msg, NULL, false);
+}
+
+static void apple_rtkit_management_rx_hello(struct apple_rtkit *rtk, u64 msg)
+{
+       u64 reply;
+
+       int min_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MINVER, msg);
+       int max_ver = FIELD_GET(APPLE_RTKIT_MGMT_HELLO_MAXVER, msg);
+       int want_ver = min(APPLE_RTKIT_MAX_SUPPORTED_VERSION, max_ver);
+
+       dev_dbg(rtk->dev, "RTKit: Min ver %d, max ver %d\n", min_ver, max_ver);
+
+       if (min_ver > APPLE_RTKIT_MAX_SUPPORTED_VERSION) {
+               dev_err(rtk->dev, "RTKit: Firmware min version %d is too new\n",
+                       min_ver);
+               goto abort_boot;
+       }
+
+       if (max_ver < APPLE_RTKIT_MIN_SUPPORTED_VERSION) {
+               dev_err(rtk->dev, "RTKit: Firmware max version %d is too old\n",
+                       max_ver);
+               goto abort_boot;
+       }
+
+       dev_info(rtk->dev, "RTKit: Initializing (protocol version %d)\n",
+                want_ver);
+       rtk->version = want_ver;
+
+       reply = FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MINVER, want_ver);
+       reply |= FIELD_PREP(APPLE_RTKIT_MGMT_HELLO_MAXVER, want_ver);
+       apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_HELLO_REPLY, reply);
+
+       return;
+
+abort_boot:
+       rtk->boot_result = -EINVAL;
+       complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_epmap(struct apple_rtkit *rtk, u64 msg)
+{
+       int i, ep;
+       u64 reply;
+       unsigned long bitmap = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BITMAP, msg);
+       u32 base = FIELD_GET(APPLE_RTKIT_MGMT_EPMAP_BASE, msg);
+
+       dev_dbg(rtk->dev,
+               "RTKit: received endpoint bitmap 0x%lx with base 0x%x\n",
+               bitmap, base);
+
+       for_each_set_bit(i, &bitmap, 32) {
+               ep = 32 * base + i;
+               dev_dbg(rtk->dev, "RTKit: Discovered endpoint 0x%02x\n", ep);
+               set_bit(ep, rtk->endpoints);
+       }
+
+       reply = FIELD_PREP(APPLE_RTKIT_MGMT_EPMAP_BASE, base);
+       if (msg & APPLE_RTKIT_MGMT_EPMAP_LAST)
+               reply |= APPLE_RTKIT_MGMT_EPMAP_LAST;
+       else
+               reply |= APPLE_RTKIT_MGMT_EPMAP_REPLY_MORE;
+
+       apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_EPMAP_REPLY, reply);
+
+       if (!(msg & APPLE_RTKIT_MGMT_EPMAP_LAST))
+               return;
+
+       for_each_set_bit(ep, rtk->endpoints, APPLE_RTKIT_APP_ENDPOINT_START) {
+               switch (ep) {
+               /* the management endpoint is started by default */
+               case APPLE_RTKIT_EP_MGMT:
+                       break;
+
+               /* without starting these RTKit refuses to boot */
+               case APPLE_RTKIT_EP_SYSLOG:
+               case APPLE_RTKIT_EP_CRASHLOG:
+               case APPLE_RTKIT_EP_DEBUG:
+               case APPLE_RTKIT_EP_IOREPORT:
+               case APPLE_RTKIT_EP_OSLOG:
+                       dev_dbg(rtk->dev,
+                               "RTKit: Starting system endpoint 0x%02x\n", ep);
+                       apple_rtkit_start_ep(rtk, ep);
+                       break;
+
+               default:
+                       dev_warn(rtk->dev,
+                                "RTKit: Unknown system endpoint: 0x%02x\n",
+                                ep);
+               }
+       }
+
+       rtk->boot_result = 0;
+       complete_all(&rtk->epmap_completion);
+}
+
+static void apple_rtkit_management_rx_iop_pwr_ack(struct apple_rtkit *rtk,
+                                                 u64 msg)
+{
+       unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+       dev_dbg(rtk->dev, "RTKit: IOP power state transition: 0x%x -> 0x%x\n",
+               rtk->iop_power_state, new_state);
+       rtk->iop_power_state = new_state;
+
+       complete_all(&rtk->iop_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx_ap_pwr_ack(struct apple_rtkit *rtk,
+                                                u64 msg)
+{
+       unsigned int new_state = FIELD_GET(APPLE_RTKIT_MGMT_PWR_STATE, msg);
+
+       dev_dbg(rtk->dev, "RTKit: AP power state transition: 0x%x -> 0x%x\n",
+               rtk->ap_power_state, new_state);
+       rtk->ap_power_state = new_state;
+
+       complete_all(&rtk->ap_pwr_ack_completion);
+}
+
+static void apple_rtkit_management_rx(struct apple_rtkit *rtk, u64 msg)
+{
+       u8 type = FIELD_GET(APPLE_RTKIT_MGMT_TYPE, msg);
+
+       switch (type) {
+       case APPLE_RTKIT_MGMT_HELLO:
+               apple_rtkit_management_rx_hello(rtk, msg);
+               break;
+       case APPLE_RTKIT_MGMT_EPMAP:
+               apple_rtkit_management_rx_epmap(rtk, msg);
+               break;
+       case APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE_ACK:
+               apple_rtkit_management_rx_iop_pwr_ack(rtk, msg);
+               break;
+       case APPLE_RTKIT_MGMT_SET_AP_PWR_STATE_ACK:
+               apple_rtkit_management_rx_ap_pwr_ack(rtk, msg);
+               break;
+       default:
+               dev_warn(
+                       rtk->dev,
+                       "RTKit: unknown management message: 0x%llx (type: 0x%02x)\n",
+                       msg, type);
+       }
+}
+
+/*
+ * Satisfy a shared-memory buffer request from the co-processor.
+ *
+ * The request message encodes a size in 4k pages and, optionally, a fixed
+ * IOVA chosen by the co-processor. If the client supplied shmem_setup/
+ * shmem_destroy ops those are used to map the buffer; otherwise a
+ * dma_alloc_coherent() allocation is made. A fixed IOVA can only be honored
+ * by client-provided ops, hence the -EINVAL check below. Unless the client
+ * marked the buffer as already mapped, a reply with the final IOVA is sent
+ * back on the same endpoint.
+ *
+ * On failure the buffer struct is reset to a clean all-zero state.
+ */
+static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk,
+                                           struct apple_rtkit_shmem *buffer,
+                                           u8 ep, u64 msg)
+{
+       size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg);
+       u64 reply;
+       int err;
+
+       buffer->buffer = NULL;
+       buffer->iomem = NULL;
+       buffer->is_mapped = false;
+       buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg);
+       buffer->size = n_4kpages << 12;
+
+       dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n",
+               buffer->size, &buffer->iova);
+
+       if (buffer->iova &&
+           (!rtk->ops->shmem_setup || !rtk->ops->shmem_destroy)) {
+               err = -EINVAL;
+               goto error;
+       }
+
+       if (rtk->ops->shmem_setup) {
+               err = rtk->ops->shmem_setup(rtk->cookie, buffer);
+               if (err)
+                       goto error;
+       } else {
+               buffer->buffer = dma_alloc_coherent(rtk->dev, buffer->size,
+                                                   &buffer->iova, GFP_KERNEL);
+               if (!buffer->buffer) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+       }
+
+       if (!buffer->is_mapped) {
+               reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE,
+                                  APPLE_RTKIT_BUFFER_REQUEST);
+               reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages);
+               reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA,
+                                   buffer->iova);
+               apple_rtkit_send_message(rtk, ep, reply, NULL, false);
+       }
+
+       return 0;
+
+error:
+       buffer->buffer = NULL;
+       buffer->iomem = NULL;
+       buffer->iova = 0;
+       buffer->size = 0;
+       buffer->is_mapped = false;
+       return err;
+}
+
+/*
+ * Release a shared-memory buffer previously set up by
+ * apple_rtkit_common_rx_get_buffer() and reset it to a clean state.
+ * A zero size marks the buffer as never allocated, so this is a no-op then.
+ */
+static void apple_rtkit_free_buffer(struct apple_rtkit *rtk,
+                                   struct apple_rtkit_shmem *bfr)
+{
+       if (bfr->size == 0)
+               return;
+
+       if (rtk->ops->shmem_destroy)
+               rtk->ops->shmem_destroy(rtk->cookie, bfr);
+       else if (bfr->buffer)
+               dma_free_coherent(rtk->dev, bfr->size, bfr->buffer, bfr->iova);
+
+       bfr->buffer = NULL;
+       bfr->iomem = NULL;
+       bfr->iova = 0;
+       bfr->size = 0;
+       bfr->is_mapped = false;
+}
+
+/*
+ * Copy out of a shared-memory buffer, using memcpy_fromio() when the buffer
+ * is backed by iomem (e.g. client-mapped MMIO) and plain memcpy() otherwise.
+ */
+static void apple_rtkit_memcpy(struct apple_rtkit *rtk, void *dst,
+                              struct apple_rtkit_shmem *bfr, size_t offset,
+                              size_t len)
+{
+       if (bfr->iomem)
+               memcpy_fromio(dst, bfr->iomem + offset, len);
+       else
+               memcpy(dst, bfr->buffer + offset, len);
+}
+
+/*
+ * Handle a message on the crashlog endpoint. The first message is a buffer
+ * request; any later CRASH message means the co-processor has crashed and
+ * the crashlog buffer now contains the dump.
+ */
+static void apple_rtkit_crashlog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+       u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+       u8 *bfr;
+
+       if (type != APPLE_RTKIT_CRASHLOG_CRASH) {
+               dev_warn(rtk->dev, "RTKit: Unknown crashlog message: %llx\n",
+                        msg);
+               return;
+       }
+
+       if (!rtk->crashlog_buffer.size) {
+               apple_rtkit_common_rx_get_buffer(rtk, &rtk->crashlog_buffer,
+                                                APPLE_RTKIT_EP_CRASHLOG, msg);
+               return;
+       }
+
+       dev_err(rtk->dev, "RTKit: co-processor has crashed\n");
+
+       /*
+        * create a shadow copy here to make sure the co-processor isn't able
+        * to change the log while we're dumping it. this also ensures
+        * the buffer is in normal memory and not iomem for e.g. the SMC
+        */
+       bfr = kzalloc(rtk->crashlog_buffer.size, GFP_KERNEL);
+       if (bfr) {
+               apple_rtkit_memcpy(rtk, bfr, &rtk->crashlog_buffer, 0,
+                                  rtk->crashlog_buffer.size);
+               apple_rtkit_crashlog_dump(rtk, bfr, rtk->crashlog_buffer.size);
+               kfree(bfr);
+       } else {
+               dev_err(rtk->dev,
+                       "RTKit: Couldn't allocate crashlog shadow buffer\n");
+       }
+
+       /* Mark crashed before notifying the client; blocks future sends. */
+       rtk->crashed = true;
+       if (rtk->ops->crashed)
+               rtk->ops->crashed(rtk->cookie);
+}
+
+/* Handle a message on the ioreport endpoint. */
+static void apple_rtkit_ioreport_rx(struct apple_rtkit *rtk, u64 msg)
+{
+       u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+       switch (type) {
+       case APPLE_RTKIT_BUFFER_REQUEST:
+               apple_rtkit_common_rx_get_buffer(rtk, &rtk->ioreport_buffer,
+                                                APPLE_RTKIT_EP_IOREPORT, msg);
+               break;
+       /* unknown, must be ACKed or the co-processor will hang */
+       case 0x8:
+       case 0xc:
+               apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_IOREPORT, msg,
+                                        NULL, false);
+               break;
+       default:
+               dev_warn(rtk->dev, "RTKit: Unknown ioreport message: %llx\n",
+                        msg);
+       }
+}
+
+/*
+ * Handle SYSLOG_INIT: record the entry count and per-message size advertised
+ * by the co-processor and allocate a scratch buffer for one message.
+ * A failed kzalloc() is deliberately not treated as fatal here; a NULL
+ * syslog_msg_buffer is detected and warned about in
+ * apple_rtkit_syslog_rx_log().
+ */
+static void apple_rtkit_syslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+       rtk->syslog_n_entries = FIELD_GET(APPLE_RTKIT_SYSLOG_N_ENTRIES, msg);
+       rtk->syslog_msg_size = FIELD_GET(APPLE_RTKIT_SYSLOG_MSG_SIZE, msg);
+
+       rtk->syslog_msg_buffer = kzalloc(rtk->syslog_msg_size, GFP_KERNEL);
+
+       dev_dbg(rtk->dev,
+               "RTKit: syslog initialized: entries: %zd, msg_size: %zd\n",
+               rtk->syslog_n_entries, rtk->syslog_msg_size);
+}
+
+/*
+ * Handle SYSLOG_LOG: read one syslog entry out of the shared syslog buffer
+ * and print it. Each entry is 0x20 bytes of header (context string at
+ * offset 8) followed by syslog_msg_size bytes of message text. The message
+ * must always be ACKed back on the syslog endpoint, even when it cannot be
+ * printed, hence the goto done paths.
+ */
+static void apple_rtkit_syslog_rx_log(struct apple_rtkit *rtk, u64 msg)
+{
+       u8 idx = msg & 0xff;
+       char log_context[24];
+       size_t entry_size = 0x20 + rtk->syslog_msg_size;
+
+       if (!rtk->syslog_msg_buffer) {
+               dev_warn(
+                       rtk->dev,
+                       "RTKit: received syslog message but no syslog_msg_buffer\n");
+               goto done;
+       }
+       if (!rtk->syslog_buffer.size) {
+               dev_warn(
+                       rtk->dev,
+                       "RTKit: received syslog message but syslog_buffer.size is zero\n");
+               goto done;
+       }
+       if (!rtk->syslog_buffer.buffer && !rtk->syslog_buffer.iomem) {
+               dev_warn(
+                       rtk->dev,
+                       "RTKit: received syslog message but no syslog_buffer.buffer or syslog_buffer.iomem\n");
+               goto done;
+       }
+       /*
+        * NOTE(review): entries are read at idx * entry_size below, which
+        * suggests idx == syslog_n_entries is already out of range; this
+        * check uses '>' rather than '>='. Confirm whether the firmware
+        * index is 0- or 1-based.
+        */
+       if (idx > rtk->syslog_n_entries) {
+               dev_warn(rtk->dev, "RTKit: syslog index %d out of range\n",
+                        idx);
+               goto done;
+       }
+
+       apple_rtkit_memcpy(rtk, log_context, &rtk->syslog_buffer,
+                          idx * entry_size + 8, sizeof(log_context));
+       apple_rtkit_memcpy(rtk, rtk->syslog_msg_buffer, &rtk->syslog_buffer,
+                          idx * entry_size + 8 + sizeof(log_context),
+                          rtk->syslog_msg_size);
+
+       /* Force NUL termination; the shared buffer contents are untrusted. */
+       log_context[sizeof(log_context) - 1] = 0;
+       rtk->syslog_msg_buffer[rtk->syslog_msg_size - 1] = 0;
+       dev_info(rtk->dev, "RTKit: syslog message: %s: %s\n", log_context,
+                rtk->syslog_msg_buffer);
+
+done:
+       apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_SYSLOG, msg, NULL, false);
+}
+
+/* Dispatch a message received on the syslog endpoint by its TYPE field. */
+static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+       u8 type = FIELD_GET(APPLE_RTKIT_SYSLOG_TYPE, msg);
+
+       switch (type) {
+       case APPLE_RTKIT_BUFFER_REQUEST:
+               apple_rtkit_common_rx_get_buffer(rtk, &rtk->syslog_buffer,
+                                                APPLE_RTKIT_EP_SYSLOG, msg);
+               break;
+       case APPLE_RTKIT_SYSLOG_INIT:
+               apple_rtkit_syslog_rx_init(rtk, msg);
+               break;
+       case APPLE_RTKIT_SYSLOG_LOG:
+               apple_rtkit_syslog_rx_log(rtk, msg);
+               break;
+       default:
+               dev_warn(rtk->dev, "RTKit: Unknown syslog message: %llx\n",
+                        msg);
+       }
+}
+
+/* Handle OSLOG_INIT: the co-processor only expects an ACK message back. */
+static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg)
+{
+       u64 ack;
+
+       dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg);
+       ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK);
+       apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false);
+}
+
+/* Dispatch a message received on the oslog endpoint by its TYPE field. */
+static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg)
+{
+       u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg);
+
+       switch (type) {
+       case APPLE_RTKIT_OSLOG_INIT:
+               apple_rtkit_oslog_rx_init(rtk, msg);
+               break;
+       default:
+               dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg);
+       }
+}
+
+/*
+ * Deferred RX processing: runs on rtk->wq for each received message and
+ * routes it to the per-endpoint handler. Application endpoints (>=
+ * APPLE_RTKIT_APP_ENDPOINT_START) are forwarded to the client's
+ * recv_message callback. The work item is freed here; it was allocated in
+ * apple_rtkit_rx().
+ */
+static void apple_rtkit_rx_work(struct work_struct *work)
+{
+       struct apple_rtkit_rx_work *rtk_work =
+               container_of(work, struct apple_rtkit_rx_work, work);
+       struct apple_rtkit *rtk = rtk_work->rtk;
+
+       switch (rtk_work->ep) {
+       case APPLE_RTKIT_EP_MGMT:
+               apple_rtkit_management_rx(rtk, rtk_work->msg);
+               break;
+       case APPLE_RTKIT_EP_CRASHLOG:
+               apple_rtkit_crashlog_rx(rtk, rtk_work->msg);
+               break;
+       case APPLE_RTKIT_EP_SYSLOG:
+               apple_rtkit_syslog_rx(rtk, rtk_work->msg);
+               break;
+       case APPLE_RTKIT_EP_IOREPORT:
+               apple_rtkit_ioreport_rx(rtk, rtk_work->msg);
+               break;
+       case APPLE_RTKIT_EP_OSLOG:
+               apple_rtkit_oslog_rx(rtk, rtk_work->msg);
+               break;
+       case APPLE_RTKIT_APP_ENDPOINT_START ... 0xff:
+               if (rtk->ops->recv_message)
+                       rtk->ops->recv_message(rtk->cookie, rtk_work->ep,
+                                              rtk_work->msg);
+               else
+                       dev_warn(
+                               rtk->dev,
+                               "Received unexpected message to EP%02d: %llx\n",
+                               rtk_work->ep, rtk_work->msg);
+               break;
+       default:
+               dev_warn(rtk->dev,
+                        "RTKit: message to unknown endpoint %02x: %llx\n",
+                        rtk_work->ep, rtk_work->msg);
+       }
+
+       kfree(rtk_work);
+}
+
+/*
+ * Mailbox RX callback (may run in atomic context, hence GFP_ATOMIC below).
+ * Gives the client a chance to consume application-endpoint messages
+ * synchronously via recv_message_early; otherwise defers handling to
+ * apple_rtkit_rx_work() on the ordered workqueue. A failed work allocation
+ * silently drops the message.
+ */
+static void apple_rtkit_rx(struct mbox_client *cl, void *mssg)
+{
+       struct apple_rtkit *rtk = container_of(cl, struct apple_rtkit, mbox_cl);
+       struct apple_mbox_msg *msg = mssg;
+       struct apple_rtkit_rx_work *work;
+       u8 ep = msg->msg1;
+
+       /*
+        * The message was read from a MMIO FIFO and we have to make
+        * sure all reads from buffers sent with that message happen
+        * afterwards.
+        */
+       dma_rmb();
+
+       if (!test_bit(ep, rtk->endpoints))
+               dev_warn(rtk->dev,
+                        "RTKit: Message to undiscovered endpoint 0x%02x\n",
+                        ep);
+
+       if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+           rtk->ops->recv_message_early &&
+           rtk->ops->recv_message_early(rtk->cookie, ep, msg->msg0))
+               return;
+
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work)
+               return;
+
+       work->rtk = rtk;
+       work->ep = ep;
+       work->msg = msg->msg0;
+       INIT_WORK(&work->work, apple_rtkit_rx_work);
+       queue_work(rtk->wq, &work->work);
+}
+
+/*
+ * Mailbox TX-done callback: complete and free the message. On -ETIME the
+ * mailbox framework still owns the message (a flush may follow), so it must
+ * not be freed here.
+ */
+static void apple_rtkit_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+       struct apple_rtkit_msg *msg =
+               container_of(mssg, struct apple_rtkit_msg, mbox_msg);
+
+       if (r == -ETIME)
+               return;
+
+       if (msg->completion)
+               complete(msg->completion);
+       kfree(msg);
+}
+
+/*
+ * Send a message to an endpoint of the co-processor.
+ *
+ * @completion, if non-NULL, is completed from the TX-done callback.
+ * @atomic selects GFP_ATOMIC allocation for atomic callers.
+ *
+ * Returns -EINVAL if the co-processor crashed or an application endpoint is
+ * used before RTKit is running, -ENOMEM on allocation failure, or the
+ * mbox_send_message() error. The message struct is freed by
+ * apple_rtkit_tx_done() on success.
+ */
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+                            struct completion *completion, bool atomic)
+{
+       struct apple_rtkit_msg *msg;
+       int ret;
+       gfp_t flags;
+
+       if (rtk->crashed)
+               return -EINVAL;
+       if (ep >= APPLE_RTKIT_APP_ENDPOINT_START &&
+           !apple_rtkit_is_running(rtk))
+               return -EINVAL;
+
+       if (atomic)
+               flags = GFP_ATOMIC;
+       else
+               flags = GFP_KERNEL;
+
+       msg = kzalloc(sizeof(*msg), flags);
+       if (!msg)
+               return -ENOMEM;
+
+       msg->mbox_msg.msg0 = message;
+       msg->mbox_msg.msg1 = ep;
+       msg->completion = completion;
+
+       /*
+        * The message will be sent with a MMIO write. We need the barrier
+        * here to ensure any previous writes to buffers are visible to the
+        * device before that MMIO write happens.
+        */
+       dma_wmb();
+
+       ret = mbox_send_message(rtk->mbox_chan, &msg->mbox_msg);
+       if (ret < 0) {
+               kfree(msg);
+               return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message);
+
+/*
+ * Send a message and wait for it to be transmitted.
+ *
+ * In atomic context the mailbox is flushed (busy-waited) for up to @timeout
+ * and the completion is polled; otherwise an interruptible sleep with a
+ * @timeout in milliseconds is used. Returns 0 on success, -ETIME on
+ * timeout, or a negative error from the send/flush/wait.
+ */
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+                                 unsigned long timeout, bool atomic)
+{
+       DECLARE_COMPLETION_ONSTACK(completion);
+       int ret;
+       long t;
+
+       ret = apple_rtkit_send_message(rtk, ep, message, &completion, atomic);
+       if (ret < 0)
+               return ret;
+
+       if (atomic) {
+               ret = mbox_flush(rtk->mbox_chan, timeout);
+               if (ret < 0)
+                       return ret;
+
+               if (try_wait_for_completion(&completion))
+                       return 0;
+
+               return -ETIME;
+       } else {
+               t = wait_for_completion_interruptible_timeout(
+                       &completion, msecs_to_jiffies(timeout));
+               if (t < 0)
+                       return t;
+               else if (t == 0)
+                       return -ETIME;
+               return 0;
+       }
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_send_message_wait);
+
+/*
+ * Ask the co-processor to start an endpoint that was discovered during the
+ * EPMAP handshake. Returns -EINVAL for undiscovered endpoints or when an
+ * application endpoint is started before RTKit is running.
+ */
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint)
+{
+       u64 msg;
+
+       if (!test_bit(endpoint, rtk->endpoints))
+               return -EINVAL;
+       if (endpoint >= APPLE_RTKIT_APP_ENDPOINT_START &&
+           !apple_rtkit_is_running(rtk))
+               return -EINVAL;
+
+       msg = FIELD_PREP(APPLE_RTKIT_MGMT_STARTEP_EP, endpoint);
+       msg |= APPLE_RTKIT_MGMT_STARTEP_FLAG;
+       apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_STARTEP, msg);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_start_ep);
+
+/* Request the mailbox channel, by name if one was given, else by index. */
+static int apple_rtkit_request_mbox_chan(struct apple_rtkit *rtk)
+{
+       if (rtk->mbox_name)
+               rtk->mbox_chan = mbox_request_channel_byname(&rtk->mbox_cl,
+                                                            rtk->mbox_name);
+       else
+               rtk->mbox_chan =
+                       mbox_request_channel(&rtk->mbox_cl, rtk->mbox_idx);
+
+       if (IS_ERR(rtk->mbox_chan))
+               return PTR_ERR(rtk->mbox_chan);
+       return 0;
+}
+
+/*
+ * Allocate and initialize an apple_rtkit instance: completions, endpoint
+ * bitmap (only the management endpoint is known initially), mailbox client
+ * setup, an ordered workqueue for deferred RX handling, and the mailbox
+ * channel itself. Returns an ERR_PTR on failure.
+ */
+static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie,
+                                           const char *mbox_name, int mbox_idx,
+                                           const struct apple_rtkit_ops *ops)
+{
+       struct apple_rtkit *rtk;
+       int ret;
+
+       if (!ops)
+               return ERR_PTR(-EINVAL);
+
+       rtk = kzalloc(sizeof(*rtk), GFP_KERNEL);
+       if (!rtk)
+               return ERR_PTR(-ENOMEM);
+
+       rtk->dev = dev;
+       rtk->cookie = cookie;
+       rtk->ops = ops;
+
+       init_completion(&rtk->epmap_completion);
+       init_completion(&rtk->iop_pwr_ack_completion);
+       init_completion(&rtk->ap_pwr_ack_completion);
+
+       bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+       set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+       rtk->mbox_name = mbox_name;
+       rtk->mbox_idx = mbox_idx;
+       rtk->mbox_cl.dev = dev;
+       rtk->mbox_cl.tx_block = false;
+       rtk->mbox_cl.knows_txdone = false;
+       rtk->mbox_cl.rx_callback = &apple_rtkit_rx;
+       rtk->mbox_cl.tx_done = &apple_rtkit_tx_done;
+
+       /* Ordered workqueue: messages must be processed in arrival order. */
+       rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM,
+                                         dev_name(rtk->dev));
+       if (!rtk->wq) {
+               ret = -ENOMEM;
+               goto free_rtk;
+       }
+
+       ret = apple_rtkit_request_mbox_chan(rtk);
+       if (ret)
+               goto destroy_wq;
+
+       return rtk;
+
+destroy_wq:
+       destroy_workqueue(rtk->wq);
+free_rtk:
+       kfree(rtk);
+       return ERR_PTR(ret);
+}
+
+/*
+ * Wait up to one second (interruptible) for a completion; returns 0 on
+ * success, -ETIME on timeout, or -ERESTARTSYS if interrupted.
+ */
+static int apple_rtkit_wait_for_completion(struct completion *c)
+{
+       long t;
+
+       t = wait_for_completion_interruptible_timeout(c,
+                                                     msecs_to_jiffies(1000));
+       if (t < 0)
+               return t;
+       else if (t == 0)
+               return -ETIME;
+       else
+               return 0;
+}
+
+/*
+ * Reset all RTKit state (buffers, syslog scratch buffer, endpoint bitmap,
+ * completions, power states) and re-request the mailbox channel, e.g. after
+ * a shutdown or crash.
+ */
+int apple_rtkit_reinit(struct apple_rtkit *rtk)
+{
+       /* make sure we don't handle any messages while reinitializing */
+       mbox_free_channel(rtk->mbox_chan);
+       flush_workqueue(rtk->wq);
+
+       apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+       apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+       apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+       kfree(rtk->syslog_msg_buffer);
+
+       rtk->syslog_msg_buffer = NULL;
+       rtk->syslog_n_entries = 0;
+       rtk->syslog_msg_size = 0;
+
+       bitmap_zero(rtk->endpoints, APPLE_RTKIT_MAX_ENDPOINTS);
+       set_bit(APPLE_RTKIT_EP_MGMT, rtk->endpoints);
+
+       reinit_completion(&rtk->epmap_completion);
+       reinit_completion(&rtk->iop_pwr_ack_completion);
+       reinit_completion(&rtk->ap_pwr_ack_completion);
+
+       rtk->crashed = false;
+       rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+       rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_OFF;
+
+       return apple_rtkit_request_mbox_chan(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_reinit);
+
+/*
+ * Request a new AP power state and wait for the co-processor's ACK.
+ * Returns -EINVAL if the acknowledged state differs from the request.
+ */
+static int apple_rtkit_set_ap_power_state(struct apple_rtkit *rtk,
+                                         unsigned int state)
+{
+       u64 msg;
+       int ret;
+
+       reinit_completion(&rtk->ap_pwr_ack_completion);
+
+       msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+       apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_AP_PWR_STATE,
+                                   msg);
+
+       ret = apple_rtkit_wait_for_completion(&rtk->ap_pwr_ack_completion);
+       if (ret)
+               return ret;
+
+       if (rtk->ap_power_state != state)
+               return -EINVAL;
+       return 0;
+}
+
+/*
+ * Request a new IOP power state and wait for the co-processor's ACK.
+ * Returns -EINVAL if the acknowledged state differs from the request.
+ */
+static int apple_rtkit_set_iop_power_state(struct apple_rtkit *rtk,
+                                          unsigned int state)
+{
+       u64 msg;
+       int ret;
+
+       reinit_completion(&rtk->iop_pwr_ack_completion);
+
+       msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, state);
+       apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+                                   msg);
+
+       ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+       if (ret)
+               return ret;
+
+       if (rtk->iop_power_state != state)
+               return -EINVAL;
+       return 0;
+}
+
+/*
+ * Wait for the boot handshake (EPMAP discovery, IOP power-state ACK) to
+ * finish and then bring the AP power state to ON. No-op if already running;
+ * fails with -EINVAL after a crash.
+ */
+int apple_rtkit_boot(struct apple_rtkit *rtk)
+{
+       int ret;
+
+       if (apple_rtkit_is_running(rtk))
+               return 0;
+       if (rtk->crashed)
+               return -EINVAL;
+
+       dev_dbg(rtk->dev, "RTKit: waiting for boot to finish\n");
+       ret = apple_rtkit_wait_for_completion(&rtk->epmap_completion);
+       if (ret)
+               return ret;
+       if (rtk->boot_result)
+               return rtk->boot_result;
+
+       dev_dbg(rtk->dev, "RTKit: waiting for IOP power state ACK\n");
+       ret = apple_rtkit_wait_for_completion(&rtk->iop_pwr_ack_completion);
+       if (ret)
+               return ret;
+
+       return apple_rtkit_set_ap_power_state(rtk, APPLE_RTKIT_PWR_STATE_ON);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_boot);
+
+/*
+ * Shut down the co-processor: quiesce the AP side, put the IOP to sleep
+ * (so it can wake again later), then reset all local state.
+ */
+int apple_rtkit_shutdown(struct apple_rtkit *rtk)
+{
+       int ret;
+
+       /* if OFF is used here the co-processor will not wake up again */
+       ret = apple_rtkit_set_ap_power_state(rtk,
+                                            APPLE_RTKIT_PWR_STATE_QUIESCED);
+       if (ret)
+               return ret;
+
+       ret = apple_rtkit_set_iop_power_state(rtk, APPLE_RTKIT_PWR_STATE_SLEEP);
+       if (ret)
+               return ret;
+
+       return apple_rtkit_reinit(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_shutdown);
+
+/*
+ * Quiesce both AP and IOP sides and reset local state, remembering the
+ * QUIESCED power states (apple_rtkit_reinit() resets them to OFF).
+ */
+int apple_rtkit_quiesce(struct apple_rtkit *rtk)
+{
+       int ret;
+
+       ret = apple_rtkit_set_ap_power_state(rtk,
+                                            APPLE_RTKIT_PWR_STATE_QUIESCED);
+       if (ret)
+               return ret;
+
+       ret = apple_rtkit_set_iop_power_state(rtk,
+                                             APPLE_RTKIT_PWR_STATE_QUIESCED);
+       if (ret)
+               return ret;
+
+       ret = apple_rtkit_reinit(rtk);
+       if (ret)
+               return ret;
+
+       rtk->iop_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+       rtk->ap_power_state = APPLE_RTKIT_PWR_STATE_QUIESCED;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_quiesce);
+
+/* Wake a sleeping/quiesced co-processor and run the boot sequence. */
+int apple_rtkit_wake(struct apple_rtkit *rtk)
+{
+       u64 msg;
+
+       if (apple_rtkit_is_running(rtk))
+               return -EINVAL;
+
+       reinit_completion(&rtk->iop_pwr_ack_completion);
+
+       /*
+        * Use open-coded apple_rtkit_set_iop_power_state since apple_rtkit_boot
+        * will wait for the completion anyway.
+        */
+       msg = FIELD_PREP(APPLE_RTKIT_MGMT_PWR_STATE, APPLE_RTKIT_PWR_STATE_ON);
+       apple_rtkit_management_send(rtk, APPLE_RTKIT_MGMT_SET_IOP_PWR_STATE,
+                                   msg);
+
+       return apple_rtkit_boot(rtk);
+}
+EXPORT_SYMBOL_GPL(apple_rtkit_wake);
+
+/* Tear down an apple_rtkit instance and release all of its resources. */
+static void apple_rtkit_free(struct apple_rtkit *rtk)
+{
+       mbox_free_channel(rtk->mbox_chan);
+       destroy_workqueue(rtk->wq);
+
+       apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer);
+       apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer);
+       apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer);
+
+       kfree(rtk->syslog_msg_buffer);
+       kfree(rtk);
+}
+
+/*
+ * devm action callback with the prototype devm expects. Calling
+ * apple_rtkit_free() through a cast void (*)(void *) pointer is undefined
+ * behavior in C and is rejected by kCFI, so use this wrapper instead.
+ */
+static void apple_rtkit_free_wrapper(void *data)
+{
+       apple_rtkit_free(data);
+}
+
+/*
+ * Device-managed variant of apple_rtkit_init(): the instance is torn down
+ * automatically when @dev is unbound. Returns an ERR_PTR on failure.
+ */
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+                                         const char *mbox_name, int mbox_idx,
+                                         const struct apple_rtkit_ops *ops)
+{
+       struct apple_rtkit *rtk;
+       int ret;
+
+       rtk = apple_rtkit_init(dev, cookie, mbox_name, mbox_idx, ops);
+       if (IS_ERR(rtk))
+               return rtk;
+
+       ret = devm_add_action_or_reset(dev, apple_rtkit_free_wrapper, rtk);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return rtk;
+}
+EXPORT_SYMBOL_GPL(devm_apple_rtkit_init);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple RTKit driver");
diff --git a/drivers/soc/apple/sart.c b/drivers/soc/apple/sart.c
new file mode 100644 (file)
index 0000000..83804b1
--- /dev/null
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for some DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target these. Unlike a proper
+ * IOMMU no remapping can be done and special support in the
+ * consumer driver is required since not all DMA transactions of
+ * a single device are subject to SART filtering.
+ */
+
+#include <linux/soc/apple/sart.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_SART_MAX_ENTRIES 16
+
+/* This is probably a bitfield but the exact meaning of each bit is unknown. */
+#define APPLE_SART_FLAGS_ALLOW 0xff
+
+/* SARTv2 registers */
+#define APPLE_SART2_CONFIG(idx)              (0x00 + 4 * (idx))
+#define APPLE_SART2_CONFIG_FLAGS      GENMASK(31, 24)
+#define APPLE_SART2_CONFIG_SIZE              GENMASK(23, 0)
+#define APPLE_SART2_CONFIG_SIZE_SHIFT 12
+#define APPLE_SART2_CONFIG_SIZE_MAX   GENMASK(23, 0)
+
+#define APPLE_SART2_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART2_PADDR_SHIFT 12
+
+/* SARTv3 registers */
+#define APPLE_SART3_CONFIG(idx) (0x00 + 4 * (idx))
+
+#define APPLE_SART3_PADDR(idx) (0x40 + 4 * (idx))
+#define APPLE_SART3_PADDR_SHIFT 12
+
+#define APPLE_SART3_SIZE(idx)  (0x80 + 4 * (idx))
+#define APPLE_SART3_SIZE_SHIFT 12
+#define APPLE_SART3_SIZE_MAX   GENMASK(29, 0)
+
+/*
+ * Per-SART-version register accessors and layout parameters.
+ * get_entry/set_entry read/write one filter entry; set_entry takes the
+ * paddr and size already shifted right by paddr_shift/size_shift.
+ */
+struct apple_sart_ops {
+       void (*get_entry)(struct apple_sart *sart, int index, u8 *flags,
+                         phys_addr_t *paddr, size_t *size);
+       void (*set_entry)(struct apple_sart *sart, int index, u8 flags,
+                         phys_addr_t paddr_shifted, size_t size_shifted);
+       unsigned int size_shift;
+       unsigned int paddr_shift;
+       size_t size_max;
+};
+
+struct apple_sart {
+       struct device *dev;
+       void __iomem *regs;
+
+       const struct apple_sart_ops *ops;
+
+       /* bitmaps over the 16 entries: bootloader-owned vs. driver-owned */
+       unsigned long protected_entries;
+       unsigned long used_entries;
+};
+
+/* SARTv2: flags and size share the CONFIG register; paddr has its own. */
+static void sart2_get_entry(struct apple_sart *sart, int index, u8 *flags,
+                           phys_addr_t *paddr, size_t *size)
+{
+       u32 cfg = readl(sart->regs + APPLE_SART2_CONFIG(index));
+       phys_addr_t paddr_ = readl(sart->regs + APPLE_SART2_PADDR(index));
+       size_t size_ = FIELD_GET(APPLE_SART2_CONFIG_SIZE, cfg);
+
+       *flags = FIELD_GET(APPLE_SART2_CONFIG_FLAGS, cfg);
+       *size = size_ << APPLE_SART2_CONFIG_SIZE_SHIFT;
+       *paddr = paddr_ << APPLE_SART2_PADDR_SHIFT;
+}
+
+static void sart2_set_entry(struct apple_sart *sart, int index, u8 flags,
+                           phys_addr_t paddr_shifted, size_t size_shifted)
+{
+       u32 cfg;
+
+       cfg = FIELD_PREP(APPLE_SART2_CONFIG_FLAGS, flags);
+       cfg |= FIELD_PREP(APPLE_SART2_CONFIG_SIZE, size_shifted);
+
+       /* Write PADDR first; CONFIG (with flags) activates the entry. */
+       writel(paddr_shifted, sart->regs + APPLE_SART2_PADDR(index));
+       writel(cfg, sart->regs + APPLE_SART2_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v2 = {
+       .get_entry = sart2_get_entry,
+       .set_entry = sart2_set_entry,
+       .size_shift = APPLE_SART2_CONFIG_SIZE_SHIFT,
+       .paddr_shift = APPLE_SART2_PADDR_SHIFT,
+       .size_max = APPLE_SART2_CONFIG_SIZE_MAX,
+};
+
+/* SARTv3: flags, paddr and size each live in a separate register. */
+static void sart3_get_entry(struct apple_sart *sart, int index, u8 *flags,
+                           phys_addr_t *paddr, size_t *size)
+{
+       phys_addr_t paddr_ = readl(sart->regs + APPLE_SART3_PADDR(index));
+       size_t size_ = readl(sart->regs + APPLE_SART3_SIZE(index));
+
+       *flags = readl(sart->regs + APPLE_SART3_CONFIG(index));
+       *size = size_ << APPLE_SART3_SIZE_SHIFT;
+       *paddr = paddr_ << APPLE_SART3_PADDR_SHIFT;
+}
+
+static void sart3_set_entry(struct apple_sart *sart, int index, u8 flags,
+                           phys_addr_t paddr_shifted, size_t size_shifted)
+{
+       /* Write PADDR and SIZE first; CONFIG (flags) activates the entry. */
+       writel(paddr_shifted, sart->regs + APPLE_SART3_PADDR(index));
+       writel(size_shifted, sart->regs + APPLE_SART3_SIZE(index));
+       writel(flags, sart->regs + APPLE_SART3_CONFIG(index));
+}
+
+static struct apple_sart_ops sart_ops_v3 = {
+       .get_entry = sart3_get_entry,
+       .set_entry = sart3_set_entry,
+       .size_shift = APPLE_SART3_SIZE_SHIFT,
+       .paddr_shift = APPLE_SART3_PADDR_SHIFT,
+       .size_max = APPLE_SART3_SIZE_MAX,
+};
+
+/*
+ * Probe: map the registers and record every entry the bootloader already
+ * programmed (non-zero flags) as protected so the driver never reuses or
+ * clears it.
+ */
+static int apple_sart_probe(struct platform_device *pdev)
+{
+       int i;
+       struct apple_sart *sart;
+       struct device *dev = &pdev->dev;
+
+       sart = devm_kzalloc(dev, sizeof(*sart), GFP_KERNEL);
+       if (!sart)
+               return -ENOMEM;
+
+       sart->dev = dev;
+       sart->ops = of_device_get_match_data(dev);
+
+       sart->regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(sart->regs))
+               return PTR_ERR(sart->regs);
+
+       for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+               u8 flags;
+               size_t size;
+               phys_addr_t paddr;
+
+               sart->ops->get_entry(sart, i, &flags, &paddr, &size);
+
+               if (!flags)
+                       continue;
+
+               dev_dbg(sart->dev,
+                       "SART bootloader entry: index %02d; flags: 0x%02x; paddr: %pa; size: 0x%zx\n",
+                       i, flags, &paddr, size);
+               set_bit(i, &sart->protected_entries);
+       }
+
+       platform_set_drvdata(pdev, sart);
+       return 0;
+}
+
+/*
+ * devm action callback with the prototype devm expects. Calling
+ * put_device() through a cast void (*)(void *) pointer is undefined
+ * behavior in C and is rejected by kCFI, so use this wrapper instead.
+ */
+static void apple_sart_put_device(void *dev)
+{
+       put_device(dev);
+}
+
+/*
+ * Look up the SART referenced by @dev's "apple,sart" phandle and return its
+ * driver instance. Takes a device reference (released via a devm action)
+ * and adds a device link so the SART suspends after and resumes before its
+ * consumer. Returns -ENODEV if the node/device is missing and
+ * -EPROBE_DEFER if the SART has not probed yet.
+ */
+struct apple_sart *devm_apple_sart_get(struct device *dev)
+{
+       struct device_node *sart_node;
+       struct platform_device *sart_pdev;
+       struct apple_sart *sart;
+       int ret;
+
+       sart_node = of_parse_phandle(dev->of_node, "apple,sart", 0);
+       if (!sart_node)
+               return ERR_PTR(-ENODEV);
+
+       sart_pdev = of_find_device_by_node(sart_node);
+       of_node_put(sart_node);
+
+       if (!sart_pdev)
+               return ERR_PTR(-ENODEV);
+
+       sart = dev_get_drvdata(&sart_pdev->dev);
+       if (!sart) {
+               put_device(&sart_pdev->dev);
+               return ERR_PTR(-EPROBE_DEFER);
+       }
+
+       ret = devm_add_action_or_reset(dev, apple_sart_put_device,
+                                      &sart_pdev->dev);
+       if (ret)
+               return ERR_PTR(ret);
+
+       device_link_add(dev, &sart_pdev->dev,
+                       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER);
+
+       return sart;
+}
+EXPORT_SYMBOL_GPL(devm_apple_sart_get);
+
+/*
+ * Validate alignment and range of a region, then program one filter entry.
+ * Returns -EINVAL for a misaligned paddr/size or an oversized region.
+ */
+static int sart_set_entry(struct apple_sart *sart, int index, u8 flags,
+                         phys_addr_t paddr, size_t size)
+{
+       if (size & ((1 << sart->ops->size_shift) - 1))
+               return -EINVAL;
+       if (paddr & ((1 << sart->ops->paddr_shift) - 1))
+               return -EINVAL;
+
+       /*
+        * Shift each value by its own field's shift. The shifts were
+        * swapped here before: harmless only because both are currently 12
+        * for SARTv2 and SARTv3, but wrong should they ever diverge.
+        */
+       paddr >>= sart->ops->paddr_shift;
+       size >>= sart->ops->size_shift;
+
+       if (size > sart->ops->size_max)
+               return -EINVAL;
+
+       sart->ops->set_entry(sart, index, flags, paddr, size);
+       return 0;
+}
+
+/*
+ * Add [paddr, paddr + size) to the SART allow list. Claims the first free,
+ * non-protected entry; test_and_set_bit() makes the claim atomic against
+ * concurrent callers. Returns 0 on success, -EINVAL if the region cannot
+ * be encoded, or -EBUSY if all entries are in use.
+ */
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+                                 size_t size)
+{
+       int i, ret;
+
+       for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+               if (test_bit(i, &sart->protected_entries))
+                       continue;
+               if (test_and_set_bit(i, &sart->used_entries))
+                       continue;
+
+               ret = sart_set_entry(sart, i, APPLE_SART_FLAGS_ALLOW, paddr,
+                                    size);
+               if (ret) {
+                       dev_dbg(sart->dev,
+                               "unable to set entry %d to [%pa, 0x%zx]\n",
+                               i, &paddr, size);
+                       clear_bit(i, &sart->used_entries);
+                       return ret;
+               }
+
+               dev_dbg(sart->dev, "wrote [%pa, 0x%zx] to %d\n", &paddr, size,
+                       i);
+               return 0;
+       }
+
+       dev_warn(sart->dev,
+                "no free entries left to add [paddr: 0x%pa, size: 0x%zx]\n",
+                &paddr, size);
+
+       return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(apple_sart_add_allowed_region);
+
+/*
+ * Remove [paddr, paddr + size) from the SART allow list by scanning the
+ * hardware entries for an exact paddr/size match, skipping protected
+ * (bootloader-owned) entries. Returns -EINVAL if no matching entry exists.
+ */
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+                                    size_t size)
+{
+       int i;
+
+       dev_dbg(sart->dev,
+               "will remove [paddr: %pa, size: 0x%zx] from allowed regions\n",
+               &paddr, size);
+
+       for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+               u8 eflags;
+               size_t esize;
+               phys_addr_t epaddr;
+
+               if (test_bit(i, &sart->protected_entries))
+                       continue;
+
+               sart->ops->get_entry(sart, i, &eflags, &epaddr, &esize);
+
+               if (epaddr != paddr || esize != size)
+                       continue;
+
+               sart->ops->set_entry(sart, i, 0, 0, 0);
+
+               clear_bit(i, &sart->used_entries);
+               dev_dbg(sart->dev, "cleared entry %d\n", i);
+               return 0;
+       }
+
+       dev_warn(sart->dev, "entry [paddr: 0x%pa, size: 0x%zx] not found\n",
+                &paddr, size);
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apple_sart_remove_allowed_region);
+
+/*
+ * Shutdown hook: clear every driver-owned entry so no stale DMA windows
+ * survive into the next boot; bootloader-protected entries are kept.
+ */
+static void apple_sart_shutdown(struct platform_device *pdev)
+{
+       struct apple_sart *sart = dev_get_drvdata(&pdev->dev);
+       int i;
+
+       for (i = 0; i < APPLE_SART_MAX_ENTRIES; ++i) {
+               if (test_bit(i, &sart->protected_entries))
+                       continue;
+
+               sart->ops->set_entry(sart, i, 0, 0, 0);
+       }
+}
+
+/* Match data selects the register layout (SARTv3 on t6000, v2 on t8103). */
+static const struct of_device_id apple_sart_of_match[] = {
+       {
+               .compatible = "apple,t6000-sart",
+               .data = &sart_ops_v3,
+       },
+       {
+               .compatible = "apple,t8103-sart",
+               .data = &sart_ops_v2,
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, apple_sart_of_match);
+
+static struct platform_driver apple_sart_driver = {
+       .driver = {
+               .name = "apple-sart",
+               .of_match_table = apple_sart_of_match,
+       },
+       .probe = apple_sart_probe,
+       .shutdown = apple_sart_shutdown,
+};
+module_platform_driver(apple_sart_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple SART driver");
index 7bbe46e..9407cac 100644 (file)
@@ -312,6 +312,9 @@ static int bcm_pmb_probe(struct platform_device *pdev)
        for (e = table; e->name; e++) {
                struct bcm_pmb_pm_domain *pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
 
+               if (!pd)
+                       return -ENOMEM;
+
                pd->pmb = pmb;
                pd->data = e;
                pd->genpd.name = e->name;
index fdc99a0..63477f0 100644 (file)
@@ -296,6 +296,12 @@ config ARCH_R8A774B1
        help
          This enables support for the Renesas RZ/G2N SoC.
 
+config ARCH_R9A07G043
+       bool "ARM64 Platform support for RZ/G2UL"
+       select ARCH_RZG2L
+       help
+         This enables support for the Renesas RZ/G2UL SoC variants.
+
 config ARCH_R9A07G044
        bool "ARM64 Platform support for RZ/G2L"
        select ARCH_RZG2L
index 92c7b42..30192a0 100644 (file)
@@ -64,6 +64,10 @@ static const struct renesas_family fam_rzg2l __initconst __maybe_unused = {
        .name   = "RZ/G2L",
 };
 
+static const struct renesas_family fam_rzg2ul __initconst __maybe_unused = {
+       .name   = "RZ/G2UL",
+};
+
 static const struct renesas_family fam_rzv2l __initconst __maybe_unused = {
        .name   = "RZ/V2L",
 };
@@ -148,6 +152,11 @@ static const struct renesas_soc soc_rz_g2l __initconst __maybe_unused = {
        .id     = 0x841c447,
 };
 
+static const struct renesas_soc soc_rz_g2ul __initconst __maybe_unused = {
+       .family = &fam_rzg2ul,
+       .id     = 0x8450447,
+};
+
 static const struct renesas_soc soc_rz_v2l __initconst __maybe_unused = {
        .family = &fam_rzv2l,
        .id     = 0x8447447,
@@ -340,6 +349,9 @@ static const struct of_device_id renesas_socs[] __initconst = {
 #ifdef CONFIG_ARCH_R8A779F0
        { .compatible = "renesas,r8a779f0",     .data = &soc_rcar_s4 },
 #endif
+#if defined(CONFIG_ARCH_R9A07G043)
+       { .compatible = "renesas,r9a07g043",    .data = &soc_rz_g2ul },
+#endif
 #if defined(CONFIG_ARCH_R9A07G044)
        { .compatible = "renesas,r9a07g044",    .data = &soc_rz_g2l },
 #endif
@@ -378,6 +390,7 @@ static const struct renesas_id id_prr __initconst = {
 
 static const struct of_device_id renesas_ids[] __initconst = {
        { .compatible = "renesas,bsid",                 .data = &id_bsid },
+       { .compatible = "renesas,r9a07g043-sysc",       .data = &id_rzg2l },
        { .compatible = "renesas,r9a07g044-sysc",       .data = &id_rzg2l },
        { .compatible = "renesas,r9a07g054-sysc",       .data = &id_rzg2l },
        { .compatible = "renesas,prr",                  .data = &id_prr },
index 86c7621..cad2d55 100644 (file)
@@ -1205,7 +1205,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
        addr = op->addr.val;
        len = op->data.nbytes;
 
-       if (bcm_qspi_bspi_ver_three(qspi) == true) {
+       if (has_bspi(qspi) && bcm_qspi_bspi_ver_three(qspi) == true) {
                /*
                 * The address coming into this function is a raw flash offset.
                 * But for BSPI <= V3, we need to convert it to a remapped BSPI
@@ -1224,7 +1224,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
            len < 4)
                mspi_read = true;
 
-       if (mspi_read)
+       if (!has_bspi(qspi) || mspi_read)
                return bcm_qspi_mspi_exec_mem_op(spi, op);
 
        ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
index b0c9f62..616ada8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/iopoll.h>
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
+#include <linux/log2.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
@@ -102,12 +103,6 @@ struct cqspi_driver_platdata {
 #define CQSPI_TIMEOUT_MS                       500
 #define CQSPI_READ_TIMEOUT_MS                  10
 
-/* Instruction type */
-#define CQSPI_INST_TYPE_SINGLE                 0
-#define CQSPI_INST_TYPE_DUAL                   1
-#define CQSPI_INST_TYPE_QUAD                   2
-#define CQSPI_INST_TYPE_OCTAL                  3
-
 #define CQSPI_DUMMY_CLKS_PER_BYTE              8
 #define CQSPI_DUMMY_BYTES_MAX                  4
 #define CQSPI_DUMMY_CLKS_MAX                   31
@@ -376,10 +371,6 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
 static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
                              const struct spi_mem_op *op)
 {
-       f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
-       f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-
        /*
         * For an op to be DTR, cmd phase along with every other non-empty
         * phase should have dtr field set to 1. If an op phase has zero
@@ -389,32 +380,23 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
                       (!op->addr.nbytes || op->addr.dtr) &&
                       (!op->data.nbytes || op->data.dtr);
 
-       switch (op->data.buswidth) {
-       case 0:
-               break;
-       case 1:
-               f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
-               break;
-       case 2:
-               f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
-               break;
-       case 4:
-               f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
-               break;
-       case 8:
-               f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
-               break;
-       default:
-               return -EINVAL;
-       }
+       f_pdata->inst_width = 0;
+       if (op->cmd.buswidth)
+               f_pdata->inst_width = ilog2(op->cmd.buswidth);
+
+       f_pdata->addr_width = 0;
+       if (op->addr.buswidth)
+               f_pdata->addr_width = ilog2(op->addr.buswidth);
+
+       f_pdata->data_width = 0;
+       if (op->data.buswidth)
+               f_pdata->data_width = ilog2(op->data.buswidth);
 
        /* Right now we only support 8-8-8 DTR mode. */
        if (f_pdata->dtr) {
                switch (op->cmd.buswidth) {
                case 0:
-                       break;
                case 8:
-                       f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
                        break;
                default:
                        return -EINVAL;
@@ -422,9 +404,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
 
                switch (op->addr.buswidth) {
                case 0:
-                       break;
                case 8:
-                       f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
                        break;
                default:
                        return -EINVAL;
@@ -432,9 +412,7 @@ static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
 
                switch (op->data.buswidth) {
                case 0:
-                       break;
                case 8:
-                       f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
                        break;
                default:
                        return -EINVAL;
index 55c0920..65be8e0 100644 (file)
@@ -813,6 +813,7 @@ static int mxic_spi_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "spi_register_master failed\n");
                pm_runtime_disable(&pdev->dev);
+               mxic_spi_mem_ecc_remove(mxic);
        }
 
        return ret;
index fe82f35..24ec1c8 100644 (file)
@@ -158,14 +158,18 @@ static int rpcif_spi_probe(struct platform_device *pdev)
 
        error = rpcif_hw_init(rpc, false);
        if (error)
-               return error;
+               goto out_disable_rpm;
 
        error = spi_register_controller(ctlr);
        if (error) {
                dev_err(&pdev->dev, "spi_register_controller failed\n");
-               rpcif_disable_rpm(rpc);
+               goto out_disable_rpm;
        }
 
+       return 0;
+
+out_disable_rpm:
+       rpcif_disable_rpm(rpc);
        return error;
 }
 
index c4dd120..2e6d6bb 100644 (file)
@@ -1130,11 +1130,15 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
 
        if (ctlr->dma_tx)
                tx_dev = ctlr->dma_tx->device->dev;
+       else if (ctlr->dma_map_dev)
+               tx_dev = ctlr->dma_map_dev;
        else
                tx_dev = ctlr->dev.parent;
 
        if (ctlr->dma_rx)
                rx_dev = ctlr->dma_rx->device->dev;
+       else if (ctlr->dma_map_dev)
+               rx_dev = ctlr->dma_map_dev;
        else
                rx_dev = ctlr->dev.parent;
 
@@ -2406,7 +2410,8 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
                        } else {
                                struct acpi_device *adev;
 
-                               if (acpi_bus_get_device(parent_handle, &adev))
+                               adev = acpi_fetch_acpi_dev(parent_handle);
+                               if (!adev)
                                        return -ENODEV;
 
                                ctlr = acpi_spi_find_controller_by_adev(adev);
index d68611e..f056204 100644 (file)
@@ -70,7 +70,7 @@ static int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
        struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
        int data_len;
 
-       data_len = tag->tag_len + TAG_HDR_LEN;
+       data_len = be16_to_cpu(tag->tag_len) + TAG_HDR_LEN;
        if (skb_tailroom(skb) < data_len)
                return -1;
 
index 95d4ca5..fd7267b 100644 (file)
@@ -1821,6 +1821,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
        mutex_lock(&udev->cmdr_lock);
        page = xa_load(&udev->data_pages, dpi);
        if (likely(page)) {
+               get_page(page);
                mutex_unlock(&udev->cmdr_lock);
                return page;
        }
@@ -1877,6 +1878,7 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
                /* For the vmalloc()ed cmd area pages */
                addr = (void *)(unsigned long)info->mem[mi].addr + offset;
                page = vmalloc_to_page(addr);
+               get_page(page);
        } else {
                uint32_t dpi;
 
@@ -1887,7 +1889,6 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
                        return VM_FAULT_SIGBUS;
        }
 
-       get_page(page);
        vmf->page = page;
        return 0;
 }
index e99d840..73a1472 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 # Generic Trusted Execution Environment Configuration
-config TEE
+menuconfig TEE
        tristate "Trusted Execution Environment support"
        depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD
        select CRYPTO
@@ -13,10 +13,7 @@ config TEE
 
 if TEE
 
-menu "TEE drivers"
-
 source "drivers/tee/optee/Kconfig"
 source "drivers/tee/amdtee/Kconfig"
-endmenu
 
 endif
index bd49ec9..6554e06 100644 (file)
 #include <linux/types.h>
 #include "optee_private.h"
 
+#define MAX_ARG_PARAM_COUNT    6
+
+/*
+ * How much memory we allocate for each entry. This doesn't have to be a
+ * single page, but it makes sense to keep at least keep it as multiples of
+ * the page size.
+ */
+#define SHM_ENTRY_SIZE         PAGE_SIZE
+
+/*
+ * We need to have a compile time constant to be able to determine the
+ * maximum needed size of the bit field.
+ */
+#define MIN_ARG_SIZE           OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT)
+#define MAX_ARG_COUNT_PER_ENTRY        (SHM_ENTRY_SIZE / MIN_ARG_SIZE)
+
+/*
+ * Shared memory for argument structs are cached here. The number of
+ * arguments structs that can fit is determined at runtime depending on the
+ * needed RPC parameter count reported by secure world
+ * (optee->rpc_param_count).
+ */
+struct optee_shm_arg_entry {
+       struct list_head list_node;
+       struct tee_shm *shm;
+       DECLARE_BITMAP(map, MAX_ARG_COUNT_PER_ENTRY);
+};
+
 void optee_cq_wait_init(struct optee_call_queue *cq,
                        struct optee_call_waiter *w)
 {
@@ -104,37 +132,149 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata,
        return NULL;
 }
 
-struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
-                                 struct optee_msg_arg **msg_arg)
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags)
+{
+       INIT_LIST_HEAD(&optee->shm_arg_cache.shm_args);
+       mutex_init(&optee->shm_arg_cache.mutex);
+       optee->shm_arg_cache.flags = flags;
+}
+
+void optee_shm_arg_cache_uninit(struct optee *optee)
+{
+       struct list_head *head = &optee->shm_arg_cache.shm_args;
+       struct optee_shm_arg_entry *entry;
+
+       mutex_destroy(&optee->shm_arg_cache.mutex);
+       while (!list_empty(head)) {
+               entry = list_first_entry(head, struct optee_shm_arg_entry,
+                                        list_node);
+               list_del(&entry->list_node);
+               if (find_first_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY) !=
+                    MAX_ARG_COUNT_PER_ENTRY) {
+                       pr_err("Freeing non-free entry\n");
+               }
+               tee_shm_free(entry->shm);
+               kfree(entry);
+       }
+}
+
+size_t optee_msg_arg_size(size_t rpc_param_count)
+{
+       size_t sz = OPTEE_MSG_GET_ARG_SIZE(MAX_ARG_PARAM_COUNT);
+
+       if (rpc_param_count)
+               sz += OPTEE_MSG_GET_ARG_SIZE(rpc_param_count);
+
+       return sz;
+}
+
+/**
+ * optee_get_msg_arg() - Provide shared memory for argument struct
+ * @ctx:       Caller TEE context
+ * @num_params:        Number of parameter to store
+ * @entry_ret: Entry pointer, needed when freeing the buffer
+ * @shm_ret:   Shared memory buffer
+ * @offs_ret:  Offset of argument strut in shared memory buffer
+ *
+ * @returns a pointer to the argument struct in memory, else an ERR_PTR
+ */
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+                                       size_t num_params,
+                                       struct optee_shm_arg_entry **entry_ret,
+                                       struct tee_shm **shm_ret,
+                                       u_int *offs_ret)
 {
        struct optee *optee = tee_get_drvdata(ctx->teedev);
-       size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
-       struct tee_shm *shm;
+       size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+       struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *ma;
+       size_t args_per_entry;
+       u_long bit;
+       u_int offs;
+       void *res;
+
+       if (num_params > MAX_ARG_PARAM_COUNT)
+               return ERR_PTR(-EINVAL);
+
+       if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_SHARED)
+               args_per_entry = SHM_ENTRY_SIZE / sz;
+       else
+               args_per_entry = 1;
+
+       mutex_lock(&optee->shm_arg_cache.mutex);
+       list_for_each_entry(entry, &optee->shm_arg_cache.shm_args, list_node) {
+               bit = find_first_zero_bit(entry->map, MAX_ARG_COUNT_PER_ENTRY);
+               if (bit < args_per_entry)
+                       goto have_entry;
+       }
 
        /*
-        * rpc_arg_count is set to the number of allocated parameters in
-        * the RPC argument struct if a second MSG arg struct is expected.
-        * The second arg struct will then be used for RPC.
+        * No entry was found, let's allocate a new.
         */
-       if (optee->rpc_arg_count)
-               sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               res = ERR_PTR(-ENOMEM);
+               goto out;
+       }
 
-       shm = tee_shm_alloc_priv_buf(ctx, sz);
-       if (IS_ERR(shm))
-               return shm;
+       if (optee->shm_arg_cache.flags & OPTEE_SHM_ARG_ALLOC_PRIV)
+               res = tee_shm_alloc_priv_buf(ctx, SHM_ENTRY_SIZE);
+       else
+               res = tee_shm_alloc_kernel_buf(ctx, SHM_ENTRY_SIZE);
 
-       ma = tee_shm_get_va(shm, 0);
-       if (IS_ERR(ma)) {
-               tee_shm_free(shm);
-               return (void *)ma;
+       if (IS_ERR(res)) {
+               kfree(entry);
+               goto out;
        }
-
-       memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
+       entry->shm = res;
+       list_add(&entry->list_node, &optee->shm_arg_cache.shm_args);
+       bit = 0;
+
+have_entry:
+       offs = bit * sz;
+       res = tee_shm_get_va(entry->shm, offs);
+       if (IS_ERR(res))
+               goto out;
+       ma = res;
+       set_bit(bit, entry->map);
+       memset(ma, 0, sz);
        ma->num_params = num_params;
-       *msg_arg = ma;
+       *entry_ret = entry;
+       *shm_ret = entry->shm;
+       *offs_ret = offs;
+out:
+       mutex_unlock(&optee->shm_arg_cache.mutex);
+       return res;
+}
+
+/**
+ * optee_free_msg_arg() - Free previsouly obtained shared memory
+ * @ctx:       Caller TEE context
+ * @entry:     Pointer returned when the shared memory was obtained
+ * @offs:      Offset of shared memory buffer to free
+ *
+ * This function frees the shared memory obtained with optee_get_msg_arg().
+ */
+void optee_free_msg_arg(struct tee_context *ctx,
+                       struct optee_shm_arg_entry *entry, u_int offs)
+{
+       struct optee *optee = tee_get_drvdata(ctx->teedev);
+       size_t sz = optee_msg_arg_size(optee->rpc_param_count);
+       u_long bit;
 
-       return shm;
+       if (offs > SHM_ENTRY_SIZE || offs % sz) {
+               pr_err("Invalid offs %u\n", offs);
+               return;
+       }
+       bit = offs / sz;
+
+       mutex_lock(&optee->shm_arg_cache.mutex);
+
+       if (!test_bit(bit, entry->map))
+               pr_err("Bit pos %lu is already free\n", bit);
+       clear_bit(bit, entry->map);
+
+       mutex_unlock(&optee->shm_arg_cache.mutex);
 }
 
 int optee_open_session(struct tee_context *ctx,
@@ -143,16 +283,19 @@ int optee_open_session(struct tee_context *ctx,
 {
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
-       int rc;
+       struct optee_shm_arg_entry *entry;
        struct tee_shm *shm;
        struct optee_msg_arg *msg_arg;
        struct optee_session *sess = NULL;
        uuid_t client_uuid;
+       u_int offs;
+       int rc;
 
        /* +2 for the meta parameters added below */
-       shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
-       if (IS_ERR(shm))
-               return PTR_ERR(shm);
+       msg_arg = optee_get_msg_arg(ctx, arg->num_params + 2,
+                                   &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
 
        msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
        msg_arg->cancel_id = arg->cancel_id;
@@ -185,7 +328,7 @@ int optee_open_session(struct tee_context *ctx,
                goto out;
        }
 
-       if (optee->ops->do_call_with_arg(ctx, shm)) {
+       if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }
@@ -212,26 +355,28 @@ int optee_open_session(struct tee_context *ctx,
                arg->ret_origin = msg_arg->ret_origin;
        }
 out:
-       tee_shm_free(shm);
+       optee_free_msg_arg(ctx, entry, offs);
 
        return rc;
 }
 
 int optee_close_session_helper(struct tee_context *ctx, u32 session)
 {
-       struct tee_shm *shm;
        struct optee *optee = tee_get_drvdata(ctx->teedev);
+       struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
+       struct tee_shm *shm;
+       u_int offs;
 
-       shm = optee_get_msg_arg(ctx, 0, &msg_arg);
-       if (IS_ERR(shm))
-               return PTR_ERR(shm);
+       msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
 
        msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
        msg_arg->session = session;
-       optee->ops->do_call_with_arg(ctx, shm);
+       optee->ops->do_call_with_arg(ctx, shm, offs);
 
-       tee_shm_free(shm);
+       optee_free_msg_arg(ctx, entry, offs);
 
        return 0;
 }
@@ -259,9 +404,11 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
 {
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
-       struct tee_shm *shm;
+       struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct optee_session *sess;
+       struct tee_shm *shm;
+       u_int offs;
        int rc;
 
        /* Check that the session is valid */
@@ -271,9 +418,10 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
        if (!sess)
                return -EINVAL;
 
-       shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg);
-       if (IS_ERR(shm))
-               return PTR_ERR(shm);
+       msg_arg = optee_get_msg_arg(ctx, arg->num_params,
+                                   &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
        msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
        msg_arg->func = arg->func;
        msg_arg->session = arg->session;
@@ -284,7 +432,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
        if (rc)
                goto out;
 
-       if (optee->ops->do_call_with_arg(ctx, shm)) {
+       if (optee->ops->do_call_with_arg(ctx, shm, offs)) {
                msg_arg->ret = TEEC_ERROR_COMMUNICATION;
                msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
        }
@@ -298,7 +446,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
        arg->ret = msg_arg->ret;
        arg->ret_origin = msg_arg->ret_origin;
 out:
-       tee_shm_free(shm);
+       optee_free_msg_arg(ctx, entry, offs);
        return rc;
 }
 
@@ -306,9 +454,11 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
 {
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_context_data *ctxdata = ctx->data;
-       struct tee_shm *shm;
+       struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct optee_session *sess;
+       struct tee_shm *shm;
+       u_int offs;
 
        /* Check that the session is valid */
        mutex_lock(&ctxdata->mutex);
@@ -317,16 +467,16 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
        if (!sess)
                return -EINVAL;
 
-       shm = optee_get_msg_arg(ctx, 0, &msg_arg);
-       if (IS_ERR(shm))
-               return PTR_ERR(shm);
+       msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
 
        msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
        msg_arg->session = session;
        msg_arg->cancel_id = cancel_id;
-       optee->ops->do_call_with_arg(ctx, shm);
+       optee->ops->do_call_with_arg(ctx, shm, offs);
 
-       tee_shm_free(shm);
+       optee_free_msg_arg(ctx, entry, offs);
        return 0;
 }
 
@@ -362,7 +512,7 @@ int optee_check_mem_type(unsigned long start, size_t num_pages)
         * Allow kernel address to register with OP-TEE as kernel
         * pages are configured as normal memory only.
         */
-       if (virt_addr_valid(start))
+       if (virt_addr_valid(start) || is_vmalloc_addr((void *)start))
                return 0;
 
        mmap_read_lock(mm);
index daf947e..daf0773 100644 (file)
@@ -171,6 +171,7 @@ void optee_remove_common(struct optee *optee)
        optee_unregister_devices();
 
        optee_notif_uninit(optee);
+       optee_shm_arg_cache_uninit(optee);
        teedev_close_context(optee->ctx);
        /*
         * The two devices have to be unregistered before we can free the
index a5eb4ef..cbabcde 100644 (file)
@@ -601,6 +601,7 @@ done:
  * optee_ffa_do_call_with_arg() - Do a FF-A call to enter OP-TEE in secure world
  * @ctx:       calling context
  * @shm:       shared memory holding the message to pass to secure world
+ * @offs:      offset of the message in @shm
  *
  * Does a FF-A call to OP-TEE in secure world and handles eventual resulting
  * Remote Procedure Calls (RPC) from OP-TEE.
@@ -609,24 +610,33 @@ done:
  */
 
 static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
-                                     struct tee_shm *shm)
+                                     struct tee_shm *shm, u_int offs)
 {
        struct ffa_send_direct_data data = {
                .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
                .data1 = (u32)shm->sec_world_id,
                .data2 = (u32)(shm->sec_world_id >> 32),
-               .data3 = shm->offset,
+               .data3 = offs,
        };
        struct optee_msg_arg *arg;
        unsigned int rpc_arg_offs;
        struct optee_msg_arg *rpc_arg;
 
-       arg = tee_shm_get_va(shm, 0);
+       /*
+        * The shared memory object has to start on a page when passed as
+        * an argument struct. This is also what the shm pool allocator
+        * returns, but check this before calling secure world to catch
+        * eventual errors early in case something changes.
+        */
+       if (shm->offset)
+               return -EINVAL;
+
+       arg = tee_shm_get_va(shm, offs);
        if (IS_ERR(arg))
                return PTR_ERR(arg);
 
        rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
-       rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+       rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
        if (IS_ERR(rpc_arg))
                return PTR_ERR(rpc_arg);
 
@@ -678,7 +688,8 @@ static bool optee_ffa_api_is_compatbile(struct ffa_device *ffa_dev,
 
 static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
                                    const struct ffa_dev_ops *ops,
-                                   unsigned int *rpc_arg_count)
+                                   u32 *sec_caps,
+                                   unsigned int *rpc_param_count)
 {
        struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
        int rc;
@@ -693,7 +704,8 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
                return false;
        }
 
-       *rpc_arg_count = (u8)data.data1;
+       *rpc_param_count = (u8)data.data1;
+       *sec_caps = data.data2;
 
        return true;
 }
@@ -759,7 +771,7 @@ static const struct optee_ops optee_ffa_ops = {
 
 static void optee_ffa_remove(struct ffa_device *ffa_dev)
 {
-       struct optee *optee = ffa_dev->dev.driver_data;
+       struct optee *optee = ffa_dev_get_drvdata(ffa_dev);
 
        optee_remove_common(optee);
 
@@ -772,11 +784,13 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev)
 static int optee_ffa_probe(struct ffa_device *ffa_dev)
 {
        const struct ffa_dev_ops *ffa_ops;
-       unsigned int rpc_arg_count;
+       unsigned int rpc_param_count;
        struct tee_shm_pool *pool;
        struct tee_device *teedev;
        struct tee_context *ctx;
+       u32 arg_cache_flags = 0;
        struct optee *optee;
+       u32 sec_caps;
        int rc;
 
        ffa_ops = ffa_dev_ops_get(ffa_dev);
@@ -788,8 +802,11 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
        if (!optee_ffa_api_is_compatbile(ffa_dev, ffa_ops))
                return -EINVAL;
 
-       if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &rpc_arg_count))
+       if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &sec_caps,
+                                    &rpc_param_count))
                return -EINVAL;
+       if (sec_caps & OPTEE_FFA_SEC_CAP_ARG_OFFSET)
+               arg_cache_flags |= OPTEE_SHM_ARG_SHARED;
 
        optee = kzalloc(sizeof(*optee), GFP_KERNEL);
        if (!optee)
@@ -805,7 +822,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
        optee->ops = &optee_ffa_ops;
        optee->ffa.ffa_dev = ffa_dev;
        optee->ffa.ffa_ops = ffa_ops;
-       optee->rpc_arg_count = rpc_arg_count;
+       optee->rpc_param_count = rpc_param_count;
 
        teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
                                  optee);
@@ -838,6 +855,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
        mutex_init(&optee->call_queue.mutex);
        INIT_LIST_HEAD(&optee->call_queue.waiters);
        optee_supp_init(&optee->supp);
+       optee_shm_arg_cache_init(optee, arg_cache_flags);
        ffa_dev_set_drvdata(ffa_dev, optee);
        ctx = teedev_open(optee->teedev);
        if (IS_ERR(ctx)) {
index ee3a03f..9726624 100644 (file)
  *                   as the second MSG arg struct for
  *                   OPTEE_FFA_YIELDING_CALL_WITH_ARG.
  *        Bit[31:8]: Reserved (MBZ)
- * w5-w7: Note used (MBZ)
+ * w5:   Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below,
+ *       unused bits MBZ.
+ * w6-w7: Not used (MBZ)
+ */
+/*
+ * Secure world supports giving an offset into the argument shared memory
+ * object, see also OPTEE_FFA_YIELDING_CALL_WITH_ARG
  */
+#define OPTEE_FFA_SEC_CAP_ARG_OFFSET   BIT(0)
+
 #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
 
 /*
  *       OPTEE_MSG_GET_ARG_SIZE(num_params) follows a struct optee_msg_arg
  *       for RPC, this struct has reserved space for the number of RPC
  *       parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ *       MBZ unless the bit OPTEE_FFA_SEC_CAP_ARG_OFFSET is received with
+ *       OPTEE_FFA_EXCHANGE_CAPABILITIES.
  * w7:    Not used (MBZ)
  * Resume from RPC. Register usage:
  * w3:    Service ID, OPTEE_FFA_YIELDING_CALL_RESUME
index e77765c..a33d98d 100644 (file)
@@ -59,6 +59,16 @@ struct optee_notif {
        u_long *bitmap;
 };
 
+#define OPTEE_SHM_ARG_ALLOC_PRIV       BIT(0)
+#define OPTEE_SHM_ARG_SHARED           BIT(1)
+struct optee_shm_arg_entry;
+struct optee_shm_arg_cache {
+       u32 flags;
+       /* Serializes access to this struct */
+       struct mutex mutex;
+       struct list_head shm_args;
+};
+
 /**
  * struct optee_supp - supplicant synchronization struct
  * @ctx                        the context of current connected supplicant.
@@ -121,7 +131,7 @@ struct optee;
  */
 struct optee_ops {
        int (*do_call_with_arg)(struct tee_context *ctx,
-                               struct tee_shm *shm_arg);
+                               struct tee_shm *shm_arg, u_int offs);
        int (*to_msg_param)(struct optee *optee,
                            struct optee_msg_param *msg_params,
                            size_t num_params, const struct tee_param *params);
@@ -143,7 +153,7 @@ struct optee_ops {
  * @notif:             notification synchronization struct
  * @supp:              supplicant synchronization struct for RPC to supplicant
  * @pool:              shared memory pool
- * @rpc_arg_count:     If > 0 number of RPC parameters to make room for
+ * @rpc_param_count:   If > 0 number of RPC parameters to make room for
  * @scan_bus_done      flag if device registation was already done.
  * @scan_bus_wq                workqueue to scan optee bus and register optee drivers
  * @scan_bus_work      workq to scan optee bus and register optee drivers
@@ -157,11 +167,12 @@ struct optee {
                struct optee_smc smc;
                struct optee_ffa ffa;
        };
+       struct optee_shm_arg_cache shm_arg_cache;
        struct optee_call_queue call_queue;
        struct optee_notif notif;
        struct optee_supp supp;
        struct tee_shm_pool *pool;
-       unsigned int rpc_arg_count;
+       unsigned int rpc_param_count;
        bool   scan_bus_done;
        struct workqueue_struct *scan_bus_wq;
        struct work_struct scan_bus_work;
@@ -273,8 +284,18 @@ void optee_cq_wait_for_completion(struct optee_call_queue *cq,
 void optee_cq_wait_final(struct optee_call_queue *cq,
                         struct optee_call_waiter *w);
 int optee_check_mem_type(unsigned long start, size_t num_pages);
-struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
-                                 struct optee_msg_arg **msg_arg);
+
+void optee_shm_arg_cache_init(struct optee *optee, u32 flags);
+void optee_shm_arg_cache_uninit(struct optee *optee);
+struct optee_msg_arg *optee_get_msg_arg(struct tee_context *ctx,
+                                       size_t num_params,
+                                       struct optee_shm_arg_entry **entry,
+                                       struct tee_shm **shm_ret,
+                                       u_int *offs);
+void optee_free_msg_arg(struct tee_context *ctx,
+                       struct optee_shm_arg_entry *entry, u_int offs);
+size_t optee_msg_arg_size(size_t rpc_param_count);
+
 
 struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
 void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
index d44a6ae..c60896c 100644 (file)
@@ -107,14 +107,22 @@ struct optee_smc_call_get_os_revision_result {
 /*
  * Call with struct optee_msg_arg as argument
  *
- * When calling this function normal world has a few responsibilities:
+ * When called with OPTEE_SMC_CALL_WITH_RPC_ARG or
+ * OPTEE_SMC_CALL_WITH_REGD_ARG in a0 there is one RPC struct optee_msg_arg
+ * following after the first struct optee_msg_arg. The RPC struct
+ * optee_msg_arg has reserved space for the number of RPC parameters as
+ * returned by OPTEE_SMC_EXCHANGE_CAPABILITIES.
+ *
+ * When calling these functions, normal world has a few responsibilities:
  * 1. It must be able to handle eventual RPCs
  * 2. Non-secure interrupts should not be masked
 * 3. If asynchronous notifications have been negotiated successfully, then
- *    asynchronous notifications should be unmasked during this call.
+ *    the interrupt for asynchronous notifications should be unmasked
+ *    during this call.
  *
- * Call register usage:
- * a0  SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
+ * Call register usage, OPTEE_SMC_CALL_WITH_ARG and
+ * OPTEE_SMC_CALL_WITH_RPC_ARG:
+ * a0  SMC Function ID, OPTEE_SMC_CALL_WITH_ARG or OPTEE_SMC_CALL_WITH_RPC_ARG
  * a1  Upper 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
  * a2  Lower 32 bits of a 64-bit physical pointer to a struct optee_msg_arg
  * a3  Cache settings, not used if physical pointer is in a predefined shared
@@ -122,6 +130,15 @@ struct optee_smc_call_get_os_revision_result {
  * a4-6        Not used
  * a7  Hypervisor Client ID register
  *
+ * Call register usage, OPTEE_SMC_CALL_WITH_REGD_ARG:
+ * a0  SMC Function ID, OPTEE_SMC_CALL_WITH_REGD_ARG
+ * a1  Upper 32 bits of a 64-bit shared memory cookie
+ * a2  Lower 32 bits of a 64-bit shared memory cookie
+ * a3  Offset of the struct optee_msg_arg in the shared memory with the
+ *     supplied cookie
+ * a4-6        Not used
+ * a7  Hypervisor Client ID register
+ *
  * Normal return register usage:
  * a0  Return value, OPTEE_SMC_RETURN_*
  * a1-3        Not used
@@ -154,6 +171,10 @@ struct optee_smc_call_get_os_revision_result {
 #define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
 #define OPTEE_SMC_CALL_WITH_ARG \
        OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
+#define OPTEE_SMC_CALL_WITH_RPC_ARG \
+       OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG)
+#define OPTEE_SMC_CALL_WITH_REGD_ARG \
+       OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG)
 
 /*
  * Get Shared Memory Config
@@ -202,7 +223,11 @@ struct optee_smc_get_shm_config_result {
  * a0  OPTEE_SMC_RETURN_OK
  * a1  bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
  * a2  The maximum secure world notification number
- * a3-7        Preserved
+ * a3  Bit[7:0]: Number of parameters needed for RPC to be supplied
+ *               as the second MSG arg struct for
+ *               OPTEE_SMC_CALL_WITH_ARG
+ *     Bit[31:8]: Reserved (MBZ)
+ * a4-7        Preserved
  *
  * Error return register usage:
  * a0  OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
@@ -227,6 +252,8 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_MEMREF_NULL          BIT(4)
 /* Secure world supports asynchronous notification of normal world */
 #define OPTEE_SMC_SEC_CAP_ASYNC_NOTIF          BIT(5)
+/* Secure world supports pre-allocating RPC arg struct */
+#define OPTEE_SMC_SEC_CAP_RPC_ARG              BIT(6)
 
 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
@@ -236,7 +263,7 @@ struct optee_smc_exchange_capabilities_result {
        unsigned long status;
        unsigned long capabilities;
        unsigned long max_notif_value;
-       unsigned long reserved0;
+       unsigned long data;
 };
 
 /*
@@ -358,6 +385,9 @@ struct optee_smc_disable_shm_cache_result {
  * should be called until all pended values have been retrieved. When a
  * value is retrieved, it's cleared from the record in secure world.
  *
+ * It is expected that this function is called from an interrupt handler
+ * in normal world.
+ *
  * Call requests usage:
  * a0  SMC Function ID, OPTEE_SMC_GET_ASYNC_NOTIF_VALUE
  * a1-6        Not used
@@ -390,6 +420,12 @@ struct optee_smc_disable_shm_cache_result {
 #define OPTEE_SMC_GET_ASYNC_NOTIF_VALUE \
        OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_ASYNC_NOTIF_VALUE)
 
+/* See OPTEE_SMC_CALL_WITH_RPC_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_RPC_ARG     18
+
+/* See OPTEE_SMC_CALL_WITH_REGD_ARG above */
+#define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG    19
+
 /*
  * Resume from RPC (for example after processing a foreign interrupt)
  *
index 67b7f7d..385cb0a 100644 (file)
@@ -437,6 +437,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
        struct optee_msg_arg *msg_arg;
        struct tee_shm *shm_arg;
        u64 *pages_list;
+       size_t sz;
        int rc;
 
        if (!num_pages)
@@ -450,15 +451,30 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
        if (!pages_list)
                return -ENOMEM;
 
-       shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+       /*
+        * We're about to register shared memory, so we can't register
+        * shared memory for this request or there's a catch-22.
+        *
+        * So in this case we'll have to do the good old temporary private
+        * allocation instead of using optee_get_msg_arg().
+        */
+       sz = optee_msg_arg_size(optee->rpc_param_count);
+       shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
        if (IS_ERR(shm_arg)) {
                rc = PTR_ERR(shm_arg);
                goto out;
        }
+       msg_arg = tee_shm_get_va(shm_arg, 0);
+       if (IS_ERR(msg_arg)) {
+               rc = PTR_ERR(msg_arg);
+               goto out;
+       }
 
        optee_fill_pages_list(pages_list, pages, num_pages,
                              tee_shm_get_page_offset(shm));
 
+       memset(msg_arg, 0, OPTEE_MSG_GET_ARG_SIZE(1));
+       msg_arg->num_params = 1;
        msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
        msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
                                OPTEE_MSG_ATTR_NONCONTIG;
@@ -471,7 +487,7 @@ static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
        msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
          (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
 
-       if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+       if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;
 
@@ -487,19 +503,37 @@ static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
        struct optee_msg_arg *msg_arg;
        struct tee_shm *shm_arg;
        int rc = 0;
+       size_t sz;
 
-       shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+       /*
+        * We're about to unregister shared memory and we may not be able
+        * to register shared memory for this request in case we're called
+        * from optee_shm_arg_cache_uninit().
+        *
+        * So in order to keep things simple in this function just as in
+        * optee_shm_register() we'll use temporary private allocation
+        * instead of using optee_get_msg_arg().
+        */
+       sz = optee_msg_arg_size(optee->rpc_param_count);
+       shm_arg = tee_shm_alloc_priv_buf(ctx, sz);
        if (IS_ERR(shm_arg))
                return PTR_ERR(shm_arg);
+       msg_arg = tee_shm_get_va(shm_arg, 0);
+       if (IS_ERR(msg_arg)) {
+               rc = PTR_ERR(msg_arg);
+               goto out;
+       }
 
+       memset(msg_arg, 0, sz);
+       msg_arg->num_params = 1;
        msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
-
        msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
        msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
 
-       if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+       if (optee->ops->do_call_with_arg(ctx, shm_arg, 0) ||
            msg_arg->ret != TEEC_SUCCESS)
                rc = -EINVAL;
+out:
        tee_shm_free(shm_arg);
        return rc;
 }
@@ -732,16 +766,9 @@ static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
 }
 
 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
-                               struct tee_shm *shm,
+                               struct optee_msg_arg *arg,
                                struct optee_call_ctx *call_ctx)
 {
-       struct optee_msg_arg *arg;
-
-       arg = tee_shm_get_va(shm, 0);
-       if (IS_ERR(arg)) {
-               pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
-               return;
-       }
 
        switch (arg->cmd) {
        case OPTEE_RPC_CMD_SHM_ALLOC:
@@ -765,11 +792,13 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
  * Result of RPC is written back into @param.
  */
 static void optee_handle_rpc(struct tee_context *ctx,
+                            struct optee_msg_arg *rpc_arg,
                             struct optee_rpc_param *param,
                             struct optee_call_ctx *call_ctx)
 {
        struct tee_device *teedev = ctx->teedev;
        struct optee *optee = tee_get_drvdata(teedev);
+       struct optee_msg_arg *arg;
        struct tee_shm *shm;
        phys_addr_t pa;
 
@@ -801,8 +830,19 @@ static void optee_handle_rpc(struct tee_context *ctx,
                 */
                break;
        case OPTEE_SMC_RPC_FUNC_CMD:
-               shm = reg_pair_to_ptr(param->a1, param->a2);
-               handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
+               if (rpc_arg) {
+                       arg = rpc_arg;
+               } else {
+                       shm = reg_pair_to_ptr(param->a1, param->a2);
+                       arg = tee_shm_get_va(shm, 0);
+                       if (IS_ERR(arg)) {
+                               pr_err("%s: tee_shm_get_va %p failed\n",
+                                      __func__, shm);
+                               break;
+                       }
+               }
+
+               handle_rpc_func_cmd(ctx, optee, arg, call_ctx);
                break;
        default:
                pr_warn("Unknown RPC func 0x%x\n",
@@ -816,7 +856,8 @@ static void optee_handle_rpc(struct tee_context *ctx,
 /**
  * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
  * @ctx:       calling context
- * @arg:       shared memory holding the message to pass to secure world
+ * @shm:       shared memory holding the message to pass to secure world
+ * @offs:      offset of the message in @shm
  *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
  * Remote Procedure Calls (RPC) from OP-TEE.
@@ -824,21 +865,46 @@ static void optee_handle_rpc(struct tee_context *ctx,
  * Returns return code from secure world, 0 is OK
  */
 static int optee_smc_do_call_with_arg(struct tee_context *ctx,
-                                     struct tee_shm *arg)
+                                     struct tee_shm *shm, u_int offs)
 {
        struct optee *optee = tee_get_drvdata(ctx->teedev);
        struct optee_call_waiter w;
        struct optee_rpc_param param = { };
        struct optee_call_ctx call_ctx = { };
-       phys_addr_t parg;
+       struct optee_msg_arg *rpc_arg = NULL;
        int rc;
 
-       rc = tee_shm_get_pa(arg, 0, &parg);
-       if (rc)
-               return rc;
+       if (optee->rpc_param_count) {
+               struct optee_msg_arg *arg;
+               unsigned int rpc_arg_offs;
+
+               arg = tee_shm_get_va(shm, offs);
+               if (IS_ERR(arg))
+                       return PTR_ERR(arg);
+
+               rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+               rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs);
+               if (IS_ERR(rpc_arg))
+                       return PTR_ERR(rpc_arg);
+       }
+
+       if (rpc_arg && tee_shm_is_dynamic(shm)) {
+               param.a0 = OPTEE_SMC_CALL_WITH_REGD_ARG;
+               reg_pair_from_64(&param.a1, &param.a2, (u_long)shm);
+               param.a3 = offs;
+       } else {
+               phys_addr_t parg;
 
-       param.a0 = OPTEE_SMC_CALL_WITH_ARG;
-       reg_pair_from_64(&param.a1, &param.a2, parg);
+               rc = tee_shm_get_pa(shm, offs, &parg);
+               if (rc)
+                       return rc;
+
+               if (rpc_arg)
+                       param.a0 = OPTEE_SMC_CALL_WITH_RPC_ARG;
+               else
+                       param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+               reg_pair_from_64(&param.a1, &param.a2, parg);
+       }
        /* Initialize waiter */
        optee_cq_wait_init(&optee->call_queue, &w);
        while (true) {
@@ -862,7 +928,7 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
                        param.a1 = res.a1;
                        param.a2 = res.a2;
                        param.a3 = res.a3;
-                       optee_handle_rpc(ctx, &param, &call_ctx);
+                       optee_handle_rpc(ctx, rpc_arg, &param, &call_ctx);
                } else {
                        rc = res.a0;
                        break;
@@ -881,17 +947,19 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,
 
 static int simple_call_with_arg(struct tee_context *ctx, u32 cmd)
 {
+       struct optee_shm_arg_entry *entry;
        struct optee_msg_arg *msg_arg;
        struct tee_shm *shm;
+       u_int offs;
 
-       shm = optee_get_msg_arg(ctx, 0, &msg_arg);
-       if (IS_ERR(shm))
-               return PTR_ERR(shm);
+       msg_arg = optee_get_msg_arg(ctx, 0, &entry, &shm, &offs);
+       if (IS_ERR(msg_arg))
+               return PTR_ERR(msg_arg);
 
        msg_arg->cmd = cmd;
-       optee_smc_do_call_with_arg(ctx, shm);
+       optee_smc_do_call_with_arg(ctx, shm, offs);
 
-       tee_shm_free(shm);
+       optee_free_msg_arg(ctx, entry, offs);
        return 0;
 }
 
@@ -1118,7 +1186,8 @@ static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
 }
 
 static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
-                                           u32 *sec_caps, u32 *max_notif_value)
+                                           u32 *sec_caps, u32 *max_notif_value,
+                                           unsigned int *rpc_param_count)
 {
        union {
                struct arm_smccc_res smccc;
@@ -1145,6 +1214,10 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
                *max_notif_value = res.result.max_notif_value;
        else
                *max_notif_value = OPTEE_DEFAULT_MAX_NOTIF_VALUE;
+       if (*sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+               *rpc_param_count = (u8)res.result.data;
+       else
+               *rpc_param_count = 0;
 
        return true;
 }
@@ -1251,7 +1324,8 @@ static int optee_smc_remove(struct platform_device *pdev)
         * reference counters and also avoid wild pointers in secure world
         * into the old shared memory range.
         */
-       optee_disable_shm_cache(optee);
+       if (!optee->rpc_param_count)
+               optee_disable_shm_cache(optee);
 
        optee_smc_notif_uninit_irq(optee);
 
@@ -1274,7 +1348,10 @@ static int optee_smc_remove(struct platform_device *pdev)
  */
 static void optee_shutdown(struct platform_device *pdev)
 {
-       optee_disable_shm_cache(platform_get_drvdata(pdev));
+       struct optee *optee = platform_get_drvdata(pdev);
+
+       if (!optee->rpc_param_count)
+               optee_disable_shm_cache(optee);
 }
 
 static int optee_probe(struct platform_device *pdev)
@@ -1283,9 +1360,11 @@ static int optee_probe(struct platform_device *pdev)
        struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
        struct optee *optee = NULL;
        void *memremaped_shm = NULL;
+       unsigned int rpc_param_count;
        struct tee_device *teedev;
        struct tee_context *ctx;
        u32 max_notif_value;
+       u32 arg_cache_flags;
        u32 sec_caps;
        int rc;
 
@@ -1306,7 +1385,8 @@ static int optee_probe(struct platform_device *pdev)
        }
 
        if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps,
-                                            &max_notif_value)) {
+                                            &max_notif_value,
+                                            &rpc_param_count)) {
                pr_warn("capabilities mismatch\n");
                return -EINVAL;
        }
@@ -1314,14 +1394,48 @@ static int optee_probe(struct platform_device *pdev)
        /*
         * Try to use dynamic shared memory if possible
         */
-       if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+       if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+               /*
+                * If we have OPTEE_SMC_SEC_CAP_RPC_ARG we can ask
+                * optee_get_msg_arg() to pre-register (by having
+                * OPTEE_SHM_ARG_ALLOC_PRIV cleared) the page used to pass
+                * an argument struct.
+                *
+                * With the page pre-registered we can use a non-zero
+                * offset for argument struct, this is indicated with
+                * OPTEE_SHM_ARG_SHARED.
+                *
+                * This means that optee_smc_do_call_with_arg() will use
+                * OPTEE_SMC_CALL_WITH_REGD_ARG for pre-registered pages.
+                */
+               if (sec_caps & OPTEE_SMC_SEC_CAP_RPC_ARG)
+                       arg_cache_flags = OPTEE_SHM_ARG_SHARED;
+               else
+                       arg_cache_flags = OPTEE_SHM_ARG_ALLOC_PRIV;
+
                pool = optee_shm_pool_alloc_pages();
+       }
 
        /*
         * If dynamic shared memory is not available or failed - try static one
         */
-       if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+       if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM)) {
+               /*
+                * The static memory pool can use non-zero page offsets so
+                * let optee_get_msg_arg() know that with OPTEE_SHM_ARG_SHARED.
+                *
+                * optee_get_msg_arg() should not pre-register the
+                * allocated page used to pass an argument struct, this is
+                * indicated with OPTEE_SHM_ARG_ALLOC_PRIV.
+                *
+                * This means that optee_smc_do_call_with_arg() will use
+                * OPTEE_SMC_CALL_WITH_ARG if rpc_param_count is 0, else
+                * OPTEE_SMC_CALL_WITH_RPC_ARG.
+                */
+               arg_cache_flags = OPTEE_SHM_ARG_SHARED |
+                                 OPTEE_SHM_ARG_ALLOC_PRIV;
                pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+       }
 
        if (IS_ERR(pool))
                return PTR_ERR(pool);
@@ -1335,6 +1449,7 @@ static int optee_probe(struct platform_device *pdev)
        optee->ops = &optee_ops;
        optee->smc.invoke_fn = invoke_fn;
        optee->smc.sec_caps = sec_caps;
+       optee->rpc_param_count = rpc_param_count;
 
        teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
        if (IS_ERR(teedev)) {
@@ -1363,6 +1478,7 @@ static int optee_probe(struct platform_device *pdev)
        optee_supp_init(&optee->supp);
        optee->smc.memremaped_shm = memremaped_shm;
        optee->pool = pool;
+       optee_shm_arg_cache_init(optee, arg_cache_flags);
 
        platform_set_drvdata(pdev, optee);
        ctx = teedev_open(optee->teedev);
@@ -1403,7 +1519,12 @@ static int optee_probe(struct platform_device *pdev)
         */
        optee_disable_unmapped_shm_cache(optee);
 
-       optee_enable_shm_cache(optee);
+       /*
+        * Only enable the shm cache in case we're not able to pass the RPC
+        * arg struct right after the normal arg struct.
+        */
+       if (!optee->rpc_param_count)
+               optee_enable_shm_cache(optee);
 
        if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
                pr_info("dynamic shared memory is enabled\n");
@@ -1416,7 +1537,8 @@ static int optee_probe(struct platform_device *pdev)
        return 0;
 
 err_disable_shm_cache:
-       optee_disable_shm_cache(optee);
+       if (!optee->rpc_param_count)
+               optee_disable_shm_cache(optee);
        optee_smc_notif_uninit_irq(optee);
        optee_unregister_devices();
 err_notif_uninit:
@@ -1424,6 +1546,7 @@ err_notif_uninit:
 err_close_ctx:
        teedev_close_context(ctx);
 err_supp_uninit:
+       optee_shm_arg_cache_uninit(optee);
        optee_supp_uninit(&optee->supp);
        mutex_destroy(&optee->call_queue.mutex);
 err_unreg_supp_teedev:
index 8aa1a48..af0f7c6 100644 (file)
@@ -302,7 +302,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
                return PTR_ERR(shm);
 
        data.id = shm->id;
-       data.flags = shm->flags;
        data.size = shm->size;
 
        if (copy_to_user(udata, &data, sizeof(data)))
@@ -339,7 +338,6 @@ tee_ioctl_shm_register(struct tee_context *ctx,
                return PTR_ERR(shm);
 
        data.id = shm->id;
-       data.flags = shm->flags;
        data.length = shm->size;
 
        if (copy_to_user(udata, &data, sizeof(data)))
index f31e29e..f2b1bce 100644 (file)
@@ -23,21 +23,36 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
 static int shm_get_kernel_pages(unsigned long start, size_t page_count,
                                struct page **pages)
 {
-       struct kvec *kiov;
        size_t n;
        int rc;
 
-       kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
-       if (!kiov)
-               return -ENOMEM;
+       if (is_vmalloc_addr((void *)start)) {
+               struct page *page;
 
-       for (n = 0; n < page_count; n++) {
-               kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
-               kiov[n].iov_len = PAGE_SIZE;
-       }
+               for (n = 0; n < page_count; n++) {
+                       page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
+                       if (!page)
+                               return -ENOMEM;
+
+                       get_page(page);
+                       pages[n] = page;
+               }
+               rc = page_count;
+       } else {
+               struct kvec *kiov;
+
+               kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+               if (!kiov)
+                       return -ENOMEM;
+
+               for (n = 0; n < page_count; n++) {
+                       kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+                       kiov[n].iov_len = PAGE_SIZE;
+               }
 
-       rc = get_kernel_pages(kiov, page_count, 0, pages);
-       kfree(kiov);
+               rc = get_kernel_pages(kiov, page_count, 0, pages);
+               kfree(kiov);
+       }
 
        return rc;
 }
@@ -415,56 +430,6 @@ void tee_shm_free(struct tee_shm *shm)
 EXPORT_SYMBOL_GPL(tee_shm_free);
 
 /**
- * tee_shm_va2pa() - Get physical address of a virtual address
- * @shm:       Shared memory handle
- * @va:                Virtual address to tranlsate
- * @pa:                Returned physical address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
-{
-       if (!shm->kaddr)
-               return -EINVAL;
-       /* Check that we're in the range of the shm */
-       if ((char *)va < (char *)shm->kaddr)
-               return -EINVAL;
-       if ((char *)va >= ((char *)shm->kaddr + shm->size))
-               return -EINVAL;
-
-       return tee_shm_get_pa(
-                       shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
-}
-EXPORT_SYMBOL_GPL(tee_shm_va2pa);
-
-/**
- * tee_shm_pa2va() - Get virtual address of a physical address
- * @shm:       Shared memory handle
- * @pa:                Physical address to tranlsate
- * @va:                Returned virtual address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
-{
-       if (!shm->kaddr)
-               return -EINVAL;
-       /* Check that we're in the range of the shm */
-       if (pa < shm->paddr)
-               return -EINVAL;
-       if (pa >= (shm->paddr + shm->size))
-               return -EINVAL;
-
-       if (va) {
-               void *v = tee_shm_get_va(shm, pa - shm->paddr);
-
-               if (IS_ERR(v))
-                       return PTR_ERR(v);
-               *va = v;
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(tee_shm_pa2va);
-
-/**
  * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
  * @shm:       Shared memory handle
  * @offs:      Offset from start of this shared memory
index 8a69583..3acc0f1 100644 (file)
@@ -436,31 +436,31 @@ static void mpc512x_psc_fifo_init(struct uart_port *port)
        out_be32(&FIFO_512x(port)->rximr, MPC512x_PSC_FIFO_ALARM);
 }
 
-static int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_rx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_512x(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
 }
 
-static int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_raw_tx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_512x(port)->txsr) & MPC512x_PSC_FIFO_FULL);
 }
 
-static int mpc512x_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_rx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_512x(port)->rxsr)
            & in_be32(&FIFO_512x(port)->rximr)
            & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc512x_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_512x(port)->txsr)
            & in_be32(&FIFO_512x(port)->tximr)
            & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc512x_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc512x_psc_tx_empty(struct uart_port *port)
 {
        return in_be32(&FIFO_512x(port)->txsr)
            & MPC512x_PSC_FIFO_EMPTY;
@@ -780,29 +780,29 @@ static void mpc5125_psc_fifo_init(struct uart_port *port)
        out_be32(&FIFO_5125(port)->rximr, MPC512x_PSC_FIFO_ALARM);
 }
 
-static int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_rx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_5125(port)->rxsr) & MPC512x_PSC_FIFO_EMPTY);
 }
 
-static int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_raw_tx_rdy(struct uart_port *port)
 {
        return !(in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_FULL);
 }
 
-static int mpc5125_psc_rx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_rx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_5125(port)->rxsr) &
               in_be32(&FIFO_5125(port)->rximr) & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc5125_psc_tx_rdy(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_rdy(struct uart_port *port)
 {
        return in_be32(&FIFO_5125(port)->txsr) &
               in_be32(&FIFO_5125(port)->tximr) & MPC512x_PSC_FIFO_ALARM;
 }
 
-static int mpc5125_psc_tx_empty(struct uart_port *port)
+static unsigned int mpc5125_psc_tx_empty(struct uart_port *port)
 {
        return in_be32(&FIFO_5125(port)->txsr) & MPC512x_PSC_FIFO_EMPTY;
 }
index 2f4fb09..7900130 100644 (file)
@@ -163,6 +163,7 @@ struct mlx5_vdpa_net {
        u32 cur_num_vqs;
        struct notifier_block nb;
        struct vdpa_callback config_cb;
+       struct mlx5_vdpa_wq_ent cvq_ent;
 };
 
 static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -1658,6 +1659,12 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
        mvdev = wqent->mvdev;
        ndev = to_mlx5_vdpa_ndev(mvdev);
        cvq = &mvdev->cvq;
+
+       mutex_lock(&ndev->reslock);
+
+       if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+               goto out;
+
        if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
                goto out;
 
@@ -1696,9 +1703,13 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
 
                if (vringh_need_notify_iotlb(&cvq->vring))
                        vringh_notify(&cvq->vring);
+
+               queue_work(mvdev->wq, &wqent->work);
+               break;
        }
+
 out:
-       kfree(wqent);
+       mutex_unlock(&ndev->reslock);
 }
 
 static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
@@ -1706,7 +1717,6 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        struct mlx5_vdpa_virtqueue *mvq;
-       struct mlx5_vdpa_wq_ent *wqent;
 
        if (!is_index_valid(mvdev, idx))
                return;
@@ -1715,13 +1725,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
                if (!mvdev->wq || !mvdev->cvq.ready)
                        return;
 
-               wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-               if (!wqent)
-                       return;
-
-               wqent->mvdev = mvdev;
-               INIT_WORK(&wqent->work, mlx5_cvq_kick_handler);
-               queue_work(mvdev->wq, &wqent->work);
+               queue_work(mvdev->wq, &ndev->cvq_ent.work);
                return;
        }
 
@@ -2180,7 +2184,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
                goto err_mr;
 
        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
-               return 0;
+               goto err_mr;
 
        restore_channels_info(ndev);
        err = setup_driver(mvdev);
@@ -2195,12 +2199,14 @@ err_mr:
        return err;
 }
 
+/* reslock must be held for this function */
 static int setup_driver(struct mlx5_vdpa_dev *mvdev)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        int err;
 
-       mutex_lock(&ndev->reslock);
+       WARN_ON(!mutex_is_locked(&ndev->reslock));
+
        if (ndev->setup) {
                mlx5_vdpa_warn(mvdev, "setup driver called for already setup driver\n");
                err = 0;
@@ -2230,7 +2236,6 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
                goto err_fwd;
        }
        ndev->setup = true;
-       mutex_unlock(&ndev->reslock);
 
        return 0;
 
@@ -2241,23 +2246,23 @@ err_tir:
 err_rqt:
        teardown_virtqueues(ndev);
 out:
-       mutex_unlock(&ndev->reslock);
        return err;
 }
 
+/* reslock must be held for this function */
 static void teardown_driver(struct mlx5_vdpa_net *ndev)
 {
-       mutex_lock(&ndev->reslock);
+
+       WARN_ON(!mutex_is_locked(&ndev->reslock));
+
        if (!ndev->setup)
-               goto out;
+               return;
 
        remove_fwd_to_tir(ndev);
        destroy_tir(ndev);
        destroy_rqt(ndev);
        teardown_virtqueues(ndev);
        ndev->setup = false;
-out:
-       mutex_unlock(&ndev->reslock);
 }
 
 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
@@ -2278,6 +2283,8 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 
        print_status(mvdev, status, true);
 
+       mutex_lock(&ndev->reslock);
+
        if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                        err = setup_driver(mvdev);
@@ -2287,16 +2294,19 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
                        }
                } else {
                        mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n");
-                       return;
+                       goto err_clear;
                }
        }
 
        ndev->mvdev.status = status;
+       mutex_unlock(&ndev->reslock);
        return;
 
 err_setup:
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
        ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
+err_clear:
+       mutex_unlock(&ndev->reslock);
 }
 
 static int mlx5_vdpa_reset(struct vdpa_device *vdev)
@@ -2306,6 +2316,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
 
        print_status(mvdev, 0, true);
        mlx5_vdpa_info(mvdev, "performing device reset\n");
+
+       mutex_lock(&ndev->reslock);
        teardown_driver(ndev);
        clear_vqs_ready(ndev);
        mlx5_vdpa_destroy_mr(&ndev->mvdev);
@@ -2318,6 +2330,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
                if (mlx5_vdpa_create_mr(mvdev, NULL))
                        mlx5_vdpa_warn(mvdev, "create MR failed\n");
        }
+       mutex_unlock(&ndev->reslock);
 
        return 0;
 }
@@ -2353,19 +2366,24 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        bool change_map;
        int err;
 
+       mutex_lock(&ndev->reslock);
+
        err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
        if (err) {
                mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-               return err;
+               goto err;
        }
 
        if (change_map)
-               return mlx5_vdpa_change_map(mvdev, iotlb);
+               err = mlx5_vdpa_change_map(mvdev, iotlb);
 
-       return 0;
+err:
+       mutex_unlock(&ndev->reslock);
+       return err;
 }
 
 static void mlx5_vdpa_free(struct vdpa_device *vdev)
@@ -2740,6 +2758,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
        if (err)
                goto err_mr;
 
+       ndev->cvq_ent.mvdev = mvdev;
+       INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler);
        mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
        if (!mvdev->wq) {
                err = -ENOMEM;
index b7bb16f..06b6f35 100644 (file)
@@ -36,6 +36,10 @@ static bool nointxmask;
 static bool disable_vga;
 static bool disable_idle_d3;
 
+/* List of PFs that vfio_pci_core_sriov_configure() has been called on */
+static DEFINE_MUTEX(vfio_pci_sriov_pfs_mutex);
+static LIST_HEAD(vfio_pci_sriov_pfs);
+
 static inline bool vfio_vga_disabled(void)
 {
 #ifdef CONFIG_VFIO_PCI_VGA
@@ -434,47 +438,17 @@ out:
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_disable);
 
-static struct vfio_pci_core_device *get_pf_vdev(struct vfio_pci_core_device *vdev)
-{
-       struct pci_dev *physfn = pci_physfn(vdev->pdev);
-       struct vfio_device *pf_dev;
-
-       if (!vdev->pdev->is_virtfn)
-               return NULL;
-
-       pf_dev = vfio_device_get_from_dev(&physfn->dev);
-       if (!pf_dev)
-               return NULL;
-
-       if (pci_dev_driver(physfn) != pci_dev_driver(vdev->pdev)) {
-               vfio_device_put(pf_dev);
-               return NULL;
-       }
-
-       return container_of(pf_dev, struct vfio_pci_core_device, vdev);
-}
-
-static void vfio_pci_vf_token_user_add(struct vfio_pci_core_device *vdev, int val)
-{
-       struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
-
-       if (!pf_vdev)
-               return;
-
-       mutex_lock(&pf_vdev->vf_token->lock);
-       pf_vdev->vf_token->users += val;
-       WARN_ON(pf_vdev->vf_token->users < 0);
-       mutex_unlock(&pf_vdev->vf_token->lock);
-
-       vfio_device_put(&pf_vdev->vdev);
-}
-
 void vfio_pci_core_close_device(struct vfio_device *core_vdev)
 {
        struct vfio_pci_core_device *vdev =
                container_of(core_vdev, struct vfio_pci_core_device, vdev);
 
-       vfio_pci_vf_token_user_add(vdev, -1);
+       if (vdev->sriov_pf_core_dev) {
+               mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+               WARN_ON(!vdev->sriov_pf_core_dev->vf_token->users);
+               vdev->sriov_pf_core_dev->vf_token->users--;
+               mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+       }
        vfio_spapr_pci_eeh_release(vdev->pdev);
        vfio_pci_core_disable(vdev);
 
@@ -495,7 +469,12 @@ void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev)
 {
        vfio_pci_probe_mmaps(vdev);
        vfio_spapr_pci_eeh_open(vdev->pdev);
-       vfio_pci_vf_token_user_add(vdev, 1);
+
+       if (vdev->sriov_pf_core_dev) {
+               mutex_lock(&vdev->sriov_pf_core_dev->vf_token->lock);
+               vdev->sriov_pf_core_dev->vf_token->users++;
+               mutex_unlock(&vdev->sriov_pf_core_dev->vf_token->lock);
+       }
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable);
 
@@ -1583,11 +1562,8 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
         *
         * If the VF token is provided but unused, an error is generated.
         */
-       if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token)
-               return 0; /* No VF token provided or required */
-
        if (vdev->pdev->is_virtfn) {
-               struct vfio_pci_core_device *pf_vdev = get_pf_vdev(vdev);
+               struct vfio_pci_core_device *pf_vdev = vdev->sriov_pf_core_dev;
                bool match;
 
                if (!pf_vdev) {
@@ -1600,7 +1576,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
                }
 
                if (!vf_token) {
-                       vfio_device_put(&pf_vdev->vdev);
                        pci_info_ratelimited(vdev->pdev,
                                "VF token required to access device\n");
                        return -EACCES;
@@ -1610,8 +1585,6 @@ static int vfio_pci_validate_vf_token(struct vfio_pci_core_device *vdev,
                match = uuid_equal(uuid, &pf_vdev->vf_token->uuid);
                mutex_unlock(&pf_vdev->vf_token->lock);
 
-               vfio_device_put(&pf_vdev->vdev);
-
                if (!match) {
                        pci_info_ratelimited(vdev->pdev,
                                "Incorrect VF token provided for device\n");
@@ -1732,8 +1705,30 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
 static int vfio_pci_vf_init(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
+       struct vfio_pci_core_device *cur;
+       struct pci_dev *physfn;
        int ret;
 
+       if (pdev->is_virtfn) {
+               /*
+                * If this VF was created by our vfio_pci_core_sriov_configure()
+                * then we can find the PF vfio_pci_core_device now, and due to
+                * the locking in pci_disable_sriov() it cannot change until
+                * this VF device driver is removed.
+                */
+               physfn = pci_physfn(vdev->pdev);
+               mutex_lock(&vfio_pci_sriov_pfs_mutex);
+               list_for_each_entry(cur, &vfio_pci_sriov_pfs, sriov_pfs_item) {
+                       if (cur->pdev == physfn) {
+                               vdev->sriov_pf_core_dev = cur;
+                               break;
+                       }
+               }
+               mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+               return 0;
+       }
+
+       /* Not a SRIOV PF */
        if (!pdev->is_physfn)
                return 0;
 
@@ -1805,6 +1800,7 @@ void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
        INIT_LIST_HEAD(&vdev->ioeventfds_list);
        mutex_init(&vdev->vma_lock);
        INIT_LIST_HEAD(&vdev->vma_list);
+       INIT_LIST_HEAD(&vdev->sriov_pfs_item);
        init_rwsem(&vdev->memory_lock);
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_init_device);
@@ -1896,7 +1892,7 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
 {
        struct pci_dev *pdev = vdev->pdev;
 
-       pci_disable_sriov(pdev);
+       vfio_pci_core_sriov_configure(pdev, 0);
 
        vfio_unregister_group_dev(&vdev->vdev);
 
@@ -1935,21 +1931,49 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_aer_err_detected);
 
 int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
 {
+       struct vfio_pci_core_device *vdev;
        struct vfio_device *device;
        int ret = 0;
 
+       device_lock_assert(&pdev->dev);
+
        device = vfio_device_get_from_dev(&pdev->dev);
        if (!device)
                return -ENODEV;
 
-       if (nr_virtfn == 0)
-               pci_disable_sriov(pdev);
-       else
+       vdev = container_of(device, struct vfio_pci_core_device, vdev);
+
+       if (nr_virtfn) {
+               mutex_lock(&vfio_pci_sriov_pfs_mutex);
+               /*
+                * The thread that adds the vdev to the list is the only thread
+                * that gets to call pci_enable_sriov() and we will only allow
+                * it to be called once without going through
+                * pci_disable_sriov()
+                */
+               if (!list_empty(&vdev->sriov_pfs_item)) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+               list_add_tail(&vdev->sriov_pfs_item, &vfio_pci_sriov_pfs);
+               mutex_unlock(&vfio_pci_sriov_pfs_mutex);
                ret = pci_enable_sriov(pdev, nr_virtfn);
+               if (ret)
+                       goto out_del;
+               ret = nr_virtfn;
+               goto out_put;
+       }
 
-       vfio_device_put(device);
+       pci_disable_sriov(pdev);
 
-       return ret < 0 ? ret : nr_virtfn;
+out_del:
+       mutex_lock(&vfio_pci_sriov_pfs_mutex);
+       list_del_init(&vdev->sriov_pfs_item);
+out_unlock:
+       mutex_unlock(&vfio_pci_sriov_pfs_mutex);
+out_put:
+       vfio_device_put(device);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_pci_core_sriov_configure);
 
index 34d6bb1..a6bb0e4 100644 (file)
@@ -1579,7 +1579,14 @@ static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
                         * If it's not a platform device, at least print a warning. A
                         * fix would add code to remove the device from the system.
                         */
-                       if (dev_is_platform(device)) {
+                       if (!device) {
+                               /* TODO: Represent each OF framebuffer as its own
+                                * device in the device hierarchy. For now, offb
+                                * doesn't have such a device, so unregister the
+                                * framebuffer as before without warning.
+                                */
+                               do_unregister_framebuffer(registered_fb[i]);
+                       } else if (dev_is_platform(device)) {
                                registered_fb[i]->forced_out = true;
                                platform_device_unregister(to_platform_device(device));
                        } else {
index 75c8d56..22f15f4 100644 (file)
@@ -526,9 +526,8 @@ int virtio_device_restore(struct virtio_device *dev)
                        goto err;
        }
 
-       /* If restore didn't do it, mark device DRIVER_OK ourselves. */
-       if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK))
-               virtio_device_ready(dev);
+       /* Finally, tell the device we're all set */
+       virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
 
        virtio_config_enable(dev);
 
index dfe26fa..617a7f4 100644 (file)
@@ -689,29 +689,34 @@ void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(xen_free_ballooned_pages);
 
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-static void __init balloon_add_region(unsigned long start_pfn,
-                                     unsigned long pages)
+static void __init balloon_add_regions(void)
 {
+#if defined(CONFIG_XEN_PV)
+       unsigned long start_pfn, pages;
        unsigned long pfn, extra_pfn_end;
+       unsigned int i;
 
-       /*
-        * If the amount of usable memory has been limited (e.g., with
-        * the 'mem' command line parameter), don't add pages beyond
-        * this limit.
-        */
-       extra_pfn_end = min(max_pfn, start_pfn + pages);
+       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+               pages = xen_extra_mem[i].n_pfns;
+               if (!pages)
+                       continue;
 
-       for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-               /* totalram_pages and totalhigh_pages do not
-                  include the boot-time balloon extension, so
-                  don't subtract from it. */
-               balloon_append(pfn_to_page(pfn));
-       }
+               start_pfn = xen_extra_mem[i].start_pfn;
 
-       balloon_stats.total_pages += extra_pfn_end - start_pfn;
-}
+               /*
+                * If the amount of usable memory has been limited (e.g., with
+                * the 'mem' command line parameter), don't add pages beyond
+                * this limit.
+                */
+               extra_pfn_end = min(max_pfn, start_pfn + pages);
+
+               for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
+                       balloon_append(pfn_to_page(pfn));
+
+               balloon_stats.total_pages += extra_pfn_end - start_pfn;
+       }
 #endif
+}
 
 static int __init balloon_init(void)
 {
@@ -745,20 +750,7 @@ static int __init balloon_init(void)
        register_sysctl_table(xen_root);
 #endif
 
-#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
-       {
-               int i;
-
-               /*
-                * Initialize the balloon with pages from the extra memory
-                * regions (see arch/x86/xen/setup.c).
-                */
-               for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++)
-                       if (xen_extra_mem[i].n_pfns)
-                               balloon_add_region(xen_extra_mem[i].start_pfn,
-                                                  xen_extra_mem[i].n_pfns);
-       }
-#endif
+       balloon_add_regions();
 
        task = kthread_run(balloon_thread, NULL, "xen-balloon");
        if (IS_ERR(task)) {
index a8b4105..a39f2d3 100644 (file)
@@ -230,39 +230,6 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
 }
 EXPORT_SYMBOL(xen_free_unpopulated_pages);
 
-#ifdef CONFIG_XEN_PV
-static int __init init(void)
-{
-       unsigned int i;
-
-       if (!xen_domain())
-               return -ENODEV;
-
-       if (!xen_pv_domain())
-               return 0;
-
-       /*
-        * Initialize with pages from the extra memory regions (see
-        * arch/x86/xen/setup.c).
-        */
-       for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
-               unsigned int j;
-
-               for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
-                       struct page *pg =
-                               pfn_to_page(xen_extra_mem[i].start_pfn + j);
-
-                       pg->zone_device_data = page_list;
-                       page_list = pg;
-                       list_count++;
-               }
-       }
-
-       return 0;
-}
-subsys_initcall(init);
-#endif
-
 static int __init unpopulated_init(void)
 {
        int ret;
index 6bcf147..4763132 100644 (file)
@@ -616,8 +616,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
                _debug("write discard %x @%llx [%llx]", len, start, i_size);
 
                /* The dirty region was entirely beyond the EOF. */
-               fscache_clear_page_bits(afs_vnode_cache(vnode),
-                                       mapping, start, len, caching);
+               fscache_clear_page_bits(mapping, start, len, caching);
                afs_pages_written_back(vnode, start, len);
                ret = 0;
        }
index 6556e13..63c7ebb 100644 (file)
@@ -1117,11 +1117,11 @@ out_free_interp:
                         * independently randomized mmap region (0 load_bias
                         * without MAP_FIXED nor MAP_FIXED_NOREPLACE).
                         */
-                       alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
-                       if (interpreter || alignment > ELF_MIN_ALIGN) {
+                       if (interpreter) {
                                load_bias = ELF_ET_DYN_BASE;
                                if (current->flags & PF_RANDOMIZE)
                                        load_bias += arch_mmap_rnd();
+                               alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
                                if (alignment)
                                        load_bias &= ~(alignment - 1);
                                elf_flags |= MAP_FIXED_NOREPLACE;
index c22d287..0dd6de9 100644 (file)
@@ -2503,12 +2503,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
                return ERR_PTR(ret);
        }
 
-       /*
-        * New block group is likely to be used soon. Try to activate it now.
-        * Failure is OK for now.
-        */
-       btrfs_zone_activate(cache);
-
        ret = exclude_super_stripes(cache);
        if (ret) {
                /* We may have excluded something, so call this just in case */
@@ -2946,7 +2940,6 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
        struct btrfs_path *path = NULL;
        LIST_HEAD(dirty);
        struct list_head *io = &cur_trans->io_bgs;
-       int num_started = 0;
        int loops = 0;
 
        spin_lock(&cur_trans->dirty_bgs_lock);
@@ -3012,7 +3005,6 @@ again:
                        cache->io_ctl.inode = NULL;
                        ret = btrfs_write_out_cache(trans, cache, path);
                        if (ret == 0 && cache->io_ctl.inode) {
-                               num_started++;
                                should_put = 0;
 
                                /*
@@ -3113,7 +3105,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
        int should_put;
        struct btrfs_path *path;
        struct list_head *io = &cur_trans->io_bgs;
-       int num_started = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3171,7 +3162,6 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
                        cache->io_ctl.inode = NULL;
                        ret = btrfs_write_out_cache(trans, cache, path);
                        if (ret == 0 && cache->io_ctl.inode) {
-                               num_started++;
                                should_put = 0;
                                list_add_tail(&cache->io_list, io);
                        } else {
@@ -3455,7 +3445,7 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
        return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
 }
 
-static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
+static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 {
        struct btrfs_block_group *bg;
        int ret;
@@ -3542,7 +3532,11 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
 out:
        btrfs_trans_release_chunk_metadata(trans);
 
-       return ret;
+       if (ret)
+               return ERR_PTR(ret);
+
+       btrfs_get_block_group(bg);
+       return bg;
 }
 
 /*
@@ -3657,10 +3651,17 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 {
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_space_info *space_info;
+       struct btrfs_block_group *ret_bg;
        bool wait_for_alloc = false;
        bool should_alloc = false;
+       bool from_extent_allocation = false;
        int ret = 0;
 
+       if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
+               from_extent_allocation = true;
+               force = CHUNK_ALLOC_FORCE;
+       }
+
        /* Don't re-enter if we're already allocating a chunk */
        if (trans->allocating_chunk)
                return -ENOSPC;
@@ -3750,9 +3751,22 @@ int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
                        force_metadata_allocation(fs_info);
        }
 
-       ret = do_chunk_alloc(trans, flags);
+       ret_bg = do_chunk_alloc(trans, flags);
        trans->allocating_chunk = false;
 
+       if (IS_ERR(ret_bg)) {
+               ret = PTR_ERR(ret_bg);
+       } else if (from_extent_allocation) {
+               /*
+                * New block group is likely to be used soon. Try to activate
+                * it now. Failure is OK for now.
+                */
+               btrfs_zone_activate(ret_bg);
+       }
+
+       if (!ret)
+               btrfs_put_block_group(ret_bg);
+
        spin_lock(&space_info->lock);
        if (ret < 0) {
                if (ret == -ENOSPC)
index 93aabc6..e8308f2 100644 (file)
@@ -35,11 +35,15 @@ enum btrfs_discard_state {
  * the FS with empty chunks
  *
  * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
+ * CHUNK_ALLOC_FORCE_FOR_EXTENT is like CHUNK_ALLOC_FORCE but called from
+ * find_free_extent() and also activates the zone
  */
 enum btrfs_chunk_alloc_enum {
        CHUNK_ALLOC_NO_FORCE,
        CHUNK_ALLOC_LIMITED,
        CHUNK_ALLOC_FORCE,
+       CHUNK_ALLOC_FORCE_FOR_EXTENT,
 };
 
 struct btrfs_caching_control {
index be476f0..19bf36d 100644 (file)
@@ -537,6 +537,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        cb->orig_bio = NULL;
        cb->nr_pages = nr_pages;
 
+       if (blkcg_css)
+               kthread_associate_blkcg(blkcg_css);
+
        while (cur_disk_bytenr < disk_start + compressed_len) {
                u64 offset = cur_disk_bytenr - disk_start;
                unsigned int index = offset >> PAGE_SHIFT;
@@ -555,6 +558,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                bio = NULL;
                                goto finish_cb;
                        }
+                       if (blkcg_css)
+                               bio->bi_opf |= REQ_CGROUP_PUNT;
                }
                /*
                 * We should never reach next_stripe_start start as we will
@@ -612,6 +617,9 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        return 0;
 
 finish_cb:
+       if (blkcg_css)
+               kthread_associate_blkcg(NULL);
+
        if (bio) {
                bio->bi_status = ret;
                bio_endio(bio);
index b30309f..126f244 100644 (file)
@@ -1850,9 +1850,10 @@ again:
 
        ret = btrfs_insert_fs_root(fs_info, root);
        if (ret) {
-               btrfs_put_root(root);
-               if (ret == -EEXIST)
+               if (ret == -EEXIST) {
+                       btrfs_put_root(root);
                        goto again;
+               }
                goto fail;
        }
        return root;
index f477035..6aa92f8 100644 (file)
@@ -4082,7 +4082,7 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
                        }
 
                        ret = btrfs_chunk_alloc(trans, ffe_ctl->flags,
-                                               CHUNK_ALLOC_FORCE);
+                                               CHUNK_ALLOC_FORCE_FOR_EXTENT);
 
                        /* Do not bail out on ENOSPC since we can do more. */
                        if (ret == -ENOSPC)
index 0399cf8..151e9da 100644 (file)
@@ -118,7 +118,7 @@ struct btrfs_bio_ctrl {
  */
 struct extent_changeset {
        /* How many bytes are set/cleared in this operation */
-       unsigned int bytes_changed;
+       u64 bytes_changed;
 
        /* Changed ranges */
        struct ulist range_changed;
index 9f455c9..380054c 100644 (file)
@@ -2957,8 +2957,9 @@ out:
        return ret;
 }
 
-static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
+static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_state *cached_state = NULL;
@@ -2990,6 +2991,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                goto out_only_mutex;
        }
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_only_mutex;
+
        lockstart = round_up(offset, btrfs_inode_sectorsize(BTRFS_I(inode)));
        lockend = round_down(offset + len,
                             btrfs_inode_sectorsize(BTRFS_I(inode))) - 1;
@@ -3430,7 +3435,7 @@ static long btrfs_fallocate(struct file *file, int mode,
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE)
-               return btrfs_punch_hole(inode, offset, len);
+               return btrfs_punch_hole(file, offset, len);
 
        /*
         * Only trigger disk allocation, don't trigger qgroup reserve
@@ -3452,6 +3457,10 @@ static long btrfs_fallocate(struct file *file, int mode,
                        goto out;
        }
 
+       ret = file_modified(file);
+       if (ret)
+               goto out;
+
        /*
         * TODO: Move these two operations after we have checked
         * accurate reserved space, or fallocate can still fail but
index 6bfc434..5082b9c 100644 (file)
@@ -1128,7 +1128,6 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
        int ret = 0;
 
        if (btrfs_is_free_space_inode(inode)) {
-               WARN_ON_ONCE(1);
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -2017,8 +2016,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
                 * to use run_delalloc_nocow() here, like for  regular
                 * preallocated inodes.
                 */
-               ASSERT(!zoned ||
-                      (zoned && btrfs_is_data_reloc_root(inode->root)));
+               ASSERT(!zoned || btrfs_is_data_reloc_root(inode->root));
                ret = run_delalloc_nocow(inode, locked_page, start, end,
                                         page_started, nr_written);
        } else if (!inode_can_compress(inode) ||
@@ -4488,6 +4486,13 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
                           dest->root_key.objectid);
                return -EPERM;
        }
+       if (atomic_read(&dest->nr_swapfiles)) {
+               spin_unlock(&dest->root_item_lock);
+               btrfs_warn(fs_info,
+                          "attempt to delete subvolume %llu with active swapfile",
+                          root->root_key.objectid);
+               return -EPERM;
+       }
        root_flags = btrfs_root_flags(&dest->root_item);
        btrfs_set_root_flags(&dest->root_item,
                             root_flags | BTRFS_ROOT_SUBVOL_DEAD);
@@ -7438,6 +7443,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
        u64 block_start, orig_start, orig_block_len, ram_bytes;
        bool can_nocow = false;
        bool space_reserved = false;
+       u64 prev_len;
        int ret = 0;
 
        /*
@@ -7465,6 +7471,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
                        can_nocow = true;
        }
 
+       prev_len = len;
        if (can_nocow) {
                struct extent_map *em2;
 
@@ -7494,8 +7501,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
                        goto out;
                }
        } else {
-               const u64 prev_len = len;
-
                /* Our caller expects us to free the input extent map. */
                free_extent_map(em);
                *map = NULL;
@@ -7526,7 +7531,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
         * We have created our ordered extent, so we can now release our reservation
         * for an outstanding extent.
         */
-       btrfs_delalloc_release_extents(BTRFS_I(inode), len);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), prev_len);
 
        /*
         * Need to update the i_size under the extent lock so buffered
@@ -11107,8 +11112,23 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
         * set. We use this counter to prevent snapshots. We must increment it
         * before walking the extents because we don't want a concurrent
         * snapshot to run after we've already checked the extents.
+        *
+        * It is possible that subvolume is marked for deletion but still not
+        * removed yet. To prevent this race, we check the root status before
+        * activating the swapfile.
         */
+       spin_lock(&root->root_item_lock);
+       if (btrfs_root_dead(root)) {
+               spin_unlock(&root->root_item_lock);
+
+               btrfs_exclop_finish(fs_info);
+               btrfs_warn(fs_info,
+               "cannot activate swapfile because subvolume %llu is being deleted",
+                       root->root_key.objectid);
+               return -EPERM;
+       }
        atomic_inc(&root->nr_swapfiles);
+       spin_unlock(&root->root_item_lock);
 
        isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);
 
index 238cee5..be6c245 100644 (file)
@@ -1239,7 +1239,7 @@ static u32 get_extent_max_capacity(const struct extent_map *em)
 }
 
 static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
-                                    bool locked)
+                                    u32 extent_thresh, u64 newer_than, bool locked)
 {
        struct extent_map *next;
        bool ret = false;
@@ -1249,11 +1249,12 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
                return false;
 
        /*
-        * We want to check if the next extent can be merged with the current
-        * one, which can be an extent created in a past generation, so we pass
-        * a minimum generation of 0 to defrag_lookup_extent().
+        * Here we need to pass @newer_than when checking the next extent, or
+        * we will hit a case where we mark the current extent for defrag, but
+        * the next one will not be a target.
+        * This will just cause extra IO without really reducing the fragments.
         */
-       next = defrag_lookup_extent(inode, em->start + em->len, 0, locked);
+       next = defrag_lookup_extent(inode, em->start + em->len, newer_than, locked);
        /* No more em or hole */
        if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
                goto out;
@@ -1265,6 +1266,13 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em,
         */
        if (next->len >= get_extent_max_capacity(em))
                goto out;
+       /* Skip older extent */
+       if (next->generation < newer_than)
+               goto out;
+       /* Also check extent size */
+       if (next->len >= extent_thresh)
+               goto out;
+
        ret = true;
 out:
        free_extent_map(next);
@@ -1470,7 +1478,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
                        goto next;
 
                next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
-                                                         locked);
+                                               extent_thresh, newer_than, locked);
                if (!next_mergeable) {
                        struct defrag_target_range *last;
 
@@ -5448,8 +5456,6 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_fs_info(fs_info, argp);
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(fs_info, argp);
-       case BTRFS_IOC_BALANCE:
-               return btrfs_ioctl_balance(file, NULL);
        case BTRFS_IOC_TREE_SEARCH:
                return btrfs_ioctl_tree_search(inode, argp);
        case BTRFS_IOC_TREE_SEARCH_V2:
index 1be7cb2..a8cc736 100644 (file)
@@ -1896,23 +1896,18 @@ static void update_dev_time(const char *device_path)
        path_put(&path);
 }
 
-static int btrfs_rm_dev_item(struct btrfs_device *device)
+static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
+                            struct btrfs_device *device)
 {
        struct btrfs_root *root = device->fs_info->chunk_root;
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_trans_handle *trans;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       trans = btrfs_start_transaction(root, 0);
-       if (IS_ERR(trans)) {
-               btrfs_free_path(path);
-               return PTR_ERR(trans);
-       }
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
@@ -1923,21 +1918,12 @@ static int btrfs_rm_dev_item(struct btrfs_device *device)
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
-               btrfs_abort_transaction(trans, ret);
-               btrfs_end_transaction(trans);
                goto out;
        }
 
        ret = btrfs_del_item(trans, root, path);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               btrfs_end_transaction(trans);
-       }
-
 out:
        btrfs_free_path(path);
-       if (!ret)
-               ret = btrfs_commit_transaction(trans);
        return ret;
 }
 
@@ -2078,6 +2064,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                    struct btrfs_dev_lookup_args *args,
                    struct block_device **bdev, fmode_t *mode)
 {
+       struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct btrfs_fs_devices *cur_devices;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2098,7 +2085,7 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
 
        ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
        if (ret)
-               goto out;
+               return ret;
 
        device = btrfs_find_device(fs_info->fs_devices, args);
        if (!device) {
@@ -2106,27 +2093,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                        ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
                else
                        ret = -ENOENT;
-               goto out;
+               return ret;
        }
 
        if (btrfs_pinned_by_swapfile(fs_info, device)) {
                btrfs_warn_in_rcu(fs_info,
                  "cannot remove device %s (devid %llu) due to active swapfile",
                                  rcu_str_deref(device->name), device->devid);
-               ret = -ETXTBSY;
-               goto out;
+               return -ETXTBSY;
        }
 
-       if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
-               ret = BTRFS_ERROR_DEV_TGT_REPLACE;
-               goto out;
-       }
+       if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
+               return BTRFS_ERROR_DEV_TGT_REPLACE;
 
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
-           fs_info->fs_devices->rw_devices == 1) {
-               ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
-               goto out;
-       }
+           fs_info->fs_devices->rw_devices == 1)
+               return BTRFS_ERROR_DEV_ONLY_WRITABLE;
 
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                mutex_lock(&fs_info->chunk_mutex);
@@ -2139,14 +2121,22 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
        if (ret)
                goto error_undo;
 
-       /*
-        * TODO: the superblock still includes this device in its num_devices
-        * counter although write_all_supers() is not locked out. This
-        * could give a filesystem state which requires a degraded mount.
-        */
-       ret = btrfs_rm_dev_item(device);
-       if (ret)
+       trans = btrfs_start_transaction(fs_info->chunk_root, 0);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
                goto error_undo;
+       }
+
+       ret = btrfs_rm_dev_item(trans, device);
+       if (ret) {
+               /* Any error in dev item removal is critical */
+               btrfs_crit(fs_info,
+                          "failed to remove device item for devid %llu: %d",
+                          device->devid, ret);
+               btrfs_abort_transaction(trans, ret);
+               btrfs_end_transaction(trans);
+               return ret;
+       }
 
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        btrfs_scrub_cancel_dev(device);
@@ -2229,7 +2219,8 @@ int btrfs_rm_device(struct btrfs_fs_info *fs_info,
                free_fs_devices(cur_devices);
        }
 
-out:
+       ret = btrfs_commit_transaction(trans);
+
        return ret;
 
 error_undo:
@@ -2240,7 +2231,7 @@ error_undo:
                device->fs_devices->rw_devices++;
                mutex_unlock(&fs_info->chunk_mutex);
        }
-       goto out;
+       return ret;
 }
 
 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
@@ -4439,10 +4430,12 @@ static int balance_kthread(void *data)
        struct btrfs_fs_info *fs_info = data;
        int ret = 0;
 
+       sb_start_write(fs_info->sb);
        mutex_lock(&fs_info->balance_mutex);
        if (fs_info->balance_ctl)
                ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
        mutex_unlock(&fs_info->balance_mutex);
+       sb_end_write(fs_info->sb);
 
        return ret;
 }
index b7b5fac..1b1b310 100644 (file)
@@ -1801,7 +1801,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
 
        map = em->map_lookup;
        /* We only support single profile for now */
-       ASSERT(map->num_stripes == 1);
        device = map->stripes[0].dev;
 
        free_extent_map(em);
@@ -1976,18 +1975,16 @@ int btrfs_zone_finish(struct btrfs_block_group *block_group)
 
 bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
 {
+       struct btrfs_fs_info *fs_info = fs_devices->fs_info;
        struct btrfs_device *device;
        bool ret = false;
 
-       if (!btrfs_is_zoned(fs_devices->fs_info))
+       if (!btrfs_is_zoned(fs_info))
                return true;
 
-       /* Non-single profiles are not supported yet */
-       ASSERT((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0);
-
        /* Check if there is a device with active zones left */
-       mutex_lock(&fs_devices->device_list_mutex);
-       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+       mutex_lock(&fs_info->chunk_mutex);
+       list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
                struct btrfs_zoned_device_info *zinfo = device->zone_info;
 
                if (!device->bdev)
@@ -1999,7 +1996,7 @@ bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
                        break;
                }
        }
-       mutex_unlock(&fs_devices->device_list_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
 
        return ret;
 }
index f256c8a..ca9f3e4 100644 (file)
@@ -57,6 +57,16 @@ static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
        trace_cachefiles_mark_inactive(object, inode);
 }
 
+static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
+                                             struct dentry *dentry)
+{
+       struct inode *inode = d_backing_inode(dentry);
+
+       inode_lock(inode);
+       __cachefiles_unmark_inode_in_use(object, dentry);
+       inode_unlock(inode);
+}
+
 /*
  * Unmark a backing inode and tell cachefilesd that there's something that can
  * be culled.
@@ -68,9 +78,7 @@ void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
        struct inode *inode = file_inode(file);
 
        if (inode) {
-               inode_lock(inode);
-               __cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
-               inode_unlock(inode);
+               cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
 
                if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
                        atomic_long_add(inode->i_blocks, &cache->b_released);
@@ -484,7 +492,7 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
                                object, d_backing_inode(path.dentry), ret,
                                cachefiles_trace_trunc_error);
                        file = ERR_PTR(ret);
-                       goto out_dput;
+                       goto out_unuse;
                }
        }
 
@@ -494,15 +502,20 @@ struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
                trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
                                           PTR_ERR(file),
                                           cachefiles_trace_open_error);
-               goto out_dput;
+               goto out_unuse;
        }
        if (unlikely(!file->f_op->read_iter) ||
            unlikely(!file->f_op->write_iter)) {
                fput(file);
                pr_notice("Cache does not support read_iter and write_iter\n");
                file = ERR_PTR(-EINVAL);
+               goto out_unuse;
        }
 
+       goto out_dput;
+
+out_unuse:
+       cachefiles_do_unmark_inode_in_use(object, path.dentry);
 out_dput:
        dput(path.dentry);
 out:
@@ -590,14 +603,16 @@ static bool cachefiles_open_file(struct cachefiles_object *object,
 check_failed:
        fscache_cookie_lookup_negative(object->cookie);
        cachefiles_unmark_inode_in_use(object, file);
-       if (ret == -ESTALE) {
-               fput(file);
-               dput(dentry);
+       fput(file);
+       dput(dentry);
+       if (ret == -ESTALE)
                return cachefiles_create_file(object);
-       }
+       return false;
+
 error_fput:
        fput(file);
 error:
+       cachefiles_do_unmark_inode_in_use(object, dentry);
        dput(dentry);
        return false;
 }
index 3546510..00b087c 100644 (file)
@@ -203,7 +203,7 @@ bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
        if (!buf)
                return false;
        buf->reserved = cpu_to_be32(0);
-       memcpy(buf->data, p, len);
+       memcpy(buf->data, p, volume->vcookie->coherency_len);
 
        ret = cachefiles_inject_write_error();
        if (ret == 0)
index a47fa44..2b1a1c0 100644 (file)
@@ -266,22 +266,24 @@ static void cifs_kill_sb(struct super_block *sb)
         * before we kill the sb.
         */
        if (cifs_sb->root) {
+               for (node = rb_first(root); node; node = rb_next(node)) {
+                       tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+                       tcon = tlink_tcon(tlink);
+                       if (IS_ERR(tcon))
+                               continue;
+                       cfid = &tcon->crfid;
+                       mutex_lock(&cfid->fid_mutex);
+                       if (cfid->dentry) {
+                               dput(cfid->dentry);
+                               cfid->dentry = NULL;
+                       }
+                       mutex_unlock(&cfid->fid_mutex);
+               }
+
+               /* finally release root dentry */
                dput(cifs_sb->root);
                cifs_sb->root = NULL;
        }
-       node = rb_first(root);
-       while (node != NULL) {
-               tlink = rb_entry(node, struct tcon_link, tl_rbnode);
-               tcon = tlink_tcon(tlink);
-               cfid = &tcon->crfid;
-               mutex_lock(&cfid->fid_mutex);
-               if (cfid->dentry) {
-                       dput(cfid->dentry);
-                       cfid->dentry = NULL;
-               }
-               mutex_unlock(&cfid->fid_mutex);
-               node = rb_next(node);
-       }
 
        kill_anon_super(sb);
        cifs_umount(cifs_sb);
@@ -944,7 +946,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        ssize_t rc;
        struct inode *inode = file_inode(iocb->ki_filp);
 
-       if (iocb->ki_filp->f_flags & O_DIRECT)
+       if (iocb->ki_flags & IOCB_DIRECT)
                return cifs_user_readv(iocb, iter);
 
        rc = cifs_revalidate_mapping(inode);
index 15a5c5d..c0542bd 100644 (file)
@@ -153,5 +153,5 @@ extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
 #define SMB3_PRODUCT_BUILD 35
-#define CIFS_VERSION   "2.35"
+#define CIFS_VERSION   "2.36"
 #endif                         /* _CIFSFS_H */
index ee3b7c1..902e8c6 100644 (file)
@@ -453,9 +453,7 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
        return rc;
 }
 
-static int
-reconnect_dfs_server(struct TCP_Server_Info *server,
-                    bool mark_smb_session)
+static int reconnect_dfs_server(struct TCP_Server_Info *server)
 {
        int rc = 0;
        const char *refpath = server->current_fullpath + 1;
@@ -479,7 +477,12 @@ reconnect_dfs_server(struct TCP_Server_Info *server,
        if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
                return 0;
 
-       cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+       /*
+        * Unconditionally mark all sessions & tcons for reconnect as we might be connecting to a
+        * different server or share during failover.  It could be improved by adding some logic to
+        * only do that in case it connects to a different server or share, though.
+        */
+       cifs_mark_tcp_ses_conns_for_reconnect(server, true);
 
        cifs_abort_connection(server);
 
@@ -537,7 +540,7 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
        }
        spin_unlock(&cifs_tcp_ses_lock);
 
-       return reconnect_dfs_server(server, mark_smb_session);
+       return reconnect_dfs_server(server);
 }
 #else
 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
@@ -1046,7 +1049,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
                spin_unlock(&server->req_lock);
                wake_up(&server->request_q);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_hdr_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits,
                                le16_to_cpu(shdr->CreditRequest), in_flight);
                cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
@@ -4465,7 +4468,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
         */
        if (rc && server->current_fullpath != server->origin_fullpath) {
                server->current_fullpath = server->origin_fullpath;
-               cifs_reconnect(tcon->ses->server, true);
+               cifs_signal_cifsd_for_reconnect(server, true);
        }
 
        dfs_cache_free_tgts(tl);
index 852e54e..bbdf328 100644 (file)
@@ -85,6 +85,9 @@ parse_mf_symlink(const u8 *buf, unsigned int buf_len, unsigned int *_link_len,
        if (rc != 1)
                return -EINVAL;
 
+       if (link_len > CIFS_MF_SYMLINK_LINK_MAXLEN)
+               return -EINVAL;
+
        rc = symlink_hash(link_len, link_str, md5_hash);
        if (rc) {
                cifs_dbg(FYI, "%s: MD5 hash failure: %d\n", __func__, rc);
index ebe236b..235aa1b 100644 (file)
@@ -896,7 +896,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
                if (class == ERRSRV && code == ERRbaduid) {
                        cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
                                code);
-                       cifs_reconnect(mid->server, false);
+                       cifs_signal_cifsd_for_reconnect(mid->server, false);
                }
        }
 
index c653beb..3fe47a8 100644 (file)
@@ -150,16 +150,18 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
                struct smb2_transform_hdr *thdr =
                        (struct smb2_transform_hdr *)buf;
                struct cifs_ses *ses = NULL;
+               struct cifs_ses *iter;
 
                /* decrypt frame now that it is completely read in */
                spin_lock(&cifs_tcp_ses_lock);
-               list_for_each_entry(ses, &srvr->smb_ses_list, smb_ses_list) {
-                       if (ses->Suid == le64_to_cpu(thdr->SessionId))
+               list_for_each_entry(iter, &srvr->smb_ses_list, smb_ses_list) {
+                       if (iter->Suid == le64_to_cpu(thdr->SessionId)) {
+                               ses = iter;
                                break;
+                       }
                }
                spin_unlock(&cifs_tcp_ses_lock);
-               if (list_entry_is_head(ses, &srvr->smb_ses_list,
-                                      smb_ses_list)) {
+               if (!ses) {
                        cifs_dbg(VFS, "no decryption - session id not found\n");
                        return 1;
                }
index db23f5b..a67df8e 100644 (file)
@@ -86,6 +86,9 @@ smb2_add_credits(struct TCP_Server_Info *server,
        if (*val > 65000) {
                *val = 65000; /* Don't get near 64K credits, avoid srv bugs */
                pr_warn_once("server overflowed SMB3 credits\n");
+               trace_smb3_overflow_credits(server->CurrentMid,
+                                           server->conn_id, server->hostname, *val,
+                                           add, server->in_flight);
        }
        server->in_flight--;
        if (server->in_flight == 0 &&
@@ -251,7 +254,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
        in_flight = server->in_flight;
        spin_unlock(&server->req_lock);
 
-       trace_smb3_add_credits(server->CurrentMid,
+       trace_smb3_wait_credits(server->CurrentMid,
                        server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
        cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
                        __func__, credits->value, scredits);
@@ -300,7 +303,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
        spin_unlock(&server->req_lock);
        wake_up(&server->request_q);
 
-       trace_smb3_add_credits(server->CurrentMid,
+       trace_smb3_adj_credits(server->CurrentMid,
                        server->conn_id, server->hostname, scredits,
                        credits->value - new_val, in_flight);
        cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
@@ -2492,7 +2495,7 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
                spin_unlock(&server->req_lock);
                wake_up(&server->request_q);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_pend_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits,
                                le16_to_cpu(shdr->CreditRequest), in_flight);
                cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
index 6cecf30..bc27961 100644 (file)
@@ -1006,6 +1006,13 @@ DEFINE_SMB3_CREDIT_EVENT(credit_timeout);
 DEFINE_SMB3_CREDIT_EVENT(insufficient_credits);
 DEFINE_SMB3_CREDIT_EVENT(too_many_credits);
 DEFINE_SMB3_CREDIT_EVENT(add_credits);
+DEFINE_SMB3_CREDIT_EVENT(adj_credits);
+DEFINE_SMB3_CREDIT_EVENT(hdr_credits);
+DEFINE_SMB3_CREDIT_EVENT(nblk_credits);
+DEFINE_SMB3_CREDIT_EVENT(pend_credits);
+DEFINE_SMB3_CREDIT_EVENT(wait_credits);
+DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
+DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
 DEFINE_SMB3_CREDIT_EVENT(set_credits);
 
 #endif /* _CIFS_TRACE_H */
index eeb1a69..d9d1c35 100644 (file)
@@ -542,7 +542,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                in_flight = server->in_flight;
                spin_unlock(&server->req_lock);
 
-               trace_smb3_add_credits(server->CurrentMid,
+               trace_smb3_nblk_credits(server->CurrentMid,
                                server->conn_id, server->hostname, scredits, -1, in_flight);
                cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
                                __func__, 1, scredits);
@@ -648,7 +648,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                        in_flight = server->in_flight;
                        spin_unlock(&server->req_lock);
 
-                       trace_smb3_add_credits(server->CurrentMid,
+                       trace_smb3_waitff_credits(server->CurrentMid,
                                        server->conn_id, server->hostname, scredits,
                                        -(num_credits), in_flight);
                        cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
index 7d2e692..ada8fe8 100644 (file)
@@ -412,6 +412,7 @@ void __fput_sync(struct file *file)
 }
 
 EXPORT_SYMBOL(fput);
+EXPORT_SYMBOL(__fput_sync);
 
 void __init files_init(void)
 {
index 76316c4..b313a97 100644 (file)
@@ -38,6 +38,3 @@ config FSCACHE_DEBUG
          enabled by setting bits in /sys/modules/fscache/parameter/debug.
 
          See Documentation/filesystems/caching/fscache.rst for more information.
-
-config FSCACHE_OLD_API
-       bool
index 2749933..d645f8b 100644 (file)
@@ -214,7 +214,7 @@ void fscache_relinquish_cache(struct fscache_cache *cache)
 
        cache->ops = NULL;
        cache->cache_priv = NULL;
-       smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+       fscache_set_cache_state(cache, FSCACHE_CACHE_IS_NOT_PRESENT);
        fscache_put_cache(cache, where);
 }
 EXPORT_SYMBOL(fscache_relinquish_cache);
index 9bb1ab5..9d3cf01 100644 (file)
@@ -30,7 +30,7 @@ static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
 DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
 static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
 static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
-unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+static unsigned int fscache_lru_cookie_timeout = 10 * HZ;
 
 void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
 {
@@ -1069,6 +1069,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 }
 EXPORT_SYMBOL(__fscache_invalidate);
 
+#ifdef CONFIG_PROC_FS
 /*
  * Generate a list of extant cookies in /proc/fs/fscache/cookies
  */
@@ -1145,3 +1146,4 @@ const struct seq_operations fscache_cookies_seq_ops = {
        .stop   = fscache_cookies_seq_stop,
        .show   = fscache_cookies_seq_show,
 };
+#endif
index ed1c9ed..1336f51 100644 (file)
@@ -56,7 +56,9 @@ static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
  * cookie.c
  */
 extern struct kmem_cache *fscache_cookie_jar;
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_cookies_seq_ops;
+#endif
 extern struct timer_list fscache_cookie_lru_timer;
 
 extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
@@ -137,7 +139,9 @@ int fscache_stats_show(struct seq_file *m, void *v);
 /*
  * volume.c
  */
+#ifdef CONFIG_PROC_FS
 extern const struct seq_operations fscache_volumes_seq_ops;
+#endif
 
 struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
                                          enum fscache_volume_trace where);
index c8c7fe9..3af3b08 100644 (file)
@@ -235,8 +235,7 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
 {
        struct fscache_write_request *wreq = priv;
 
-       fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
-                               wreq->mapping, wreq->start, wreq->len,
+       fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
                                wreq->set_bits);
 
        if (wreq->term_func)
@@ -296,7 +295,7 @@ abandon_end:
 abandon_free:
        kfree(wreq);
 abandon:
-       fscache_clear_page_bits(cookie, mapping, start, len, cond);
+       fscache_clear_page_bits(mapping, start, len, cond);
        if (term_func)
                term_func(term_func_priv, ret, false);
 }
index a8413f0..4479013 100644 (file)
@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/af_unix.h>
 #include <net/scm.h>
-#include <net/busy_poll.h>
 #include <linux/anon_inodes.h>
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
                        IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
 
 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
-                               REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
-                               REQ_F_ASYNC_DATA)
+                               REQ_F_POLLED | REQ_F_CREDS | REQ_F_ASYNC_DATA)
 
 #define IO_TCTX_REFS_CACHE_NR  (1U << 10)
 
@@ -412,11 +410,6 @@ struct io_ring_ctx {
        struct list_head        sqd_list;
 
        unsigned long           check_cq_overflow;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       /* used to track busy poll napi_id */
-       struct list_head        napi_list;
-       spinlock_t              napi_lock;      /* napi_list lock */
-#endif
 
        struct {
                unsigned                cached_cq_tail;
@@ -500,7 +493,6 @@ struct io_uring_task {
        const struct io_ring_ctx *last;
        struct io_wq            *io_wq;
        struct percpu_counter   inflight;
-       atomic_t                inflight_tracked;
        atomic_t                in_idle;
 
        spinlock_t              task_lock;
@@ -592,7 +584,8 @@ struct io_rw {
        /* NOTE: kiocb has the file as the first member, so don't do it here */
        struct kiocb                    kiocb;
        u64                             addr;
-       u64                             len;
+       u32                             len;
+       u32                             flags;
 };
 
 struct io_connect {
@@ -654,10 +647,10 @@ struct io_epoll {
 
 struct io_splice {
        struct file                     *file_out;
-       struct file                     *file_in;
        loff_t                          off_out;
        loff_t                          off_in;
        u64                             len;
+       int                             splice_fd_in;
        unsigned int                    flags;
 };
 
@@ -914,7 +907,11 @@ struct io_kiocb {
 
        u64                             user_data;
        u32                             result;
-       u32                             cflags;
+       /* fd initially, then cflags for completion */
+       union {
+               u32                     cflags;
+               int                     fd;
+       };
 
        struct io_ring_ctx              *ctx;
        struct task_struct              *task;
@@ -923,8 +920,12 @@ struct io_kiocb {
        /* store used ubuf, so we can prevent reloading */
        struct io_mapped_ubuf           *imu;
 
-       /* used by request caches, completion batching and iopoll */
-       struct io_wq_work_node          comp_list;
+       union {
+               /* used by request caches, completion batching and iopoll */
+               struct io_wq_work_node  comp_list;
+               /* cache ->apoll->events */
+               int apoll_events;
+       };
        atomic_t                        refs;
        atomic_t                        poll_refs;
        struct io_task_work             io_task_work;
@@ -1182,8 +1183,11 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
                                     struct io_uring_rsrc_update2 *up,
                                     unsigned nr_args);
 static void io_clean_op(struct io_kiocb *req);
-static struct file *io_file_get(struct io_ring_ctx *ctx,
-                               struct io_kiocb *req, int fd, bool fixed);
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+                                            unsigned issue_flags);
+static inline struct file *io_file_get_normal(struct io_kiocb *req, int fd);
+static void io_drop_inflight_file(struct io_kiocb *req);
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags);
 static void __io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
 
@@ -1313,13 +1317,20 @@ static void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
 }
 
 static inline void io_req_set_rsrc_node(struct io_kiocb *req,
-                                       struct io_ring_ctx *ctx)
+                                       struct io_ring_ctx *ctx,
+                                       unsigned int issue_flags)
 {
        if (!req->fixed_rsrc_refs) {
                req->fixed_rsrc_refs = &ctx->rsrc_node->refs;
-               ctx->rsrc_cached_refs--;
-               if (unlikely(ctx->rsrc_cached_refs < 0))
-                       io_rsrc_refs_refill(ctx);
+
+               if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+                       lockdep_assert_held(&ctx->uring_lock);
+                       ctx->rsrc_cached_refs--;
+                       if (unlikely(ctx->rsrc_cached_refs < 0))
+                               io_rsrc_refs_refill(ctx);
+               } else {
+                       percpu_ref_get(req->fixed_rsrc_refs);
+               }
        }
 }
 
@@ -1424,29 +1435,9 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
        __must_hold(&req->ctx->timeout_lock)
 {
-       struct io_kiocb *req;
-
        if (task && head->task != task)
                return false;
-       if (cancel_all)
-               return true;
-
-       io_for_each_link(req, head) {
-               if (req->flags & REQ_F_INFLIGHT)
-                       return true;
-       }
-       return false;
-}
-
-static bool io_match_linked(struct io_kiocb *head)
-{
-       struct io_kiocb *req;
-
-       io_for_each_link(req, head) {
-               if (req->flags & REQ_F_INFLIGHT)
-                       return true;
-       }
-       return false;
+       return cancel_all;
 }
 
 /*
@@ -1456,24 +1447,9 @@ static bool io_match_linked(struct io_kiocb *head)
 static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                               bool cancel_all)
 {
-       bool matched;
-
        if (task && head->task != task)
                return false;
-       if (cancel_all)
-               return true;
-
-       if (head->flags & REQ_F_LINK_TIMEOUT) {
-               struct io_ring_ctx *ctx = head->ctx;
-
-               /* protect against races with linked timeouts */
-               spin_lock_irq(&ctx->timeout_lock);
-               matched = io_match_linked(head);
-               spin_unlock_irq(&ctx->timeout_lock);
-       } else {
-               matched = io_match_linked(head);
-       }
-       return matched;
+       return cancel_all;
 }
 
 static inline bool req_has_async_data(struct io_kiocb *req)
@@ -1595,10 +1571,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        INIT_WQ_LIST(&ctx->locked_free_list);
        INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
        INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       INIT_LIST_HEAD(&ctx->napi_list);
-       spin_lock_init(&ctx->napi_lock);
-#endif
        return ctx;
 err:
        kfree(ctx->dummy_ubuf);
@@ -1636,14 +1608,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req)
        return req->flags & REQ_F_FIXED_FILE;
 }
 
-static inline void io_req_track_inflight(struct io_kiocb *req)
-{
-       if (!(req->flags & REQ_F_INFLIGHT)) {
-               req->flags |= REQ_F_INFLIGHT;
-               atomic_inc(&current->io_uring->inflight_tracked);
-       }
-}
-
 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
 {
        if (WARN_ON_ONCE(!req->link))
@@ -1687,14 +1651,6 @@ static void io_prep_async_work(struct io_kiocb *req)
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
-
-       switch (req->opcode) {
-       case IORING_OP_SPLICE:
-       case IORING_OP_TEE:
-               if (!S_ISREG(file_inode(req->splice.file_in)->i_mode))
-                       req->work.flags |= IO_WQ_WORK_UNBOUND;
-               break;
-       }
 }
 
 static void io_prep_async_link(struct io_kiocb *req)
@@ -1788,12 +1744,11 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
        __must_hold(&ctx->completion_lock)
 {
        u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
+       struct io_kiocb *req, *tmp;
 
        spin_lock_irq(&ctx->timeout_lock);
-       while (!list_empty(&ctx->timeout_list)) {
+       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
                u32 events_needed, events_got;
-               struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
-                                               struct io_kiocb, timeout.list);
 
                if (io_is_timeout_noseq(req))
                        break;
@@ -1810,7 +1765,6 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
                if (events_got < events_needed)
                        break;
 
-               list_del_init(&req->timeout.list);
                io_kill_timeout(req, 0);
        }
        ctx->cq_last_tm_flush = seq;
@@ -2562,6 +2516,8 @@ static void io_req_task_work_add(struct io_kiocb *req, bool priority)
 
        WARN_ON_ONCE(!tctx);
 
+       io_drop_inflight_file(req);
+
        spin_lock_irqsave(&tctx->task_lock, flags);
        if (priority)
                wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
@@ -3186,42 +3142,11 @@ static inline bool io_file_supports_nowait(struct io_kiocb *req)
 
 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       struct io_ring_ctx *ctx = req->ctx;
        struct kiocb *kiocb = &req->rw.kiocb;
-       struct file *file = req->file;
        unsigned ioprio;
        int ret;
 
-       if (!io_req_ffs_set(req))
-               req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
-
        kiocb->ki_pos = READ_ONCE(sqe->off);
-       kiocb->ki_flags = iocb_flags(file);
-       ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
-       if (unlikely(ret))
-               return ret;
-
-       /*
-        * If the file is marked O_NONBLOCK, still allow retry for it if it
-        * supports async. Otherwise it's impossible to use O_NONBLOCK files
-        * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
-        */
-       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
-           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
-               req->flags |= REQ_F_NOWAIT;
-
-       if (ctx->flags & IORING_SETUP_IOPOLL) {
-               if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
-                       return -EOPNOTSUPP;
-
-               kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
-               kiocb->ki_complete = io_complete_rw_iopoll;
-               req->iopoll_completed = 0;
-       } else {
-               if (kiocb->ki_flags & IOCB_HIPRI)
-                       return -EINVAL;
-               kiocb->ki_complete = io_complete_rw;
-       }
 
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
@@ -3237,6 +3162,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        req->imu = NULL;
        req->rw.addr = READ_ONCE(sqe->addr);
        req->rw.len = READ_ONCE(sqe->len);
+       req->rw.flags = READ_ONCE(sqe->rw_flags);
        req->buf_index = READ_ONCE(sqe->buf_index);
        return 0;
 }
@@ -3265,19 +3191,18 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
 {
        struct kiocb *kiocb = &req->rw.kiocb;
-       bool is_stream = req->file->f_mode & FMODE_STREAM;
 
-       if (kiocb->ki_pos == -1) {
-               if (!is_stream) {
-                       req->flags |= REQ_F_CUR_POS;
-                       kiocb->ki_pos = req->file->f_pos;
-                       return &kiocb->ki_pos;
-               } else {
-                       kiocb->ki_pos = 0;
-                       return NULL;
-               }
+       if (kiocb->ki_pos != -1)
+               return &kiocb->ki_pos;
+
+       if (!(req->file->f_mode & FMODE_STREAM)) {
+               req->flags |= REQ_F_CUR_POS;
+               kiocb->ki_pos = req->file->f_pos;
+               return &kiocb->ki_pos;
        }
-       return is_stream ? NULL : &kiocb->ki_pos;
+
+       kiocb->ki_pos = 0;
+       return NULL;
 }
 
 static void kiocb_done(struct io_kiocb *req, ssize_t ret,
@@ -3367,7 +3292,8 @@ static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter
        return 0;
 }
 
-static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
+static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter,
+                          unsigned int issue_flags)
 {
        struct io_mapped_ubuf *imu = req->imu;
        u16 index, buf_index = req->buf_index;
@@ -3377,7 +3303,7 @@ static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter)
 
                if (unlikely(buf_index >= ctx->nr_user_bufs))
                        return -EFAULT;
-               io_req_set_rsrc_node(req, ctx);
+               io_req_set_rsrc_node(req, ctx, issue_flags);
                index = array_index_nospec(buf_index, ctx->nr_user_bufs);
                imu = READ_ONCE(ctx->user_bufs[index]);
                req->imu = imu;
@@ -3539,7 +3465,7 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
        ssize_t ret;
 
        if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
-               ret = io_import_fixed(req, rw, iter);
+               ret = io_import_fixed(req, rw, iter, issue_flags);
                if (ret)
                        return ERR_PTR(ret);
                return NULL;
@@ -3740,13 +3666,6 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
        return 0;
 }
 
-static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-       if (unlikely(!(req->file->f_mode & FMODE_READ)))
-               return -EBADF;
-       return io_prep_rw(req, sqe);
-}
-
 /*
  * This is our waitqueue callback handler, registered through __folio_lock_async()
  * when we initially tried to do the IO with the iocb armed our waitqueue.
@@ -3834,6 +3753,49 @@ static bool need_read_all(struct io_kiocb *req)
                S_ISBLK(file_inode(req->file)->i_mode);
 }
 
+static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
+{
+       struct kiocb *kiocb = &req->rw.kiocb;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct file *file = req->file;
+       int ret;
+
+       if (unlikely(!file || !(file->f_mode & mode)))
+               return -EBADF;
+
+       if (!io_req_ffs_set(req))
+               req->flags |= io_file_get_flags(file) << REQ_F_SUPPORT_NOWAIT_BIT;
+
+       kiocb->ki_flags = iocb_flags(file);
+       ret = kiocb_set_rw_flags(kiocb, req->rw.flags);
+       if (unlikely(ret))
+               return ret;
+
+       /*
+        * If the file is marked O_NONBLOCK, still allow retry for it if it
+        * supports async. Otherwise it's impossible to use O_NONBLOCK files
+        * reliably. If not, or it IOCB_NOWAIT is set, don't retry.
+        */
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req)))
+               req->flags |= REQ_F_NOWAIT;
+
+       if (ctx->flags & IORING_SETUP_IOPOLL) {
+               if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
+                       return -EOPNOTSUPP;
+
+               kiocb->ki_flags |= IOCB_HIPRI | IOCB_ALLOC_CACHE;
+               kiocb->ki_complete = io_complete_rw_iopoll;
+               req->iopoll_completed = 0;
+       } else {
+               if (kiocb->ki_flags & IOCB_HIPRI)
+                       return -EINVAL;
+               kiocb->ki_complete = io_complete_rw;
+       }
+
+       return 0;
+}
+
 static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_rw_state __s, *s = &__s;
@@ -3869,6 +3831,9 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
+       ret = io_rw_init_file(req, FMODE_READ);
+       if (unlikely(ret))
+               return ret;
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -3972,13 +3937,6 @@ out_free:
        return 0;
 }
 
-static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
-{
-       if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
-               return -EBADF;
-       return io_prep_rw(req, sqe);
-}
-
 static int io_write(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_rw_state __s, *s = &__s;
@@ -3999,6 +3957,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                iov_iter_restore(&s->iter, &s->iter_state);
                iovec = NULL;
        }
+       ret = io_rw_init_file(req, FMODE_WRITE);
+       if (unlikely(ret))
+               return ret;
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -4369,18 +4330,11 @@ static int __io_splice_prep(struct io_kiocb *req,
        if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
 
-       sp->file_in = NULL;
        sp->len = READ_ONCE(sqe->len);
        sp->flags = READ_ONCE(sqe->splice_flags);
-
        if (unlikely(sp->flags & ~valid_flags))
                return -EINVAL;
-
-       sp->file_in = io_file_get(req->ctx, req, READ_ONCE(sqe->splice_fd_in),
-                                 (sp->flags & SPLICE_F_FD_IN_FIXED));
-       if (!sp->file_in)
-               return -EBADF;
-       req->flags |= REQ_F_NEED_CLEANUP;
+       sp->splice_fd_in = READ_ONCE(sqe->splice_fd_in);
        return 0;
 }
 
@@ -4395,20 +4349,29 @@ static int io_tee_prep(struct io_kiocb *req,
 static int io_tee(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_splice *sp = &req->splice;
-       struct file *in = sp->file_in;
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
+       struct file *in;
        long ret = 0;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
+
+       if (sp->flags & SPLICE_F_FD_IN_FIXED)
+               in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+       else
+               in = io_file_get_normal(req, sp->splice_fd_in);
+       if (!in) {
+               ret = -EBADF;
+               goto done;
+       }
+
        if (sp->len)
                ret = do_tee(in, out, sp->len, flags);
 
        if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
                io_put_file(in);
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-
+done:
        if (ret != sp->len)
                req_set_fail(req);
        io_req_complete(req, ret);
@@ -4427,15 +4390,24 @@ static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_splice *sp = &req->splice;
-       struct file *in = sp->file_in;
        struct file *out = sp->file_out;
        unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
        loff_t *poff_in, *poff_out;
+       struct file *in;
        long ret = 0;
 
        if (issue_flags & IO_URING_F_NONBLOCK)
                return -EAGAIN;
 
+       if (sp->flags & SPLICE_F_FD_IN_FIXED)
+               in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
+       else
+               in = io_file_get_normal(req, sp->splice_fd_in);
+       if (!in) {
+               ret = -EBADF;
+               goto done;
+       }
+
        poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
        poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
 
@@ -4444,8 +4416,7 @@ static int io_splice(struct io_kiocb *req, unsigned int issue_flags)
 
        if (!(sp->flags & SPLICE_F_FD_IN_FIXED))
                io_put_file(in);
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-
+done:
        if (ret != sp->len)
                req_set_fail(req);
        io_req_complete(req, ret);
@@ -4513,9 +4484,6 @@ static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!req->file)
-               return -EBADF;
-
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
@@ -5757,108 +5725,6 @@ IO_NETOP_FN(send);
 IO_NETOP_FN(recv);
 #endif /* CONFIG_NET */
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-
-#define NAPI_TIMEOUT                   (60 * SEC_CONVERSION)
-
-struct napi_entry {
-       struct list_head        list;
-       unsigned int            napi_id;
-       unsigned long           timeout;
-};
-
-/*
- * Add busy poll NAPI ID from sk.
- */
-static void io_add_napi(struct file *file, struct io_ring_ctx *ctx)
-{
-       unsigned int napi_id;
-       struct socket *sock;
-       struct sock *sk;
-       struct napi_entry *ne;
-
-       if (!net_busy_loop_on())
-               return;
-
-       sock = sock_from_file(file);
-       if (!sock)
-               return;
-
-       sk = sock->sk;
-       if (!sk)
-               return;
-
-       napi_id = READ_ONCE(sk->sk_napi_id);
-
-       /* Non-NAPI IDs can be rejected */
-       if (napi_id < MIN_NAPI_ID)
-               return;
-
-       spin_lock(&ctx->napi_lock);
-       list_for_each_entry(ne, &ctx->napi_list, list) {
-               if (ne->napi_id == napi_id) {
-                       ne->timeout = jiffies + NAPI_TIMEOUT;
-                       goto out;
-               }
-       }
-
-       ne = kmalloc(sizeof(*ne), GFP_NOWAIT);
-       if (!ne)
-               goto out;
-
-       ne->napi_id = napi_id;
-       ne->timeout = jiffies + NAPI_TIMEOUT;
-       list_add_tail(&ne->list, &ctx->napi_list);
-out:
-       spin_unlock(&ctx->napi_lock);
-}
-
-static inline void io_check_napi_entry_timeout(struct napi_entry *ne)
-{
-       if (time_after(jiffies, ne->timeout)) {
-               list_del(&ne->list);
-               kfree(ne);
-       }
-}
-
-/*
- * Busy poll if globally on and supporting sockets found
- */
-static bool io_napi_busy_loop(struct list_head *napi_list)
-{
-       struct napi_entry *ne, *n;
-
-       list_for_each_entry_safe(ne, n, napi_list, list) {
-               napi_busy_loop(ne->napi_id, NULL, NULL, true,
-                              BUSY_POLL_BUDGET);
-               io_check_napi_entry_timeout(ne);
-       }
-       return !list_empty(napi_list);
-}
-
-static void io_free_napi_list(struct io_ring_ctx *ctx)
-{
-       spin_lock(&ctx->napi_lock);
-       while (!list_empty(&ctx->napi_list)) {
-               struct napi_entry *ne =
-                       list_first_entry(&ctx->napi_list, struct napi_entry,
-                                        list);
-
-               list_del(&ne->list);
-               kfree(ne);
-       }
-       spin_unlock(&ctx->napi_lock);
-}
-#else
-static inline void io_add_napi(struct file *file, struct io_ring_ctx *ctx)
-{
-}
-
-static inline void io_free_napi_list(struct io_ring_ctx *ctx)
-{
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
@@ -5972,10 +5838,9 @@ static void io_poll_remove_entries(struct io_kiocb *req)
  * either spurious wakeup or multishot CQE is served. 0 when it's done with
  * the request, then the mask is stored in req->result.
  */
-static int io_poll_check_events(struct io_kiocb *req)
+static int io_poll_check_events(struct io_kiocb *req, bool locked)
 {
        struct io_ring_ctx *ctx = req->ctx;
-       struct io_poll_iocb *poll = io_poll_get_single(req);
        int v;
 
        /* req->task == current here, checking PF_EXITING is safe */
@@ -5992,14 +5857,17 @@ static int io_poll_check_events(struct io_kiocb *req)
                        return -ECANCELED;
 
                if (!req->result) {
-                       struct poll_table_struct pt = { ._key = req->cflags };
+                       struct poll_table_struct pt = { ._key = req->apoll_events };
+                       unsigned flags = locked ? 0 : IO_URING_F_UNLOCKED;
 
-                       req->result = vfs_poll(req->file, &pt) & req->cflags;
+                       if (unlikely(!io_assign_file(req, flags)))
+                               return -EBADF;
+                       req->result = vfs_poll(req->file, &pt) & req->apoll_events;
                }
 
                /* multishot, just fill an CQE and proceed */
-               if (req->result && !(req->cflags & EPOLLONESHOT)) {
-                       __poll_t mask = mangle_poll(req->result & poll->events);
+               if (req->result && !(req->apoll_events & EPOLLONESHOT)) {
+                       __poll_t mask = mangle_poll(req->result & req->apoll_events);
                        bool filled;
 
                        spin_lock(&ctx->completion_lock);
@@ -6010,7 +5878,6 @@ static int io_poll_check_events(struct io_kiocb *req)
                        if (unlikely(!filled))
                                return -ECANCELED;
                        io_cqring_ev_posted(ctx);
-                       io_add_napi(req->file, ctx);
                } else if (req->result) {
                        return 0;
                }
@@ -6029,7 +5896,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_poll_check_events(req);
+       ret = io_poll_check_events(req, *locked);
        if (ret > 0)
                return;
 
@@ -6054,7 +5921,7 @@ static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
        struct io_ring_ctx *ctx = req->ctx;
        int ret;
 
-       ret = io_poll_check_events(req);
+       ret = io_poll_check_events(req, *locked);
        if (ret > 0)
                return;
 
@@ -6078,7 +5945,7 @@ static void __io_poll_execute(struct io_kiocb *req, int mask, int events)
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
-       req->cflags = events;
+       req->apoll_events = events;
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
@@ -6261,7 +6128,6 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
                __io_poll_execute(req, mask, poll->events);
                return 0;
        }
-       io_add_napi(req->file, req->ctx);
 
        /*
         * Release ownership. If someone tried to queue a tw while it was
@@ -6471,7 +6337,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
                return -EINVAL;
 
        io_req_set_refcount(req);
-       req->cflags = poll->events = io_poll_parse_events(sqe, flags);
+       req->apoll_events = poll->events = io_poll_parse_events(sqe, flags);
        return 0;
 }
 
@@ -6766,6 +6632,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
                return -EINVAL;
 
+       INIT_LIST_HEAD(&req->timeout.list);
        data->mode = io_translate_timeout_mode(flags);
        hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
 
@@ -6972,6 +6839,7 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
        up.nr = 0;
        up.tags = 0;
        up.resv = 0;
+       up.resv2 = 0;
 
        io_ring_submit_lock(ctx, needs_lock);
        ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
@@ -6992,11 +6860,10 @@ static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        case IORING_OP_READV:
        case IORING_OP_READ_FIXED:
        case IORING_OP_READ:
-               return io_read_prep(req, sqe);
        case IORING_OP_WRITEV:
        case IORING_OP_WRITE_FIXED:
        case IORING_OP_WRITE:
-               return io_write_prep(req, sqe);
+               return io_prep_rw(req, sqe);
        case IORING_OP_POLL_ADD:
                return io_poll_add_prep(req, sqe);
        case IORING_OP_POLL_REMOVE:
@@ -7179,11 +7046,6 @@ static void io_clean_op(struct io_kiocb *req)
                        kfree(io->free_iov);
                        break;
                        }
-               case IORING_OP_SPLICE:
-               case IORING_OP_TEE:
-                       if (!(req->splice.flags & SPLICE_F_FD_IN_FIXED))
-                               io_put_file(req->splice.file_in);
-                       break;
                case IORING_OP_OPENAT:
                case IORING_OP_OPENAT2:
                        if (req->open.filename)
@@ -7218,11 +7080,6 @@ static void io_clean_op(struct io_kiocb *req)
                kfree(req->apoll);
                req->apoll = NULL;
        }
-       if (req->flags & REQ_F_INFLIGHT) {
-               struct io_uring_task *tctx = req->task->io_uring;
-
-               atomic_dec(&tctx->inflight_tracked);
-       }
        if (req->flags & REQ_F_CREDS)
                put_cred(req->creds);
        if (req->flags & REQ_F_ASYNC_DATA) {
@@ -7232,11 +7089,31 @@ static void io_clean_op(struct io_kiocb *req)
        req->flags &= ~IO_REQ_CLEAN_FLAGS;
 }
 
+static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
+{
+       if (req->file || !io_op_defs[req->opcode].needs_file)
+               return true;
+
+       if (req->flags & REQ_F_FIXED_FILE)
+               req->file = io_file_get_fixed(req, req->fd, issue_flags);
+       else
+               req->file = io_file_get_normal(req, req->fd);
+       if (req->file)
+               return true;
+
+       req_set_fail(req);
+       req->result = -EBADF;
+       return false;
+}
+
 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
 {
        const struct cred *creds = NULL;
        int ret;
 
+       if (unlikely(!io_assign_file(req, issue_flags)))
+               return -EBADF;
+
        if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
                creds = override_creds(req->creds);
 
@@ -7386,10 +7263,11 @@ static struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
 static void io_wq_submit_work(struct io_wq_work *work)
 {
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       const struct io_op_def *def = &io_op_defs[req->opcode];
        unsigned int issue_flags = IO_URING_F_UNLOCKED;
        bool needs_poll = false;
        struct io_kiocb *timeout;
-       int ret = 0;
+       int ret = 0, err = -ECANCELED;
 
        /* one will be dropped by ->io_free_work() after returning to io-wq */
        if (!(req->flags & REQ_F_REFCOUNT))
@@ -7401,14 +7279,20 @@ static void io_wq_submit_work(struct io_wq_work *work)
        if (timeout)
                io_queue_linked_timeout(timeout);
 
+
        /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
        if (work->flags & IO_WQ_WORK_CANCEL) {
-               io_req_task_queue_fail(req, -ECANCELED);
+fail:
+               io_req_task_queue_fail(req, err);
                return;
        }
+       if (!io_assign_file(req, issue_flags)) {
+               err = -EBADF;
+               work->flags |= IO_WQ_WORK_CANCEL;
+               goto fail;
+       }
 
        if (req->flags & REQ_F_FORCE_ASYNC) {
-               const struct io_op_def *def = &io_op_defs[req->opcode];
                bool opcode_poll = def->pollin || def->pollout;
 
                if (opcode_poll && file_can_poll(req->file)) {
@@ -7465,46 +7349,56 @@ static void io_fixed_file_set(struct io_fixed_file *file_slot, struct file *file
        file_slot->file_ptr = file_ptr;
 }
 
-static inline struct file *io_file_get_fixed(struct io_ring_ctx *ctx,
-                                            struct io_kiocb *req, int fd)
+static inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
+                                            unsigned int issue_flags)
 {
-       struct file *file;
+       struct io_ring_ctx *ctx = req->ctx;
+       struct file *file = NULL;
        unsigned long file_ptr;
 
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_lock(&ctx->uring_lock);
+
        if (unlikely((unsigned int)fd >= ctx->nr_user_files))
-               return NULL;
+               goto out;
        fd = array_index_nospec(fd, ctx->nr_user_files);
        file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
        file = (struct file *) (file_ptr & FFS_MASK);
        file_ptr &= ~FFS_MASK;
        /* mask in overlapping REQ_F and FFS bits */
        req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
-       io_req_set_rsrc_node(req, ctx);
+       io_req_set_rsrc_node(req, ctx, 0);
+out:
+       if (issue_flags & IO_URING_F_UNLOCKED)
+               mutex_unlock(&ctx->uring_lock);
        return file;
 }
 
-static struct file *io_file_get_normal(struct io_ring_ctx *ctx,
-                                      struct io_kiocb *req, int fd)
+/*
+ * Drop the file for requeue operations. Only used of req->file is the
+ * io_uring descriptor itself.
+ */
+static void io_drop_inflight_file(struct io_kiocb *req)
+{
+       if (unlikely(req->flags & REQ_F_INFLIGHT)) {
+               fput(req->file);
+               req->file = NULL;
+               req->flags &= ~REQ_F_INFLIGHT;
+       }
+}
+
+static struct file *io_file_get_normal(struct io_kiocb *req, int fd)
 {
        struct file *file = fget(fd);
 
-       trace_io_uring_file_get(ctx, req, req->user_data, fd);
+       trace_io_uring_file_get(req->ctx, req, req->user_data, fd);
 
        /* we don't allow fixed io_uring files */
-       if (file && unlikely(file->f_op == &io_uring_fops))
-               io_req_track_inflight(req);
+       if (file && file->f_op == &io_uring_fops)
+               req->flags |= REQ_F_INFLIGHT;
        return file;
 }
 
-static inline struct file *io_file_get(struct io_ring_ctx *ctx,
-                                      struct io_kiocb *req, int fd, bool fixed)
-{
-       if (fixed)
-               return io_file_get_fixed(ctx, req, fd);
-       else
-               return io_file_get_normal(ctx, req, fd);
-}
-
 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
 {
        struct io_kiocb *prev = req->timeout.prev;
@@ -7744,6 +7638,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (io_op_defs[opcode].needs_file) {
                struct io_submit_state *state = &ctx->submit_state;
 
+               req->fd = READ_ONCE(sqe->fd);
+
                /*
                 * Plug now if we have more than 2 IO left after this, and the
                 * target is potentially a read/write to block based storage.
@@ -7753,11 +7649,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
                        state->need_plug = false;
                        blk_start_plug_nr_ios(&state->plug, state->submit_nr);
                }
-
-               req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd),
-                                       (sqe_flags & IOSQE_FIXED_FILE));
-               if (unlikely(!req->file))
-                       return -EBADF;
        }
 
        personality = READ_ONCE(sqe->personality);
@@ -8032,13 +7923,7 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
                    !(ctx->flags & IORING_SETUP_R_DISABLED))
                        ret = io_submit_sqes(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               spin_lock(&ctx->napi_lock);
-               if (!list_empty(&ctx->napi_list) &&
-                   io_napi_busy_loop(&ctx->napi_list))
-                       ++ret;
-               spin_unlock(&ctx->napi_lock);
-#endif
+
                if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
                        wake_up(&ctx->sqo_sq_wait);
                if (creds)
@@ -8176,9 +8061,6 @@ struct io_wait_queue {
        struct io_ring_ctx *ctx;
        unsigned cq_tail;
        unsigned nr_timeouts;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       unsigned busy_poll_to;
-#endif
 };
 
 static inline bool io_should_wake(struct io_wait_queue *iowq)
@@ -8240,87 +8122,6 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
        return 1;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static void io_adjust_busy_loop_timeout(struct timespec64 *ts,
-                                       struct io_wait_queue *iowq)
-{
-       unsigned busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
-       struct timespec64 pollto = ns_to_timespec64(1000 * (s64)busy_poll_to);
-
-       if (timespec64_compare(ts, &pollto) > 0) {
-               *ts = timespec64_sub(*ts, pollto);
-               iowq->busy_poll_to = busy_poll_to;
-       } else {
-               u64 to = timespec64_to_ns(ts);
-
-               do_div(to, 1000);
-               iowq->busy_poll_to = to;
-               ts->tv_sec = 0;
-               ts->tv_nsec = 0;
-       }
-}
-
-static inline bool io_busy_loop_timeout(unsigned long start_time,
-                                       unsigned long bp_usec)
-{
-       if (bp_usec) {
-               unsigned long end_time = start_time + bp_usec;
-               unsigned long now = busy_loop_current_time();
-
-               return time_after(now, end_time);
-       }
-       return true;
-}
-
-static bool io_busy_loop_end(void *p, unsigned long start_time)
-{
-       struct io_wait_queue *iowq = p;
-
-       return signal_pending(current) ||
-              io_should_wake(iowq) ||
-              io_busy_loop_timeout(start_time, iowq->busy_poll_to);
-}
-
-static void io_blocking_napi_busy_loop(struct list_head *napi_list,
-                                      struct io_wait_queue *iowq)
-{
-       unsigned long start_time =
-               list_is_singular(napi_list) ? 0 :
-               busy_loop_current_time();
-
-       do {
-               if (list_is_singular(napi_list)) {
-                       struct napi_entry *ne =
-                               list_first_entry(napi_list,
-                                                struct napi_entry, list);
-
-                       napi_busy_loop(ne->napi_id, io_busy_loop_end, iowq,
-                                      true, BUSY_POLL_BUDGET);
-                       io_check_napi_entry_timeout(ne);
-                       break;
-               }
-       } while (io_napi_busy_loop(napi_list) &&
-                !io_busy_loop_end(iowq, start_time));
-}
-
-static void io_putback_napi_list(struct io_ring_ctx *ctx,
-                                struct list_head *napi_list)
-{
-       struct napi_entry *cne, *lne;
-
-       spin_lock(&ctx->napi_lock);
-       list_for_each_entry(cne, &ctx->napi_list, list)
-               list_for_each_entry(lne, napi_list, list)
-                       if (cne->napi_id == lne->napi_id) {
-                               list_del(&lne->list);
-                               kfree(lne);
-                               break;
-                       }
-       list_splice(napi_list, &ctx->napi_list);
-       spin_unlock(&ctx->napi_lock);
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /*
  * Wait until events become available, if we don't already have some. The
  * application must reap them itself, as they reside on the shared cq ring.
@@ -8333,9 +8134,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        struct io_rings *rings = ctx->rings;
        ktime_t timeout = KTIME_MAX;
        int ret;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       LIST_HEAD(local_napi_list);
-#endif
 
        do {
                io_cqring_overflow_flush(ctx);
@@ -8358,29 +8156,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                        return ret;
        }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       iowq.busy_poll_to = 0;
-       if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
-               spin_lock(&ctx->napi_lock);
-               list_splice_init(&ctx->napi_list, &local_napi_list);
-               spin_unlock(&ctx->napi_lock);
-       }
-#endif
        if (uts) {
                struct timespec64 ts;
 
                if (get_timespec64(&ts, uts))
                        return -EFAULT;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-               if (!list_empty(&local_napi_list))
-                       io_adjust_busy_loop_timeout(&ts, &iowq);
-#endif
                timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
        }
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       else if (!list_empty(&local_napi_list))
-               iowq.busy_poll_to = READ_ONCE(sysctl_net_busy_poll);
-#endif
 
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
@@ -8390,12 +8172,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
 
        trace_io_uring_cqring_wait(ctx, min_events);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       if (iowq.busy_poll_to)
-               io_blocking_napi_busy_loop(&local_napi_list, &iowq);
-       if (!list_empty(&local_napi_list))
-               io_putback_napi_list(ctx, &local_napi_list);
-#endif
        do {
                /* if we can't even flush overflow, don't wait for more */
                if (!io_cqring_overflow_flush(ctx)) {
@@ -8864,8 +8640,12 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
                refcount_add(skb->truesize, &sk->sk_wmem_alloc);
                skb_queue_head(&sk->sk_receive_queue, skb);
 
-               for (i = 0; i < nr_files; i++)
-                       fput(fpl->fp[i]);
+               for (i = 0; i < nr; i++) {
+                       struct file *file = io_file_from_index(ctx, i + offset);
+
+                       if (file)
+                               fput(file);
+               }
        } else {
                kfree_skb(skb);
                free_uid(fpl->user);
@@ -9156,13 +8936,15 @@ static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
 static int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
                                 struct io_rsrc_node *node, void *rsrc)
 {
+       u64 *tag_slot = io_get_tag_slot(data, idx);
        struct io_rsrc_put *prsrc;
 
        prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
        if (!prsrc)
                return -ENOMEM;
 
-       prsrc->tag = *io_get_tag_slot(data, idx);
+       prsrc->tag = *tag_slot;
+       *tag_slot = 0;
        prsrc->rsrc = rsrc;
        list_add(&prsrc->list, &node->rsrc_list);
        return 0;
@@ -9231,7 +9013,7 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
        bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
        struct io_fixed_file *file_slot;
        struct file *file;
-       int ret, i;
+       int ret;
 
        io_ring_submit_lock(ctx, needs_lock);
        ret = -ENXIO;
@@ -9244,8 +9026,8 @@ static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
        if (ret)
                goto out;
 
-       i = array_index_nospec(offset, ctx->nr_user_files);
-       file_slot = io_fixed_file_slot(&ctx->file_table, i);
+       offset = array_index_nospec(offset, ctx->nr_user_files);
+       file_slot = io_fixed_file_slot(&ctx->file_table, offset);
        ret = -EBADF;
        if (!file_slot->file_ptr)
                goto out;
@@ -9301,8 +9083,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
 
                if (file_slot->file_ptr) {
                        file = (struct file *)(file_slot->file_ptr & FFS_MASK);
-                       err = io_queue_rsrc_removal(data, up->offset + done,
-                                                   ctx->rsrc_node, file);
+                       err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
                        if (err)
                                break;
                        file_slot->file_ptr = 0;
@@ -9327,7 +9108,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                err = -EBADF;
                                break;
                        }
-                       *io_get_tag_slot(data, up->offset + done) = tag;
+                       *io_get_tag_slot(data, i) = tag;
                        io_fixed_file_set(file_slot, file);
                        err = io_sqe_file_register(ctx, file, i);
                        if (err) {
@@ -9411,7 +9192,6 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
        xa_init(&tctx->xa);
        init_waitqueue_head(&tctx->wait);
        atomic_set(&tctx->in_idle, 0);
-       atomic_set(&tctx->inflight_tracked, 0);
        task->io_uring = tctx;
        spin_lock_init(&tctx->task_lock);
        INIT_WQ_LIST(&tctx->task_list);
@@ -9986,7 +9766,7 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
 
                i = array_index_nospec(offset, ctx->nr_user_bufs);
                if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
-                       err = io_queue_rsrc_removal(ctx->buf_data, offset,
+                       err = io_queue_rsrc_removal(ctx->buf_data, i,
                                                    ctx->rsrc_node, ctx->user_bufs[i]);
                        if (unlikely(err)) {
                                io_buffer_unmap(ctx, &imu);
@@ -10181,7 +9961,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_req_caches_free(ctx);
        if (ctx->hash_map)
                io_wq_put_hash(ctx->hash_map);
-       io_free_napi_list(ctx);
        kfree(ctx->cancel_hash);
        kfree(ctx->dummy_ubuf);
        kfree(ctx->io_buffers);
@@ -10604,7 +10383,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
 {
        if (tracked)
-               return atomic_read(&tctx->inflight_tracked);
+               return 0;
        return percpu_counter_sum(&tctx->inflight);
 }
 
@@ -10755,6 +10534,11 @@ static int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
                        break;
                }
 
+               if (reg.resv) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                if (reg.offset == -1U) {
                        start = 0;
                        end = IO_RINGFD_REG_MAX;
@@ -10801,7 +10585,7 @@ static int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
                        ret = -EFAULT;
                        break;
                }
-               if (reg.offset >= IO_RINGFD_REG_MAX) {
+               if (reg.resv || reg.offset >= IO_RINGFD_REG_MAX) {
                        ret = -EINVAL;
                        break;
                }
@@ -10928,6 +10712,8 @@ static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz
                return -EINVAL;
        if (copy_from_user(&arg, argp, sizeof(arg)))
                return -EFAULT;
+       if (arg.pad)
+               return -EINVAL;
        *sig = u64_to_user_ptr(arg.sigmask);
        *argsz = arg.sigmask_sz;
        *ts = u64_to_user_ptr(arg.ts);
@@ -11409,7 +11195,8 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
                        IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
                        IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
                        IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
-                       IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
+                       IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
+                       IORING_FEAT_LINKED_FILE;
 
        if (copy_to_user(params, p, sizeof(*p))) {
                ret = -EFAULT;
@@ -11620,8 +11407,6 @@ static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
        __u32 tmp;
        int err;
 
-       if (up->resv)
-               return -EINVAL;
        if (check_add_overflow(up->offset, nr_args, &tmp))
                return -EOVERFLOW;
        err = io_rsrc_node_switch_start(ctx);
@@ -11647,6 +11432,8 @@ static int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
        memset(&up, 0, sizeof(up));
        if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
                return -EFAULT;
+       if (up.resv || up.resv2)
+               return -EINVAL;
        return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
 }
 
@@ -11659,7 +11446,7 @@ static int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
                return -EINVAL;
        if (copy_from_user(&up, arg, sizeof(up)))
                return -EFAULT;
-       if (!up.nr || up.resv)
+       if (!up.nr || up.resv || up.resv2)
                return -EINVAL;
        return __io_register_rsrc_update(ctx, type, &up, up.nr);
 }
@@ -11707,7 +11494,15 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
        if (len > cpumask_size())
                len = cpumask_size();
 
-       if (copy_from_user(new_mask, arg, len)) {
+       if (in_compat_syscall()) {
+               ret = compat_get_bitmap(cpumask_bits(new_mask),
+                                       (const compat_ulong_t __user *)arg,
+                                       len * 8 /* CHAR_BIT */);
+       } else {
+               ret = copy_from_user(new_mask, arg, len);
+       }
+
+       if (ret) {
                free_cpumask_var(new_mask);
                return -EFAULT;
        }
index 3f1829b..509657f 100644 (file)
@@ -3673,18 +3673,14 @@ static struct dentry *filename_create(int dfd, struct filename *name,
 {
        struct dentry *dentry = ERR_PTR(-EEXIST);
        struct qstr last;
+       bool want_dir = lookup_flags & LOOKUP_DIRECTORY;
+       unsigned int reval_flag = lookup_flags & LOOKUP_REVAL;
+       unsigned int create_flags = LOOKUP_CREATE | LOOKUP_EXCL;
        int type;
        int err2;
        int error;
-       bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
 
-       /*
-        * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
-        * other flags passed in are ignored!
-        */
-       lookup_flags &= LOOKUP_REVAL;
-
-       error = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+       error = filename_parentat(dfd, name, reval_flag, path, &last, &type);
        if (error)
                return ERR_PTR(error);
 
@@ -3698,11 +3694,13 @@ static struct dentry *filename_create(int dfd, struct filename *name,
        /* don't fail immediately if it's r/o, at least try to report other errors */
        err2 = mnt_want_write(path->mnt);
        /*
-        * Do the final lookup.
+        * Do the final lookup.  Suppress 'create' if there is a trailing
+        * '/', and a directory wasn't requested.
         */
-       lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+       if (last.name[last.len] && !want_dir)
+               create_flags = 0;
        inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
-       dentry = __lookup_hash(&last, path->dentry, lookup_flags);
+       dentry = __lookup_hash(&last, path->dentry, reval_flag | create_flags);
        if (IS_ERR(dentry))
                goto unlock;
 
@@ -3716,7 +3714,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
         * all is fine. Let's be bastards - you had / on the end, you've
         * been asking for (non-existent) directory. -ENOENT for you.
         */
-       if (unlikely(!is_dir && last.name[last.len])) {
+       if (unlikely(!create_flags)) {
                error = -ENOENT;
                goto fail;
        }
index 47a53b3..14a7222 100644 (file)
@@ -4,10 +4,6 @@ config NFS_FS
        depends on INET && FILE_LOCKING && MULTIUSER
        select LOCKD
        select SUNRPC
-       select CRYPTO
-       select CRYPTO_HASH
-       select XXHASH
-       select CRYPTO_XXHASH
        select NFS_ACL_SUPPORT if NFS_V3_ACL
        help
          Choose Y here if you want to access files residing on other
index bac4cf1..c6b263b 100644 (file)
@@ -39,7 +39,7 @@
 #include <linux/sched.h>
 #include <linux/kmemleak.h>
 #include <linux/xattr.h>
-#include <linux/xxhash.h>
+#include <linux/hash.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -350,10 +350,7 @@ out:
  * of directory cookies. Content is addressed by the value of the
  * cookie index of the first readdir entry in a page.
  *
- * The xxhash algorithm is chosen because it is fast, and is supposed
- * to result in a decent flat distribution of hashes.
- *
- * We then select only the first 18 bits to avoid issues with excessive
+ * We select only the first 18 bits to avoid issues with excessive
  * memory use for the page cache XArray. 18 bits should allow the caching
  * of 262144 pages of sequences of readdir entries. Since each page holds
  * 127 readdir entries for a typical 64-bit system, that works out to a
@@ -363,7 +360,7 @@ static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
 {
        if (cookie == 0)
                return 0;
-       return xxhash(&cookie, sizeof(cookie), 0) & NFS_READDIR_COOKIE_MASK;
+       return hash_64(cookie, 18);
 }
 
 static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
@@ -1991,16 +1988,6 @@ const struct dentry_operations nfs4_dentry_operations = {
 };
 EXPORT_SYMBOL_GPL(nfs4_dentry_operations);
 
-static fmode_t flags_to_mode(int flags)
-{
-       fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
-       if ((flags & O_ACCMODE) != O_WRONLY)
-               res |= FMODE_READ;
-       if ((flags & O_ACCMODE) != O_RDONLY)
-               res |= FMODE_WRITE;
-       return res;
-}
-
 static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags, struct file *filp)
 {
        return alloc_nfs_open_context(dentry, flags_to_mode(open_flags), filp);
index 7eb3b08..b4e46b0 100644 (file)
@@ -1180,7 +1180,6 @@ int nfs_open(struct inode *inode, struct file *filp)
        nfs_fscache_open_file(inode, filp);
        return 0;
 }
-EXPORT_SYMBOL_GPL(nfs_open);
 
 /*
  * This function is called whenever some part of NFS notices that
index 57b0497..7eefa16 100644 (file)
@@ -42,6 +42,16 @@ static inline bool nfs_lookup_is_soft_revalidate(const struct dentry *dentry)
        return true;
 }
 
+static inline fmode_t flags_to_mode(int flags)
+{
+       fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+       if ((flags & O_ACCMODE) != O_WRONLY)
+               res |= FMODE_READ;
+       if ((flags & O_ACCMODE) != O_RDONLY)
+               res |= FMODE_WRITE;
+       return res;
+}
+
 /*
  * Note: RFC 1813 doesn't limit the number of auth flavors that
  * a server can return, so make something up.
index ad3405c..e7b34f7 100644 (file)
@@ -997,7 +997,7 @@ int __init nfs4_xattr_cache_init(void)
 
        nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
            sizeof(struct nfs4_xattr_cache), 0,
-           (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
+           (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
            nfs4_xattr_cache_init_once);
        if (nfs4_xattr_cache_cachep == NULL)
                return -ENOMEM;
index d258933..7b861e4 100644 (file)
@@ -32,6 +32,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        struct dentry *parent = NULL;
        struct inode *dir;
        unsigned openflags = filp->f_flags;
+       fmode_t f_mode;
        struct iattr attr;
        int err;
 
@@ -50,8 +51,9 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        if (err)
                return err;
 
+       f_mode = filp->f_mode;
        if ((openflags & O_ACCMODE) == 3)
-               return nfs_open(inode, filp);
+               f_mode |= flags_to_mode(openflags);
 
        /* We can't create new files here */
        openflags &= ~(O_CREAT|O_EXCL);
@@ -59,7 +61,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
        parent = dget_parent(dentry);
        dir = d_inode(parent);
 
-       ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode, filp);
+       ctx = alloc_nfs_open_context(file_dentry(filp), f_mode, filp);
        err = PTR_ERR(ctx);
        if (IS_ERR(ctx))
                goto out;
index e3f5b38..16106f8 100644 (file)
@@ -9615,6 +9615,8 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
        nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
 
        task = rpc_run_task(&task_setup_data);
+       if (IS_ERR(task))
+               return ERR_CAST(task);
 
        status = rpc_wait_for_completion_task(task);
        if (status != 0)
index 5fa11e1..6f325e1 100644 (file)
@@ -347,6 +347,7 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (data == NULL)
                return ERR_PTR(-ENOMEM);
+       task_setup_data.task = &data->task;
        task_setup_data.callback_data = data;
 
        data->cred = get_current_cred();
index c08882f..2c1b027 100644 (file)
@@ -237,6 +237,13 @@ nfsd_file_check_write_error(struct nfsd_file *nf)
 }
 
 static void
+nfsd_file_flush(struct nfsd_file *nf)
+{
+       if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
+               nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
+}
+
+static void
 nfsd_file_do_unhash(struct nfsd_file *nf)
 {
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
@@ -295,19 +302,15 @@ nfsd_file_put_noref(struct nfsd_file *nf)
 void
 nfsd_file_put(struct nfsd_file *nf)
 {
-       bool is_hashed;
-
        set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
-       if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+       if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
+               nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
-               return;
+       } else {
+               nfsd_file_put_noref(nf);
+               if (nf->nf_file)
+                       nfsd_file_schedule_laundrette();
        }
-
-       filemap_flush(nf->nf_file->f_mapping);
-       is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;
-       nfsd_file_put_noref(nf);
-       if (is_hashed)
-               nfsd_file_schedule_laundrette();
        if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
                nfsd_file_gc();
 }
@@ -328,6 +331,7 @@ nfsd_file_dispose_list(struct list_head *dispose)
        while(!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
+               nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
        }
 }
@@ -341,6 +345,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
        while(!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
+               nfsd_file_flush(nf);
                if (!refcount_dec_and_test(&nf->nf_ref))
                        continue;
                if (nfsd_file_free(nf))
index 367551b..b576080 100644 (file)
@@ -249,34 +249,34 @@ nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
        int w;
 
        if (!svcxdr_encode_stat(xdr, resp->status))
-               return 0;
+               return false;
 
        if (dentry == NULL || d_really_is_negative(dentry))
-               return 1;
+               return true;
        inode = d_inode(dentry);
 
        if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
-               return 0;
+               return false;
        if (xdr_stream_encode_u32(xdr, resp->mask) < 0)
-               return 0;
+               return false;
 
        rqstp->rq_res.page_len = w = nfsacl_size(
                (resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
                (resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
        while (w > 0) {
                if (!*(rqstp->rq_next_page++))
-                       return 1;
+                       return true;
                w -= PAGE_SIZE;
        }
 
        if (!nfs_stream_encode_acl(xdr, inode, resp->acl_access,
                                   resp->mask & NFS_ACL, 0))
-               return 0;
+               return false;
        if (!nfs_stream_encode_acl(xdr, inode, resp->acl_default,
                                   resp->mask & NFS_DFACL, NFS_ACL_DEFAULT))
-               return 0;
+               return false;
 
-       return 1;
+       return true;
 }
 
 /* ACCESS */
@@ -286,17 +286,17 @@ nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
        struct nfsd3_accessres *resp = rqstp->rq_resp;
 
        if (!svcxdr_encode_stat(xdr, resp->status))
-               return 0;
+               return false;
        switch (resp->status) {
        case nfs_ok:
                if (!svcxdr_encode_fattr(rqstp, xdr, &resp->fh, &resp->stat))
-                       return 0;
+                       return false;
                if (xdr_stream_encode_u32(xdr, resp->access) < 0)
-                       return 0;
+                       return false;
                break;
        }
 
-       return 1;
+       return true;
 }
 
 /*
index 7f734be..5c2c944 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -348,9 +348,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
 #  define choose_32_64(a,b) b
 #endif
 
-#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
-#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
-
 #ifndef INIT_STRUCT_STAT_PADDING
 #  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
 #endif
@@ -359,7 +356,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 {
        struct stat tmp;
 
-       if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
+       if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+               return -EOVERFLOW;
+       if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
 #if BITS_PER_LONG == 32
        if (stat->size > MAX_NON_LFS)
@@ -367,7 +366,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
 #endif
 
        INIT_STRUCT_STAT_PADDING(tmp);
-       tmp.st_dev = encode_dev(stat->dev);
+       tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
@@ -377,7 +376,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-       tmp.st_rdev = encode_dev(stat->rdev);
+       tmp.st_rdev = new_encode_dev(stat->rdev);
        tmp.st_size = stat->size;
        tmp.st_atime = stat->atime.tv_sec;
        tmp.st_mtime = stat->mtime.tv_sec;
@@ -665,11 +664,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
 {
        struct compat_stat tmp;
 
-       if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
+       if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
+               return -EOVERFLOW;
+       if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
                return -EOVERFLOW;
 
        memset(&tmp, 0, sizeof(tmp));
-       tmp.st_dev = old_encode_dev(stat->dev);
+       tmp.st_dev = new_encode_dev(stat->dev);
        tmp.st_ino = stat->ino;
        if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
                return -EOVERFLOW;
@@ -679,7 +680,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
                return -EOVERFLOW;
        SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
        SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
-       tmp.st_rdev = old_encode_dev(stat->rdev);
+       tmp.st_rdev = new_encode_dev(stat->rdev);
        if ((u64) stat->size > MAX_NON_LFS)
                return -EOVERFLOW;
        tmp.st_size = stat->size;
index 42dcf96..a12ac03 100644 (file)
@@ -703,19 +703,6 @@ int sysfs_change_owner(struct kobject *kobj, kuid_t kuid, kgid_t kgid)
 
        ktype = get_ktype(kobj);
        if (ktype) {
-               struct attribute **kattr;
-
-               /*
-                * Change owner of the default attributes associated with the
-                * ktype of @kobj.
-                */
-               for (kattr = ktype->default_attrs; kattr && *kattr; kattr++) {
-                       error = sysfs_file_change_owner(kobj, (*kattr)->name,
-                                                       kuid, kgid);
-                       if (error)
-                               return error;
-               }
-
                /*
                 * Change owner of the default groups associated with the
                 * ktype of @kobj.
index 3f7f01f..c4b78c2 100644 (file)
@@ -509,7 +509,6 @@ extern int unregister_acpi_notifier(struct notifier_block *);
  * External Functions
  */
 
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
 struct acpi_device *acpi_fetch_acpi_dev(acpi_handle handle);
 acpi_status acpi_bus_get_status_handle(acpi_handle handle,
                                       unsigned long long *sta);
index c08758b..c05d2ce 100644 (file)
@@ -269,6 +269,7 @@ bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
+void hv_setup_dma_ops(struct device *dev, bool coherent);
 void *hv_map_memory(void *addr, unsigned long size);
 void hv_unmap_memory(void *addr);
 #else /* CONFIG_HYPERV */
index fd7feb5..eee6f77 100644 (file)
@@ -565,10 +565,14 @@ static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
 #define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)       \
        do {                                                    \
                unsigned long _sz = huge_page_size(h);          \
-               if (_sz == PMD_SIZE)                            \
-                       tlb_flush_pmd_range(tlb, address, _sz); \
-               else if (_sz == PUD_SIZE)                       \
+               if (_sz >= P4D_SIZE)                            \
+                       tlb_flush_p4d_range(tlb, address, _sz); \
+               else if (_sz >= PUD_SIZE)                       \
                        tlb_flush_pud_range(tlb, address, _sz); \
+               else if (_sz >= PMD_SIZE)                       \
+                       tlb_flush_pmd_range(tlb, address, _sz); \
+               else                                            \
+                       tlb_flush_pte_range(tlb, address, _sz); \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
 
index 8fc6373..df30f11 100644 (file)
@@ -143,7 +143,7 @@ static inline void put_unaligned_be48(const u64 val, void *p)
 
 static inline u64 __get_unaligned_be48(const u8 *p)
 {
-       return (u64)p[0] << 40 | (u64)p[1] << 32 | p[2] << 24 |
+       return (u64)p[0] << 40 | (u64)p[1] << 32 | (u64)p[2] << 24 |
                p[3] << 16 | p[4] << 8 | p[5];
 }
 
diff --git a/include/dt-bindings/reset/amlogic,meson-s4-reset.h b/include/dt-bindings/reset/amlogic,meson-s4-reset.h
new file mode 100644 (file)
index 0000000..eab428e
--- /dev/null
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Zelong Dong <zelong.dong@amlogic.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_S4_RESET_H
+
+/*     RESET0                                  */
+#define RESET_USB_DDR0                 0
+#define RESET_USB_DDR1                 1
+#define RESET_USB_DDR2                 2
+#define RESET_USB_DDR3                 3
+#define RESET_USBCTRL                  4
+/*                                     5-7     */
+#define RESET_USBPHY20                 8
+#define RESET_USBPHY21                 9
+/*                                     10-15   */
+#define RESET_HDMITX_APB               16
+#define RESET_BRG_VCBUS_DEC            17
+#define RESET_VCBUS                    18
+#define RESET_VID_PLL_DIV              19
+#define RESET_VDI6                     20
+#define RESET_GE2D                     21
+#define RESET_HDMITXPHY                        22
+#define RESET_VID_LOCK                 23
+#define RESET_VENCL                    24
+#define RESET_VDAC                     25
+#define RESET_VENCP                    26
+#define RESET_VENCI                    27
+#define RESET_RDMA                     28
+#define RESET_HDMI_TX                  29
+#define RESET_VIU                      30
+#define RESET_VENC                     31
+
+/*     RESET1                                  */
+#define RESET_AUDIO                    32
+#define RESET_MALI_APB                 33
+#define RESET_MALI                     34
+#define RESET_DDR_APB                  35
+#define RESET_DDR                      36
+#define RESET_DOS_APB                  37
+#define RESET_DOS                      38
+/*                                     39-47   */
+#define RESET_ETH                      48
+/*                                     49-51   */
+#define RESET_DEMOD                    52
+/*                                     53-63   */
+
+/*     RESET2                                  */
+#define RESET_ABUS_ARB                 64
+#define RESET_IR_CTRL                  65
+#define RESET_TEMPSENSOR_DDR           66
+#define RESET_TEMPSENSOR_PLL           67
+/*                                     68-71   */
+#define RESET_SMART_CARD               72
+#define RESET_SPICC0                   73
+/*                                     74      */
+#define RESET_RSA                      75
+/*                                     76-79   */
+#define RESET_MSR_CLK                  80
+#define RESET_SPIFC                    81
+#define RESET_SARADC                   82
+/*                                     83-87   */
+#define RESET_ACODEC                   88
+#define RESET_CEC                      89
+#define RESET_AFIFO                    90
+#define RESET_WATCHDOG                 91
+/*                                     92-95   */
+
+/*     RESET3                                  */
+/*                                     96-127  */
+
+/*     RESET4                                  */
+/*                                     128-131 */
+#define RESET_PWM_AB                   132
+#define RESET_PWM_CD                   133
+#define RESET_PWM_EF                   134
+#define RESET_PWM_GH                   135
+#define RESET_PWM_IJ                   136
+/*                                     137     */
+#define RESET_UART_A                   138
+#define RESET_UART_B                   139
+#define RESET_UART_C                   140
+#define RESET_UART_D                   141
+#define RESET_UART_E                   142
+/*                                     143     */
+#define RESET_I2C_S_A                  144
+#define RESET_I2C_M_A                  145
+#define RESET_I2C_M_B                  146
+#define RESET_I2C_M_C                  147
+#define RESET_I2C_M_D                  148
+#define RESET_I2C_M_E                  149
+/*                                     150-151 */
+#define RESET_SD_EMMC_A                        152
+#define RESET_SD_EMMC_B                        153
+#define RESET_NAND_EMMC                        154
+/*                                     155-159 */
+
+/* RESET5 */
+#define RESET_BRG_VDEC_PIPL0           160
+#define RESET_BRG_HEVCF_PIPL0          161
+/*                                     162     */
+#define RESET_BRG_HCODEC_PIPL0         163
+#define RESET_BRG_GE2D_PIPL0           164
+#define RESET_BRG_VPU_PIPL0            165
+#define RESET_BRG_CPU_PIPL0            166
+#define RESET_BRG_MALI_PIPL0           167
+/*                                     168     */
+#define RESET_BRG_MALI_PIPL1           169
+/*                                     170-171 */
+#define RESET_BRG_HEVCF_PIPL1          172
+#define RESET_BRG_HEVCB_PIPL1          173
+/*                                     174-183 */
+#define RESET_RAMA                     184
+/*                                     185-186 */
+#define RESET_BRG_NIC_VAPB             187
+#define RESET_BRG_NIC_DSU              188
+#define RESET_BRG_NIC_SYSCLK           189
+#define RESET_BRG_NIC_MAIN             190
+#define RESET_BRG_NIC_ALL              191
+
+#endif
index 85651e4..e5c76c1 100644 (file)
@@ -38,7 +38,12 @@ struct ffa_driver {
 
 static inline void ffa_dev_set_drvdata(struct ffa_device *fdev, void *data)
 {
-       fdev->dev.driver_data = data;
+       dev_set_drvdata(&fdev->dev, data);
+}
+
+static inline void *ffa_dev_get_drvdata(struct ffa_device *fdev)
+{
+       return dev_get_drvdata(&fdev->dev);
 }
 
 #if IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT)
index c1fc4af..3a9d2d7 100644 (file)
@@ -570,9 +570,11 @@ static inline u32 type_flag(u32 type)
        return type & ~BPF_BASE_TYPE_MASK;
 }
 
+/* only use after check_attach_btf_id() */
 static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
 {
-       return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
+       return prog->type == BPF_PROG_TYPE_EXT ?
+               prog->aux->dst_prog->type : prog->type;
 }
 
 #endif /* _LINUX_BPF_VERIFIER_H */
index fec374f..ec7f25d 100644 (file)
@@ -61,6 +61,21 @@ to_dma_fence_array(struct dma_fence *fence)
        return container_of(fence, struct dma_fence_array, base);
 }
 
+/**
+ * dma_fence_array_for_each - iterate over all fences in array
+ * @fence: current fence
+ * @index: index into the array
+ * @head: potential dma_fence_array object
+ *
+ * Test if @head is a dma_fence_array object and if yes iterate over all fences
+ * in the array. If not just iterate over the fence in @head itself.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
+ */
+#define dma_fence_array_for_each(fence, index, head)                   \
+       for (index = 0, fence = dma_fence_array_first(head); fence;     \
+            ++(index), fence = dma_fence_array_next(head, index))
+
 struct dma_fence_array *dma_fence_array_create(int num_fences,
                                               struct dma_fence **fences,
                                               u64 context, unsigned seqno,
@@ -68,4 +83,8 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
 
 bool dma_fence_match_context(struct dma_fence *fence, u64 context);
 
+struct dma_fence *dma_fence_array_first(struct dma_fence *head);
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+                                      unsigned int index);
+
 #endif /* __LINUX_DMA_FENCE_ARRAY_H */
index 10d51bc..4bdf0b9 100644 (file)
@@ -112,6 +112,8 @@ static inline void dma_fence_chain_free(struct dma_fence_chain *chain)
  *
  * Iterate over all fences in the chain. We keep a reference to the current
  * fence while inside the loop which must be dropped when breaking out.
+ *
+ * For a deep dive iterator see dma_fence_unwrap_for_each().
  */
 #define dma_fence_chain_for_each(iter, head)   \
        for (iter = dma_fence_get(head); iter; \
diff --git a/include/linux/dma-fence-unwrap.h b/include/linux/dma-fence-unwrap.h
new file mode 100644 (file)
index 0000000..77e335a
--- /dev/null
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * fence-unwrap: deep iteration over dma_fence_chain/dma_fence_array containers
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ *     Christian König <christian.koenig@amd.com>
+ */
+
+#ifndef __LINUX_DMA_FENCE_UNWRAP_H
+#define __LINUX_DMA_FENCE_UNWRAP_H
+
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-array.h>
+
+/**
+ * struct dma_fence_unwrap - cursor into the container structure
+ *
+ * Should be used with dma_fence_unwrap_for_each() iterator macro.
+ */
+struct dma_fence_unwrap {
+       /**
+        * @chain: potential dma_fence_chain, but can be other fence as well
+        */
+       struct dma_fence *chain;
+       /**
+        * @array: potential dma_fence_array, but can be other fence as well
+        */
+       struct dma_fence *array;
+       /**
+        * @index: last returned index if @array is really a dma_fence_array
+        */
+       unsigned int index;
+};
+
+/* Internal helper to start new array iteration, don't use directly */
+static inline struct dma_fence *
+__dma_fence_unwrap_array(struct dma_fence_unwrap * cursor)
+{
+       cursor->array = dma_fence_chain_contained(cursor->chain);
+       cursor->index = 0;
+       return dma_fence_array_first(cursor->array);
+}
+
+/**
+ * dma_fence_unwrap_first - return the first fence from fence containers
+ * @head: the entrypoint into the containers
+ * @cursor: current position inside the containers
+ *
+ * Unwraps potential dma_fence_chain/dma_fence_array containers and return the
+ * first fence.
+ */
+static inline struct dma_fence *
+dma_fence_unwrap_first(struct dma_fence *head, struct dma_fence_unwrap *cursor)
+{
+       cursor->chain = dma_fence_get(head);
+       return __dma_fence_unwrap_array(cursor);
+}
+
+/**
+ * dma_fence_unwrap_next - return the next fence from the fence containers
+ * @cursor: current position inside the containers
+ *
+ * Continue unwrapping the dma_fence_chain/dma_fence_array containers and return
+ * the next fence from them.
+ */
+static inline struct dma_fence *
+dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
+{
+       struct dma_fence *tmp;
+
+       ++cursor->index;
+       tmp = dma_fence_array_next(cursor->array, cursor->index);
+       if (tmp)
+               return tmp;
+
+       cursor->chain = dma_fence_chain_walk(cursor->chain);
+       return __dma_fence_unwrap_array(cursor);
+}
+
+/**
+ * dma_fence_unwrap_for_each - iterate over all fences in containers
+ * @fence: current fence
+ * @cursor: current position inside the containers
+ * @head: starting point for the iterator
+ *
+ * Unwrap dma_fence_chain and dma_fence_array containers and deep dive into all
+ * potential fences in them. If @head is just a normal fence only that one is
+ * returned.
+ */
+#define dma_fence_unwrap_for_each(fence, cursor, head)                 \
+       for (fence = dma_fence_unwrap_first(head, cursor); fence;       \
+            fence = dma_fence_unwrap_next(cursor))
+
+#endif
index 6727fb0..e255390 100644 (file)
@@ -573,7 +573,6 @@ int fscache_write(struct netfs_cache_resources *cres,
 
 /**
  * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
- * @cookie: The cookie representing the cache object
  * @mapping: The netfs inode to use as the source
  * @start: The start position in @mapping
  * @len: The amount of data to unlock
@@ -582,8 +581,7 @@ int fscache_write(struct netfs_cache_resources *cres,
  * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
  * waiting.
  */
-static inline void fscache_clear_page_bits(struct fscache_cookie *cookie,
-                                          struct address_space *mapping,
+static inline void fscache_clear_page_bits(struct address_space *mapping,
                                           loff_t start, size_t len,
                                           bool caching)
 {
index 761f8f1..3e3d36f 100644 (file)
@@ -613,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc(gfp_t gfp, unsigned order);
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool hugepage);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+               unsigned long addr, bool hugepage);
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages_vma(gfp_mask, order, vma, addr, true)
 #else
@@ -627,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
 {
        return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
        alloc_pages(gfp_mask, order)
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage)               \
+       folio_alloc(gfp, order)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages(gfp_mask, order)
 #endif
index c3aa8b3..e71f6e1 100644 (file)
@@ -688,7 +688,7 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
 int devm_acpi_dev_add_driver_gpios(struct device *dev,
                                   const struct acpi_gpio_mapping *gpios);
 
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label);
+struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
 
 #else  /* CONFIG_GPIOLIB && CONFIG_ACPI */
 
@@ -705,6 +705,12 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
        return -ENXIO;
 }
 
+static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
+                                                          char *label)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
 #endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
 
 
index 98c9351..874aabd 100644 (file)
@@ -222,6 +222,15 @@ struct gpio_irq_chip {
        bool per_parent_data;
 
        /**
+        * @initialized:
+        *
+        * Flag to track GPIO chip irq member's initialization.
+        * This flag will make sure GPIO chip irq members are not used
+        * before they are initialized.
+        */
+       bool initialized;
+
+       /**
         * @init_hw: optional routine to initialize hardware before
         * an IRQ chip will be added. This is quite useful when
         * a particular driver wants to clear IRQ related registers
index 08ba599..a890428 100644 (file)
 )
 
 /**
- * lower_48_bits() - return bits 0-47 of a number
- * @n: the number we're accessing
- */
-static inline u64 lower_48_bits(u64 n)
-{
-       return n & ((1ull << 48) - 1);
-}
-
-/**
  * upper_32_bits - return bits 32-63 of a number
  * @n: the number we're accessing
  *
index f49e642..726857a 100644 (file)
@@ -204,6 +204,22 @@ static __always_inline __must_check bool kfence_free(void *addr)
  */
 bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);
 
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+/**
+ * __kfence_obj_info() - fill kmem_obj_info struct
+ * @kpp: kmem_obj_info to be filled
+ * @object: the object
+ *
+ * Return:
+ * * false - not a KFENCE object
+ * * true - a KFENCE object, filled @kpp
+ *
+ * Copies information to @kpp for KFENCE objects.
+ */
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+#endif
+
 #else /* CONFIG_KFENCE */
 
 static inline bool is_kfence_address(const void *addr) { return false; }
@@ -221,6 +237,14 @@ static inline bool __must_check kfence_handle_page_fault(unsigned long addr, boo
        return false;
 }
 
+#ifdef CONFIG_PRINTK
+struct kmem_obj_info;
+static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       return false;
+}
+#endif
+
 #endif
 
 #endif /* _LINUX_KFENCE_H */
index c7b4739..57fb972 100644 (file)
@@ -120,7 +120,6 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
 struct kobj_type {
        void (*release)(struct kobject *kobj);
        const struct sysfs_ops *sysfs_ops;
-       struct attribute **default_attrs;       /* use default_groups instead */
        const struct attribute_group **default_groups;
        const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
        const void *(*namespace)(struct kobject *kobj);
index 6d635e8..975e33b 100644 (file)
@@ -44,9 +44,9 @@ static inline void local_lock_debug_init(local_lock_t *l)
 }
 #else /* CONFIG_DEBUG_LOCK_ALLOC */
 # define LOCAL_LOCK_DEBUG_INIT(lockname)
-# define local_lock_acquire(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_release(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
-# define local_lock_debug_init(__ll)  do { typecheck(local_lock_t *, __ll); } while (0)
+static inline void local_lock_acquire(local_lock_t *l) { }
+static inline void local_lock_release(local_lock_t *l) { }
+static inline void local_lock_debug_init(local_lock_t *l) { }
 #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 #define INIT_LOCAL_LOCK(lockname)      { LOCAL_LOCK_DEBUG_INIT(lockname) }
index 71101d1..de5c64b 100644 (file)
@@ -175,7 +175,7 @@ void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq);
 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd,
                int retries);
 
-int mmc_hw_reset(struct mmc_host *host);
+int mmc_hw_reset(struct mmc_card *card);
 int mmc_sw_reset(struct mmc_host *host);
 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card);
 
index 962b14d..46ffab8 100644 (file)
@@ -1397,13 +1397,16 @@ static inline unsigned long *section_to_usemap(struct mem_section *ms)
 
 static inline struct mem_section *__nr_to_section(unsigned long nr)
 {
+       unsigned long root = SECTION_NR_TO_ROOT(nr);
+
+       if (unlikely(root >= NR_SECTION_ROOTS))
+               return NULL;
+
 #ifdef CONFIG_SPARSEMEM_EXTREME
-       if (!mem_section)
+       if (!mem_section || !mem_section[root])
                return NULL;
 #endif
-       if (!mem_section[SECTION_NR_TO_ROOT(nr)])
-               return NULL;
-       return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
+       return &mem_section[root][nr & SECTION_ROOT_MASK];
 }
 extern size_t mem_section_usage_size(void);
 
index 49ba486..2863e5a 100644 (file)
@@ -1694,6 +1694,7 @@ struct nfs_unlinkdata {
 struct nfs_renamedata {
        struct nfs_renameargs   args;
        struct nfs_renameres    res;
+       struct rpc_task         task;
        const struct cred       *cred;
        struct inode            *old_dir;
        struct dentry           *old_dentry;
index b87551f..1c58646 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/types.h>
 
-#define SCMI_MAX_STR_SIZE      16
+#define SCMI_MAX_STR_SIZE      64
 #define SCMI_MAX_NUM_RATES     16
 
 /**
@@ -44,6 +44,8 @@ struct scmi_clock_info {
        char name[SCMI_MAX_STR_SIZE];
        unsigned int enable_latency;
        bool rate_discrete;
+       bool rate_changed_notifications;
+       bool rate_change_requested_notifications;
        union {
                struct {
                        int num_rates;
@@ -146,7 +148,8 @@ struct scmi_perf_proto_ops {
  */
 struct scmi_power_proto_ops {
        int (*num_domains_get)(const struct scmi_protocol_handle *ph);
-       char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain);
+       const char *(*name_get)(const struct scmi_protocol_handle *ph,
+                               u32 domain);
 #define SCMI_POWER_STATE_TYPE_SHIFT    30
 #define SCMI_POWER_STATE_ID_MASK       (BIT(28) - 1)
 #define SCMI_POWER_STATE_PARAM(type, id) \
@@ -484,13 +487,19 @@ struct scmi_sensor_proto_ops {
  */
 struct scmi_reset_proto_ops {
        int (*num_domains_get)(const struct scmi_protocol_handle *ph);
-       char *(*name_get)(const struct scmi_protocol_handle *ph, u32 domain);
+       const char *(*name_get)(const struct scmi_protocol_handle *ph,
+                               u32 domain);
        int (*latency_get)(const struct scmi_protocol_handle *ph, u32 domain);
        int (*reset)(const struct scmi_protocol_handle *ph, u32 domain);
        int (*assert)(const struct scmi_protocol_handle *ph, u32 domain);
        int (*deassert)(const struct scmi_protocol_handle *ph, u32 domain);
 };
 
+enum scmi_voltage_level_mode {
+       SCMI_VOLTAGE_LEVEL_SET_AUTO,
+       SCMI_VOLTAGE_LEVEL_SET_SYNC,
+};
+
 /**
  * struct scmi_voltage_info - describe one available SCMI Voltage Domain
  *
@@ -503,7 +512,8 @@ struct scmi_reset_proto_ops {
  *              supported voltage level
  * @negative_volts_allowed: True if any of the entries of @levels_uv represent
  *                         a negative voltage.
- * @attributes: represents Voltage Domain advertised attributes
+ * @async_level_set: True when the voltage domain supports asynchronous level
+ *                  set commands.
  * @name: name assigned to the Voltage Domain by platform
  * @num_levels: number of total entries in @levels_uv.
  * @levels_uv: array of entries describing the available voltage levels for
@@ -513,7 +523,7 @@ struct scmi_voltage_info {
        unsigned int id;
        bool segmented;
        bool negative_volts_allowed;
-       unsigned int attributes;
+       bool async_level_set;
        char name[SCMI_MAX_STR_SIZE];
        unsigned int num_levels;
 #define SCMI_VOLTAGE_SEGMENT_LOW       0
@@ -544,7 +554,7 @@ struct scmi_voltage_proto_ops {
        int (*config_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
                          u32 *config);
        int (*level_set)(const struct scmi_protocol_handle *ph, u32 domain_id,
-                        u32 flags, s32 volt_uV);
+                        enum scmi_voltage_level_mode mode, s32 volt_uV);
        int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain_id,
                         s32 *volt_uV);
 };
@@ -742,6 +752,8 @@ void scmi_protocol_unregister(const struct scmi_protocol *proto);
 /* SCMI Notification API - Custom Event Reports */
 enum scmi_notification_events {
        SCMI_EVENT_POWER_STATE_CHANGED = 0x0,
+       SCMI_EVENT_CLOCK_RATE_CHANGED = 0x0,
+       SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED = 0x1,
        SCMI_EVENT_PERFORMANCE_LIMITS_CHANGED = 0x0,
        SCMI_EVENT_PERFORMANCE_LEVEL_CHANGED = 0x1,
        SCMI_EVENT_SENSOR_TRIP_POINT_EVENT = 0x0,
@@ -758,6 +770,13 @@ struct scmi_power_state_changed_report {
        unsigned int    power_state;
 };
 
+struct scmi_clock_rate_notif_report {
+       ktime_t                 timestamp;
+       unsigned int            agent_id;
+       unsigned int            clock_id;
+       unsigned long long      rate;
+};
+
 struct scmi_system_power_state_notifier_report {
        ktime_t         timestamp;
        unsigned int    agent_id;
diff --git a/include/linux/soc/apple/rtkit.h b/include/linux/soc/apple/rtkit.h
new file mode 100644 (file)
index 0000000..88eb832
--- /dev/null
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple RTKit IPC Library
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple's SoCs come with various co-processors running their RTKit operating
+ * system. This protocol library is used by client drivers to use the
+ * features provided by them.
+ */
+#ifndef _LINUX_APPLE_RTKIT_H_
+#define _LINUX_APPLE_RTKIT_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/mailbox_client.h>
+
+/*
+ * Struct to represent a shared memory buffer used for communication with the
+ * co-processors.
+ *
+ * @buffer:    Shared memory buffer allocated inside normal RAM.
+ * @iomem:     Shared memory buffer controlled by the co-processors.
+ * @size:      Size of the shared memory buffer.
+ * @iova:      Device VA of shared memory buffer.
+ * @is_mapped: Shared memory buffer is managed by the co-processor.
+ */
+
+struct apple_rtkit_shmem {
+       void *buffer;
+       void __iomem *iomem;
+       size_t size;
+       dma_addr_t iova;
+       bool is_mapped;
+};
+
+/*
+ * Struct to represent implementation-specific RTKit operations.
+ *
+ * @crashed:       Called when the co-processor has crashed. Runs in process
+ *                 context.
+ * @recv_message:  Function called when a message from RTKit is received
+ *                 on a non-system endpoint. Called from a worker thread.
+ * @recv_message_early:
+ *                 Like recv_message, but called from atomic context. It
+ *                 should return true if it handled the message. If it
+ *                 returns false, the message will be passed on to the
+ *                 worker thread.
+ * @shmem_setup:   Setup shared memory buffer. If bfr.is_mapped is true the
+ *                 buffer is managed by the co-processor and needs to be mapped.
+ *                 Otherwise the buffer is managed by Linux and needs to be
+ *                 allocated. If not specified dma_alloc_coherent is used.
+ *                 Called in process context.
+ * @shmem_destroy: Undo the shared memory buffer setup in shmem_setup. If not
+ *                 specified dma_free_coherent is used. Called in process
+ *                 context.
+ */
+struct apple_rtkit_ops {
+       void (*crashed)(void *cookie);
+       void (*recv_message)(void *cookie, u8 endpoint, u64 message);
+       bool (*recv_message_early)(void *cookie, u8 endpoint, u64 message);
+       int (*shmem_setup)(void *cookie, struct apple_rtkit_shmem *bfr);
+       void (*shmem_destroy)(void *cookie, struct apple_rtkit_shmem *bfr);
+};
+
+struct apple_rtkit;
+
+/*
+ * Initializes the internal state required to handle RTKit. This
+ * should usually be called within _probe.
+ *
+ * @dev:         Pointer to the device node this coprocessor is associated with
+ * @cookie:      opaque cookie passed to all functions defined in rtkit_ops
+ * @mbox_name:   mailbox name used to communicate with the co-processor
+ * @mbox_idx:    mailbox index to be used if mbox_name is NULL
+ * @ops:         pointer to rtkit_ops to be used for this co-processor
+ */
+struct apple_rtkit *devm_apple_rtkit_init(struct device *dev, void *cookie,
+                                         const char *mbox_name, int mbox_idx,
+                                         const struct apple_rtkit_ops *ops);
+
+/*
+ * Reinitialize internal structures. Must only be called while the co-processor
+ * is held in reset.
+ */
+int apple_rtkit_reinit(struct apple_rtkit *rtk);
+
+/*
+ * Handle RTKit's boot process. Should be called after the CPU of the
+ * co-processor has been started.
+ */
+int apple_rtkit_boot(struct apple_rtkit *rtk);
+
+/*
+ * Quiesce the co-processor.
+ */
+int apple_rtkit_quiesce(struct apple_rtkit *rtk);
+
+/*
+ * Wake the co-processor up from hibernation mode.
+ */
+int apple_rtkit_wake(struct apple_rtkit *rtk);
+
+/*
+ * Shutdown the co-processor
+ */
+int apple_rtkit_shutdown(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit is running and ready to handle messages.
+ */
+bool apple_rtkit_is_running(struct apple_rtkit *rtk);
+
+/*
+ * Checks if RTKit has crashed.
+ */
+bool apple_rtkit_is_crashed(struct apple_rtkit *rtk);
+
+/*
+ * Starts an endpoint. Must be called after boot but before any messages can be
+ * sent or received from that endpoint.
+ */
+int apple_rtkit_start_ep(struct apple_rtkit *rtk, u8 endpoint);
+
+/*
+ * Send a message to the given endpoint.
+ *
+ * @rtk:            RTKit reference
+ * @ep:             target endpoint
+ * @message:        message to be sent
+ * @completion:     will be completed once the message has been submitted
+ *                  to the hardware FIFO. Can be NULL.
+ * @atomic:         if set to true this function can be called from atomic
+ *                  context.
+ */
+int apple_rtkit_send_message(struct apple_rtkit *rtk, u8 ep, u64 message,
+                            struct completion *completion, bool atomic);
+
+/*
+ * Send a message to the given endpoint and wait until it has been submitted
+ * to the hardware FIFO.
+ * Will return zero on success and a negative error code on failure
+ * (e.g. -ETIME when the message couldn't be written within the given
+ * timeout)
+ *
+ * @rtk:            RTKit reference
+ * @ep:             target endpoint
+ * @message:        message to be sent
+ * @timeout:        timeout in milliseconds to allow the message transmission
+ *                  to be completed
+ * @atomic:         if set to true this function can be called from atomic
+ *                  context.
+ */
+int apple_rtkit_send_message_wait(struct apple_rtkit *rtk, u8 ep, u64 message,
+                                 unsigned long timeout, bool atomic);
+
+#endif /* _LINUX_APPLE_RTKIT_H_ */
diff --git a/include/linux/soc/apple/sart.h b/include/linux/soc/apple/sart.h
new file mode 100644 (file)
index 0000000..2249bf6
--- /dev/null
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/*
+ * Apple SART device driver
+ * Copyright (C) The Asahi Linux Contributors
+ *
+ * Apple SART is a simple address filter for DMA transactions.
+ * Regions of physical memory must be added to the SART's allow
+ * list before any DMA can target these. Unlike a proper
+ * IOMMU no remapping can be done.
+ */
+
+#ifndef _LINUX_SOC_APPLE_SART_H_
+#define _LINUX_SOC_APPLE_SART_H_
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct apple_sart;
+
+/*
+ * Get a reference to the SART attached to dev.
+ *
+ * Looks for the phandle reference in apple,sart and returns a pointer
+ * to the corresponding apple_sart struct to be used with
+ * apple_sart_add_allowed_region and apple_sart_remove_allowed_region.
+ */
+struct apple_sart *devm_apple_sart_get(struct device *dev);
+
+/*
+ * Adds the region [paddr, paddr+size] to the DMA allow list.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region to be used for DMA
+ * @size: Size of the region to be used for DMA.
+ */
+int apple_sart_add_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+                                 size_t size);
+
+/*
+ * Removes the region [paddr, paddr+size] from the DMA allow list.
+ *
+ * Note that exact same paddr and size used for apple_sart_add_allowed_region
+ * have to be passed.
+ *
+ * @sart: SART reference
+ * @paddr: Start address of the region no longer used for DMA
+ * @size: Size of the region no longer used for DMA.
+ */
+int apple_sart_remove_allowed_region(struct apple_sart *sart, phys_addr_t paddr,
+                                    size_t size);
+
+#endif /* _LINUX_SOC_APPLE_SART_H_ */
index 3e56a97..df53bed 100644 (file)
@@ -180,13 +180,13 @@ extern int static_call_text_reserved(void *start, void *end);
 
 extern long __static_call_return0(void);
 
-#define __DEFINE_STATIC_CALL(name, _func, _func_init)                  \
+#define DEFINE_STATIC_CALL(name, _func)                                        \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
-               .func = _func_init,                                     \
+               .func = _func,                                          \
                .type = 1,                                              \
        };                                                              \
-       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
 
 #define DEFINE_STATIC_CALL_NULL(name, _func)                           \
        DECLARE_STATIC_CALL(name, _func);                               \
@@ -196,6 +196,14 @@ extern long __static_call_return0(void);
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
 
+#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
+       DECLARE_STATIC_CALL(name, _func);                               \
+       struct static_call_key STATIC_CALL_KEY(name) = {                \
+               .func = __static_call_return0,                          \
+               .type = 1,                                              \
+       };                                                              \
+       ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
+
 #define static_call_cond(name) (void)__static_call(name)
 
 #define EXPORT_STATIC_CALL(name)                                       \
@@ -217,12 +225,12 @@ extern long __static_call_return0(void);
 
 static inline int static_call_init(void) { return 0; }
 
-#define __DEFINE_STATIC_CALL(name, _func, _func_init)                  \
+#define DEFINE_STATIC_CALL(name, _func)                                        \
        DECLARE_STATIC_CALL(name, _func);                               \
        struct static_call_key STATIC_CALL_KEY(name) = {                \
-               .func = _func_init,                                     \
+               .func = _func,                                          \
        };                                                              \
-       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func_init)
+       ARCH_DEFINE_STATIC_CALL_TRAMP(name, _func)
 
 #define DEFINE_STATIC_CALL_NULL(name, _func)                           \
        DECLARE_STATIC_CALL(name, _func);                               \
@@ -231,6 +239,12 @@ static inline int static_call_init(void) { return 0; }
        };                                                              \
        ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)
 
+#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
+       DECLARE_STATIC_CALL(name, _func);                               \
+       struct static_call_key STATIC_CALL_KEY(name) = {                \
+               .func = __static_call_return0,                          \
+       };                                                              \
+       ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)
 
 #define static_call_cond(name) (void)__static_call(name)
 
@@ -248,10 +262,7 @@ static inline int static_call_text_reserved(void *start, void *end)
        return 0;
 }
 
-static inline long __static_call_return0(void)
-{
-       return 0;
-}
+extern long __static_call_return0(void);
 
 #define EXPORT_STATIC_CALL(name)                                       \
        EXPORT_SYMBOL(STATIC_CALL_KEY(name));                           \
@@ -281,11 +292,14 @@ static inline long __static_call_return0(void)
                .func = _func_init,                                     \
        }
 
+#define DEFINE_STATIC_CALL(name, _func)                                        \
+       __DEFINE_STATIC_CALL(name, _func, _func)
+
 #define DEFINE_STATIC_CALL_NULL(name, _func)                           \
-       DECLARE_STATIC_CALL(name, _func);                               \
-       struct static_call_key STATIC_CALL_KEY(name) = {                \
-               .func = NULL,                                           \
-       }
+       __DEFINE_STATIC_CALL(name, _func, NULL)
+
+#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
+       __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
 
 static inline void __static_call_nop(void) { }
 
@@ -327,10 +341,4 @@ static inline int static_call_text_reserved(void *start, void *end)
 
 #endif /* CONFIG_HAVE_STATIC_CALL */
 
-#define DEFINE_STATIC_CALL(name, _func)                                        \
-       __DEFINE_STATIC_CALL(name, _func, _func)
-
-#define DEFINE_STATIC_CALL_RET0(name, _func)                           \
-       __DEFINE_STATIC_CALL(name, _func, __static_call_return0)
-
 #endif /* _LINUX_STATIC_CALL_H */
index a5dda49..217711f 100644 (file)
@@ -395,6 +395,7 @@ struct svc_deferred_req {
        size_t                  addrlen;
        struct sockaddr_storage daddr;  /* where reply must come from */
        size_t                  daddrlen;
+       void                    *xprt_ctxt;
        struct cache_deferred_req handle;
        size_t                  xprt_hlen;
        int                     argslen;
index 45a9530..522bbf9 100644 (file)
@@ -144,7 +144,7 @@ struct rpc_xprt_ops {
        unsigned short  (*get_srcport)(struct rpc_xprt *xprt);
        int             (*buf_alloc)(struct rpc_task *task);
        void            (*buf_free)(struct rpc_task *task);
-       void            (*prepare_request)(struct rpc_rqst *req);
+       int             (*prepare_request)(struct rpc_rqst *req);
        int             (*send_request)(struct rpc_rqst *req);
        void            (*wait_for_reply_request)(struct rpc_task *task);
        void            (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -358,10 +358,9 @@ int                        xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 void                   xprt_free_slot(struct rpc_xprt *xprt,
                                       struct rpc_rqst *req);
-void                   xprt_request_prepare(struct rpc_rqst *req);
 bool                   xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_request_enqueue_transmit(struct rpc_task *task);
-void                   xprt_request_enqueue_receive(struct rpc_task *task);
+int                    xprt_request_enqueue_receive(struct rpc_task *task);
 void                   xprt_request_wait_receive(struct rpc_task *task);
 void                   xprt_request_dequeue_xprt(struct rpc_task *task);
 bool                   xprt_request_need_retransmit(struct rpc_task *task);
index a4b1af5..248f4ac 100644 (file)
@@ -59,6 +59,15 @@ struct crc64_pi_tuple {
        __u8   ref_tag[6];
 };
 
+/**
+ * lower_48_bits() - return bits 0-47 of a number
+ * @n: the number we're accessing
+ */
+static inline u64 lower_48_bits(u64 n)
+{
+       return n & ((1ull << 48) - 1);
+}
+
 static inline u64 ext_pi_ref_tag(struct request *rq)
 {
        unsigned int shift = ilog2(queue_logical_block_size(rq->q));
index 911cad3..17eb1c5 100644 (file)
@@ -299,24 +299,6 @@ void tee_shm_free(struct tee_shm *shm);
 void tee_shm_put(struct tee_shm *shm);
 
 /**
- * tee_shm_va2pa() - Get physical address of a virtual address
- * @shm:       Shared memory handle
- * @va:                Virtual address to tranlsate
- * @pa:                Returned physical address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
-
-/**
- * tee_shm_pa2va() - Get virtual address of a physical address
- * @shm:       Shared memory handle
- * @pa:                Physical address to tranlsate
- * @va:                Returned virtual address
- * @returns 0 on success and < 0 on failure
- */
-int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
-
-/**
  * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
  * @shm:       Shared memory handle
  * @offs:      Offset from start of this shared memory
index 059b18e..5745c90 100644 (file)
@@ -75,7 +75,7 @@
  * By default we use get_cycles() for this purpose, but individual
  * architectures may override this in their asm/timex.h header file.
  */
-#define random_get_entropy()   get_cycles()
+#define random_get_entropy()   ((unsigned long)get_cycles())
 #endif
 
 /*
index 74a4a0f..48f2dd3 100644 (file)
@@ -133,6 +133,8 @@ struct vfio_pci_core_device {
        struct mutex            ioeventfds_lock;
        struct list_head        ioeventfds_list;
        struct vfio_pci_vf_token        *vf_token;
+       struct list_head                sriov_pfs_item;
+       struct vfio_pci_core_device     *sriov_pf_core_dev;
        struct notifier_block   nb;
        struct mutex            vma_lock;
        struct list_head        vma_list;
index dafdc7f..b341dd6 100644 (file)
@@ -23,8 +23,6 @@ struct virtio_shm_region {
  *       any of @get/@set, @get_status/@set_status, or @get_features/
  *       @finalize_features are NOT safe to be called from an atomic
  *       context.
- * @enable_cbs: enable the callbacks
- *      vdev: the virtio_device
  * @get: read the value of a configuration field
  *     vdev: the virtio_device
  *     offset: the offset of the configuration field
@@ -78,7 +76,6 @@ struct virtio_shm_region {
  */
 typedef void vq_callback_t(struct virtqueue *);
 struct virtio_config_ops {
-       void (*enable_cbs)(struct virtio_device *vdev);
        void (*get)(struct virtio_device *vdev, unsigned offset,
                    void *buf, unsigned len);
        void (*set)(struct virtio_device *vdev, unsigned offset,
@@ -233,9 +230,6 @@ void virtio_device_ready(struct virtio_device *dev)
 {
        unsigned status = dev->config->get_status(dev);
 
-       if (dev->config->enable_cbs)
-                  dev->config->enable_cbs(dev);
-
        BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
        dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
 }
index aa33e10..9f65f1b 100644 (file)
@@ -59,6 +59,8 @@ struct flow_dissector_key_vlan {
                __be16  vlan_tci;
        };
        __be16  vlan_tpid;
+       __be16  vlan_eth_type;
+       u16     padding;
 };
 
 struct flow_dissector_mpls_lse {
index d37268f..82800d5 100644 (file)
@@ -36,8 +36,6 @@ struct mctp_hdr {
 #define MCTP_HDR_TAG_SHIFT     0
 #define MCTP_HDR_TAG_MASK      GENMASK(2, 0)
 
-#define MCTP_HEADER_MAXLEN     4
-
 #define MCTP_INITIAL_DEFAULT_NET       1
 
 static inline bool mctp_address_unicast(mctp_eid_t eid)
index e76c946..d0a2477 100644 (file)
@@ -53,8 +53,10 @@ enum {
 
 #define ISID_SIZE                      6
 
-/* Connection suspend "bit" */
-#define ISCSI_SUSPEND_BIT              1
+/* Connection flags */
+#define ISCSI_CONN_FLAG_SUSPEND_TX     BIT(0)
+#define ISCSI_CONN_FLAG_SUSPEND_RX     BIT(1)
+#define ISCSI_CONN_FLAG_BOUND          BIT(2)
 
 #define ISCSI_ITT_MASK                 0x1fff
 #define ISCSI_TOTAL_CMDS_MAX           4096
@@ -211,8 +213,7 @@ struct iscsi_conn {
        struct list_head        cmdqueue;       /* data-path cmd queue */
        struct list_head        requeue;        /* tasks needing another run */
        struct work_struct      xmitwork;       /* per-conn. xmit workqueue */
-       unsigned long           suspend_tx;     /* suspend Tx */
-       unsigned long           suspend_rx;     /* suspend Rx */
+       unsigned long           flags;          /* ISCSI_CONN_FLAGs */
 
        /* negotiated params */
        unsigned                max_recv_dlength; /* initiator_max_recv_dsl*/
index 38e4a67..9acb842 100644 (file)
@@ -211,6 +211,8 @@ struct iscsi_cls_conn {
        struct mutex ep_mutex;
        struct iscsi_endpoint *ep;
 
+       /* Used when accessing flags and queueing work. */
+       spinlock_t lock;
        unsigned long flags;
        struct work_struct cleanup_work;
 
@@ -295,7 +297,7 @@ extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
 struct iscsi_endpoint {
        void *dd_data;                  /* LLD private data */
        struct device dev;
-       uint64_t id;
+       int id;
        struct iscsi_cls_conn *conn;
 };
 
index b7e9b58..6d4cc49 100644 (file)
@@ -284,6 +284,7 @@ int snd_card_disconnect(struct snd_card *card);
 void snd_card_disconnect_sync(struct snd_card *card);
 int snd_card_free(struct snd_card *card);
 int snd_card_free_when_closed(struct snd_card *card);
+int snd_card_free_on_error(struct device *dev, int ret);
 void snd_card_set_id(struct snd_card *card, const char *id);
 int snd_card_register(struct snd_card *card);
 int snd_card_info_init(void);
index 653dfff..8d79ceb 100644 (file)
@@ -51,6 +51,11 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_DEV_SG  SNDRV_DMA_TYPE_DEV /* no SG-buf support */
 #define SNDRV_DMA_TYPE_DEV_WC_SG       SNDRV_DMA_TYPE_DEV_WC
 #endif
+/* fallback types, don't use those directly */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG_FALLBACK         10
+#define SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK      11
+#endif
 
 /*
  * info for buffer allocation
index 0f34f13..3995c58 100644 (file)
@@ -1004,7 +1004,6 @@ DEFINE_RPC_XPRT_LIFETIME_EVENT(connect);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_auto);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_done);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_force);
-DEFINE_RPC_XPRT_LIFETIME_EVENT(disconnect_cleanup);
 DEFINE_RPC_XPRT_LIFETIME_EVENT(destroy);
 
 DECLARE_EVENT_CLASS(rpc_xprt_event,
@@ -2016,17 +2015,18 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
        TP_STRUCT__entry(
                __field(const void *, dr)
                __field(u32, xid)
-               __string(addr, dr->xprt->xpt_remotebuf)
+               __array(__u8, addr, INET6_ADDRSTRLEN + 10)
        ),
 
        TP_fast_assign(
                __entry->dr = dr;
                __entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
                                                       (dr->xprt_hlen>>2)));
-               __assign_str(addr, dr->xprt->xpt_remotebuf);
+               snprintf(__entry->addr, sizeof(__entry->addr) - 1,
+                        "%pISpc", (struct sockaddr *)&dr->addr);
        ),
 
-       TP_printk("addr=%s dr=%p xid=0x%08x", __get_str(addr), __entry->dr,
+       TP_printk("addr=%s dr=%p xid=0x%08x", __entry->addr, __entry->dr,
                __entry->xid)
 );
 
index 784adc6..1845cf7 100644 (file)
@@ -296,6 +296,7 @@ struct io_uring_params {
 #define IORING_FEAT_NATIVE_WORKERS     (1U << 9)
 #define IORING_FEAT_RSRC_TAGS          (1U << 10)
 #define IORING_FEAT_CQE_SKIP           (1U << 11)
+#define IORING_FEAT_LINKED_FILE                (1U << 12)
 
 /*
  * io_uring_register(2) opcodes and arguments
index 3021ea2..7837ba4 100644 (file)
@@ -1,4 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_STDDEF_H
+#define _UAPI_LINUX_STDDEF_H
+
 #include <linux/compiler_types.h>
 
 #ifndef __always_inline
@@ -41,3 +44,4 @@
                struct { } __empty_ ## NAME; \
                TYPE NAME[]; \
        }
+#endif
index 25a6c53..23e5716 100644 (file)
 #define TEE_IOC_MAGIC  0xa4
 #define TEE_IOC_BASE   0
 
-/* Flags relating to shared memory */
-#define TEE_IOCTL_SHM_MAPPED   0x1     /* memory mapped in normal world */
-#define TEE_IOCTL_SHM_DMA_BUF  0x2     /* dma-buf handle on shared memory */
-
 #define TEE_MAX_ARG_SIZE       1024
 
 #define TEE_GEN_CAP_GP         (1 << 0)/* GlobalPlatform compliant TEE */
index 471d719..847a82b 100644 (file)
@@ -114,7 +114,8 @@ obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
 obj-$(CONFIG_KCSAN) += kcsan/
 obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
-obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call.o
+obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
+obj-$(CONFIG_HAVE_STATIC_CALL_INLINE) += static_call_inline.o
 obj-$(CONFIG_CFI_CLANG) += cfi.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
index 5797c2a..d0a9aa0 100644 (file)
@@ -71,7 +71,6 @@ struct cpuhp_cpu_state {
        bool                    rollback;
        bool                    single;
        bool                    bringup;
-       int                     cpu;
        struct hlist_node       *node;
        struct hlist_node       *last;
        enum cpuhp_state        cb_state;
@@ -475,7 +474,7 @@ static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
 #endif
 
 static inline enum cpuhp_state
-cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+cpuhp_set_state(int cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
 {
        enum cpuhp_state prev_state = st->state;
        bool bringup = st->state < target;
@@ -486,14 +485,15 @@ cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
        st->target = target;
        st->single = false;
        st->bringup = bringup;
-       if (cpu_dying(st->cpu) != !bringup)
-               set_cpu_dying(st->cpu, !bringup);
+       if (cpu_dying(cpu) != !bringup)
+               set_cpu_dying(cpu, !bringup);
 
        return prev_state;
 }
 
 static inline void
-cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
+cpuhp_reset_state(int cpu, struct cpuhp_cpu_state *st,
+                 enum cpuhp_state prev_state)
 {
        bool bringup = !st->bringup;
 
@@ -520,8 +520,8 @@ cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
        }
 
        st->bringup = bringup;
-       if (cpu_dying(st->cpu) != !bringup)
-               set_cpu_dying(st->cpu, !bringup);
+       if (cpu_dying(cpu) != !bringup)
+               set_cpu_dying(cpu, !bringup);
 }
 
 /* Regular hotplug invocation of the AP hotplug thread */
@@ -541,15 +541,16 @@ static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
        wait_for_ap_thread(st, st->bringup);
 }
 
-static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
+static int cpuhp_kick_ap(int cpu, struct cpuhp_cpu_state *st,
+                        enum cpuhp_state target)
 {
        enum cpuhp_state prev_state;
        int ret;
 
-       prev_state = cpuhp_set_state(st, target);
+       prev_state = cpuhp_set_state(cpu, st, target);
        __cpuhp_kick_ap(st);
        if ((ret = st->result)) {
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
                __cpuhp_kick_ap(st);
        }
 
@@ -581,7 +582,7 @@ static int bringup_wait_for_ap(unsigned int cpu)
        if (st->target <= CPUHP_AP_ONLINE_IDLE)
                return 0;
 
-       return cpuhp_kick_ap(st, st->target);
+       return cpuhp_kick_ap(cpu, st, st->target);
 }
 
 static int bringup_cpu(unsigned int cpu)
@@ -704,7 +705,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);
 
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
                if (can_rollback_cpu(st))
                        WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
                                                            prev_state));
@@ -721,7 +722,6 @@ static void cpuhp_create(unsigned int cpu)
 
        init_completion(&st->done_up);
        init_completion(&st->done_down);
-       st->cpu = cpu;
 }
 
 static int cpuhp_should_run(unsigned int cpu)
@@ -875,7 +875,7 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
        cpuhp_lock_release(true);
 
        trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
-       ret = cpuhp_kick_ap(st, st->target);
+       ret = cpuhp_kick_ap(cpu, st, st->target);
        trace_cpuhp_exit(cpu, st->state, prev_state, ret);
 
        return ret;
@@ -1107,7 +1107,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                         ret, cpu, cpuhp_get_step(st->state)->name,
                         st->state);
 
-               cpuhp_reset_state(st, prev_state);
+               cpuhp_reset_state(cpu, st, prev_state);
 
                if (st->state < prev_state)
                        WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
@@ -1134,7 +1134,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       prev_state = cpuhp_set_state(st, target);
+       prev_state = cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread.
@@ -1165,7 +1165,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
        ret = cpuhp_down_callbacks(cpu, st, target);
        if (ret && st->state < prev_state) {
                if (st->state == CPUHP_TEARDOWN_CPU) {
-                       cpuhp_reset_state(st, prev_state);
+                       cpuhp_reset_state(cpu, st, prev_state);
                        __cpuhp_kick_ap(st);
                } else {
                        WARN(1, "DEAD callback error for CPU%d", cpu);
@@ -1352,7 +1352,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
        cpuhp_tasks_frozen = tasks_frozen;
 
-       cpuhp_set_state(st, target);
+       cpuhp_set_state(cpu, st, target);
        /*
         * If the current CPU state is in the range of the AP hotplug thread,
         * then we need to kick the thread once more.
index 4632b0f..8a6cd53 100644 (file)
@@ -114,6 +114,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
 
        if (unlikely(is_swiotlb_buffer(dev, phys)))
-               swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
+               swiotlb_tbl_unmap_single(dev, phys, size, dir,
+                                        attrs | DMA_ATTR_SKIP_CPU_SYNC);
 }
 #endif /* _KERNEL_DMA_DIRECT_H */
index e57a224..93c3b86 100644 (file)
@@ -392,7 +392,7 @@ DEFINE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
 DEFINE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
 void dynamic_irqentry_exit_cond_resched(void)
 {
-       if (!static_key_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
+       if (!static_branch_unlikely(&sk_dynamic_irqentry_exit_cond_resched))
                return;
        raw_irqentry_exit_cond_resched();
 }
index cfde994..23bb197 100644 (file)
@@ -574,8 +574,7 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                              enum event_type_t event_type);
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type,
-                            struct task_struct *task);
+                            enum event_type_t event_type);
 
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
@@ -781,7 +780,6 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
 static inline void update_cgrp_time_from_event(struct perf_event *event)
 {
        struct perf_cgroup_info *info;
-       struct perf_cgroup *cgrp;
 
        /*
         * ensure we access cgroup data only when needed and
@@ -790,21 +788,19 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
        if (!is_cgroup_event(event))
                return;
 
-       cgrp = perf_cgroup_from_task(current, event->ctx);
+       info = this_cpu_ptr(event->cgrp->info);
        /*
         * Do not update time when cgroup is not active
         */
-       if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
-               info = this_cpu_ptr(event->cgrp->info);
+       if (info->active)
                __update_cgrp_time(info, perf_clock(), true);
-       }
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-                         struct perf_event_context *ctx)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
-       struct perf_cgroup *cgrp;
+       struct perf_event_context *ctx = &cpuctx->ctx;
+       struct perf_cgroup *cgrp = cpuctx->cgrp;
        struct perf_cgroup_info *info;
        struct cgroup_subsys_state *css;
 
@@ -813,10 +809,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
         * ensure we do not access cgroup data
         * unless we have the cgroup pinned (css_get)
         */
-       if (!task || !ctx->nr_cgroups)
+       if (!cgrp)
                return;
 
-       cgrp = perf_cgroup_from_task(task, ctx);
+       WARN_ON_ONCE(!ctx->nr_cgroups);
 
        for (css = &cgrp->css; css; css = css->parent) {
                cgrp = container_of(css, struct perf_cgroup, css);
@@ -828,17 +824,12 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 
 static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
 
-#define PERF_CGROUP_SWOUT      0x1 /* cgroup switch out every event */
-#define PERF_CGROUP_SWIN       0x2 /* cgroup switch in events based on task */
-
 /*
  * reschedule events based on the cgroup constraint of task.
- *
- * mode SWOUT : schedule out everything
- * mode SWIN : schedule in based on cgroup for next
  */
-static void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task)
 {
+       struct perf_cgroup *cgrp;
        struct perf_cpu_context *cpuctx, *tmp;
        struct list_head *list;
        unsigned long flags;
@@ -849,35 +840,31 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
         */
        local_irq_save(flags);
 
+       cgrp = perf_cgroup_from_task(task, NULL);
+
        list = this_cpu_ptr(&cgrp_cpuctx_list);
        list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
                WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+               if (READ_ONCE(cpuctx->cgrp) == cgrp)
+                       continue;
 
                perf_ctx_lock(cpuctx, cpuctx->task_ctx);
                perf_pmu_disable(cpuctx->ctx.pmu);
 
-               if (mode & PERF_CGROUP_SWOUT) {
-                       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-                       /*
-                        * must not be done before ctxswout due
-                        * to event_filter_match() in event_sched_out()
-                        */
-                       cpuctx->cgrp = NULL;
-               }
+               cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+               /*
+                * must not be done before ctxswout due
+                * to update_cgrp_time_from_cpuctx() in
+                * ctx_sched_out()
+                */
+               cpuctx->cgrp = cgrp;
+               /*
+                * set cgrp before ctxsw in to allow
+                * perf_cgroup_set_timestamp() in ctx_sched_in()
+                * to not have to pass task around
+                */
+               cpu_ctx_sched_in(cpuctx, EVENT_ALL);
 
-               if (mode & PERF_CGROUP_SWIN) {
-                       WARN_ON_ONCE(cpuctx->cgrp);
-                       /*
-                        * set cgrp before ctxsw in to allow
-                        * event_filter_match() to not have to pass
-                        * task around
-                        * we pass the cpuctx->ctx to perf_cgroup_from_task()
-                        * because cgorup events are only per-cpu
-                        */
-                       cpuctx->cgrp = perf_cgroup_from_task(task,
-                                                            &cpuctx->ctx);
-                       cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
-               }
                perf_pmu_enable(cpuctx->ctx.pmu);
                perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
        }
@@ -885,58 +872,6 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
        local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task,
-                                        struct task_struct *next)
-{
-       struct perf_cgroup *cgrp1;
-       struct perf_cgroup *cgrp2 = NULL;
-
-       rcu_read_lock();
-       /*
-        * we come here when we know perf_cgroup_events > 0
-        * we do not need to pass the ctx here because we know
-        * we are holding the rcu lock
-        */
-       cgrp1 = perf_cgroup_from_task(task, NULL);
-       cgrp2 = perf_cgroup_from_task(next, NULL);
-
-       /*
-        * only schedule out current cgroup events if we know
-        * that we are switching to a different cgroup. Otherwise,
-        * do no touch the cgroup events.
-        */
-       if (cgrp1 != cgrp2)
-               perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
-
-       rcu_read_unlock();
-}
-
-static inline void perf_cgroup_sched_in(struct task_struct *prev,
-                                       struct task_struct *task)
-{
-       struct perf_cgroup *cgrp1;
-       struct perf_cgroup *cgrp2 = NULL;
-
-       rcu_read_lock();
-       /*
-        * we come here when we know perf_cgroup_events > 0
-        * we do not need to pass the ctx here because we know
-        * we are holding the rcu lock
-        */
-       cgrp1 = perf_cgroup_from_task(task, NULL);
-       cgrp2 = perf_cgroup_from_task(prev, NULL);
-
-       /*
-        * only need to schedule in cgroup events if we are changing
-        * cgroup during ctxsw. Cgroup events were not scheduled
-        * out of ctxsw out if that was not the case.
-        */
-       if (cgrp1 != cgrp2)
-               perf_cgroup_switch(task, PERF_CGROUP_SWIN);
-
-       rcu_read_unlock();
-}
-
 static int perf_cgroup_ensure_storage(struct perf_event *event,
                                struct cgroup_subsys_state *css)
 {
@@ -1032,22 +967,10 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
         */
        cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
 
-       /*
-        * Since setting cpuctx->cgrp is conditional on the current @cgrp
-        * matching the event's cgroup, we must do this for every new event,
-        * because if the first would mismatch, the second would not try again
-        * and we would leave cpuctx->cgrp unset.
-        */
-       if (ctx->is_active && !cpuctx->cgrp) {
-               struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
-
-               if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
-                       cpuctx->cgrp = cgrp;
-       }
-
        if (ctx->nr_cgroups++)
                return;
 
+       cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
        list_add(&cpuctx->cgrp_cpuctx_entry,
                        per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
 }
@@ -1069,9 +992,7 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
        if (--ctx->nr_cgroups)
                return;
 
-       if (ctx->is_active && cpuctx->cgrp)
-               cpuctx->cgrp = NULL;
-
+       cpuctx->cgrp = NULL;
        list_del(&cpuctx->cgrp_cpuctx_entry);
 }
 
@@ -1100,16 +1021,6 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task,
-                                        struct task_struct *next)
-{
-}
-
-static inline void perf_cgroup_sched_in(struct task_struct *prev,
-                                       struct task_struct *task)
-{
-}
-
 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
                                      struct perf_event_attr *attr,
                                      struct perf_event *group_leader)
@@ -1118,13 +1029,7 @@ static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
 }
 
 static inline void
-perf_cgroup_set_timestamp(struct task_struct *task,
-                         struct perf_event_context *ctx)
-{
-}
-
-static inline void
-perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
+perf_cgroup_set_timestamp(struct perf_cpu_context *cpuctx)
 {
 }
 
@@ -1147,6 +1052,10 @@ static inline void
 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
 {
 }
+
+static void perf_cgroup_switch(struct task_struct *task)
+{
+}
 #endif
 
 /*
@@ -2713,8 +2622,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
-            enum event_type_t event_type,
-            struct task_struct *task);
+            enum event_type_t event_type);
 
 static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
                               struct perf_event_context *ctx,
@@ -2730,15 +2638,14 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
 }
 
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
-                               struct perf_event_context *ctx,
-                               struct task_struct *task)
+                               struct perf_event_context *ctx)
 {
-       cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+       cpu_ctx_sched_in(cpuctx, EVENT_PINNED);
        if (ctx)
-               ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+               ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
-               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 }
 
 /*
@@ -2788,7 +2695,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
        else if (ctx_event_type & EVENT_PINNED)
                cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, task_ctx, current);
+       perf_event_sched_in(cpuctx, task_ctx);
        perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
@@ -3011,7 +2918,7 @@ static void __perf_event_enable(struct perf_event *event,
                return;
 
        if (!event_filter_match(event)) {
-               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME);
                return;
        }
 
@@ -3020,7 +2927,7 @@ static void __perf_event_enable(struct perf_event *event,
         * then don't put it on unless the group is on.
         */
        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
-               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME);
                return;
        }
 
@@ -3668,7 +3575,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * cgroup event are system-wide mode only
         */
        if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_sched_out(task, next);
+               perf_cgroup_switch(next);
 }
 
 /*
@@ -3865,8 +3772,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
-            enum event_type_t event_type,
-            struct task_struct *task)
+            enum event_type_t event_type)
 {
        int is_active = ctx->is_active;
 
@@ -3878,7 +3784,7 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (is_active ^ EVENT_TIME) {
                /* start ctx time */
                __update_context_time(ctx, false);
-               perf_cgroup_set_timestamp(task, ctx);
+               perf_cgroup_set_timestamp(cpuctx);
                /*
                 * CPU-release for the below ->is_active store,
                 * see __load_acquire() in perf_event_time_now()
@@ -3909,12 +3815,11 @@ ctx_sched_in(struct perf_event_context *ctx,
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-                            enum event_type_t event_type,
-                            struct task_struct *task)
+                            enum event_type_t event_type)
 {
        struct perf_event_context *ctx = &cpuctx->ctx;
 
-       ctx_sched_in(ctx, cpuctx, event_type, task);
+       ctx_sched_in(ctx, cpuctx, event_type);
 }
 
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
@@ -3956,7 +3861,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
                cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-       perf_event_sched_in(cpuctx, ctx, task);
+       perf_event_sched_in(cpuctx, ctx);
 
        if (cpuctx->sched_cb_usage && pmu->sched_task)
                pmu->sched_task(cpuctx->task_ctx, true);
@@ -3984,16 +3889,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
        struct perf_event_context *ctx;
        int ctxn;
 
-       /*
-        * If cgroup events exist on this CPU, then we need to check if we have
-        * to switch in PMU state; cgroup event are system-wide mode only.
-        *
-        * Since cgroup events are CPU events, we must schedule these in before
-        * we schedule in the task events.
-        */
-       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_sched_in(prev, task);
-
        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (likely(!ctx))
@@ -4267,7 +4162,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
        if (cpu_event)
                rotate_ctx(&cpuctx->ctx, cpu_event);
 
-       perf_event_sched_in(cpuctx, task_ctx, current);
+       perf_event_sched_in(cpuctx, task_ctx);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -4339,7 +4234,7 @@ static void perf_event_enable_on_exec(int ctxn)
                clone_ctx = unclone_ctx(ctx);
                ctx_resched(cpuctx, ctx, event_type);
        } else {
-               ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+               ctx_sched_in(ctx, cpuctx, EVENT_TIME);
        }
        perf_ctx_unlock(cpuctx, ctx);
 
@@ -11635,6 +11530,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        event->state            = PERF_EVENT_STATE_INACTIVE;
 
+       if (parent_event)
+               event->event_caps = parent_event->event_caps;
+
        if (event->attr.sigtrap)
                atomic_set(&event->event_limit, 1);
 
@@ -13562,7 +13460,7 @@ static int __perf_cgroup_move(void *info)
 {
        struct task_struct *task = info;
        rcu_read_lock();
-       perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+       perf_cgroup_switch(task);
        rcu_read_unlock();
        return 0;
 }
index f7ff891..fdf1704 100644 (file)
@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
         */
        if (numvecs <= nodes) {
                for_each_node_mask(n, nodemsk) {
-                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
-                                  node_to_cpumask[n]);
+                       /* Ensure that only CPUs which are in both masks are set */
+                       cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+                       cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
                        if (++curvec == last_affv)
                                curvec = firstvec;
                }
index f7df715..7afa40f 100644 (file)
@@ -137,7 +137,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
        if (!irq_work_claim(work))
                return false;
 
-       kasan_record_aux_stack(work);
+       kasan_record_aux_stack_noalloc(work);
 
        preempt_disable();
        if (cpu != smp_processor_id()) {
index d575b49..51efaab 100644 (file)
@@ -5752,6 +5752,8 @@ static inline struct task_struct *pick_task(struct rq *rq)
 
 extern void task_vruntime_update(struct rq *rq, struct task_struct *p, bool in_fi);
 
+static void queue_core_balance(struct rq *rq);
+
 static struct task_struct *
 pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
@@ -5801,7 +5803,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                }
 
                rq->core_pick = NULL;
-               return next;
+               goto out;
        }
 
        put_prev_task_balance(rq, prev, rf);
@@ -5851,7 +5853,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                         */
                        WARN_ON_ONCE(fi_before);
                        task_vruntime_update(rq, next, false);
-                       goto done;
+                       goto out_set_next;
                }
        }
 
@@ -5970,8 +5972,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
                resched_curr(rq_i);
        }
 
-done:
+out_set_next:
        set_next_task(rq, next);
+out:
+       if (rq->core->core_forceidle_count && next == rq->idle)
+               queue_core_balance(rq);
+
        return next;
 }
 
@@ -6000,7 +6006,7 @@ static bool try_steal_cookie(int this, int that)
                if (p == src->core_pick || p == src->curr)
                        goto next;
 
-               if (!cpumask_test_cpu(this, &p->cpus_mask))
+               if (!is_cpu_allowed(p, this))
                        goto next;
 
                if (p->core_occupation > dst->idle->core_occupation)
@@ -6066,7 +6072,7 @@ static void sched_core_balance(struct rq *rq)
 
 static DEFINE_PER_CPU(struct callback_head, core_balance_head);
 
-void queue_core_balance(struct rq *rq)
+static void queue_core_balance(struct rq *rq)
 {
        if (!sched_core_enabled(rq))
                return;
index 8f8b502..ecb0d70 100644 (file)
@@ -434,7 +434,6 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir
 {
        update_idle_core(rq);
        schedstat_inc(rq->sched_goidle);
-       queue_core_balance(rq);
 }
 
 #ifdef CONFIG_SMP
index 58263f9..8dccb34 100644 (file)
@@ -1232,8 +1232,6 @@ static inline bool sched_group_cookie_match(struct rq *rq,
        return false;
 }
 
-extern void queue_core_balance(struct rq *rq);
-
 static inline bool sched_core_enqueued(struct task_struct *p)
 {
        return !RB_EMPTY_NODE(&p->core_node);
@@ -1267,10 +1265,6 @@ static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
        return &rq->__lock;
 }
 
-static inline void queue_core_balance(struct rq *rq)
-{
-}
-
 static inline bool sched_cpu_cookie_match(struct rq *rq, struct task_struct *p)
 {
        return true;
index 01a7c17..65a630f 100644 (file)
@@ -579,7 +579,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 
        /* There shouldn't be any pending callbacks on an offline CPU. */
        if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
-                    !warned && !llist_empty(head))) {
+                    !warned && entry != NULL)) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 
index f2b8bae..e9c3e69 100644 (file)
@@ -1,549 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/init.h>
 #include <linux/static_call.h>
-#include <linux/bug.h>
-#include <linux/smp.h>
-#include <linux/sort.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/processor.h>
-#include <asm/sections.h>
-
-extern struct static_call_site __start_static_call_sites[],
-                              __stop_static_call_sites[];
-extern struct static_call_tramp_key __start_static_call_tramp_key[],
-                                   __stop_static_call_tramp_key[];
-
-static bool static_call_initialized;
-
-/* mutex to protect key modules/sites */
-static DEFINE_MUTEX(static_call_mutex);
-
-static void static_call_lock(void)
-{
-       mutex_lock(&static_call_mutex);
-}
-
-static void static_call_unlock(void)
-{
-       mutex_unlock(&static_call_mutex);
-}
-
-static inline void *static_call_addr(struct static_call_site *site)
-{
-       return (void *)((long)site->addr + (long)&site->addr);
-}
-
-static inline unsigned long __static_call_key(const struct static_call_site *site)
-{
-       return (long)site->key + (long)&site->key;
-}
-
-static inline struct static_call_key *static_call_key(const struct static_call_site *site)
-{
-       return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
-}
-
-/* These assume the key is word-aligned. */
-static inline bool static_call_is_init(struct static_call_site *site)
-{
-       return __static_call_key(site) & STATIC_CALL_SITE_INIT;
-}
-
-static inline bool static_call_is_tail(struct static_call_site *site)
-{
-       return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
-}
-
-static inline void static_call_set_init(struct static_call_site *site)
-{
-       site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
-                   (long)&site->key;
-}
-
-static int static_call_site_cmp(const void *_a, const void *_b)
-{
-       const struct static_call_site *a = _a;
-       const struct static_call_site *b = _b;
-       const struct static_call_key *key_a = static_call_key(a);
-       const struct static_call_key *key_b = static_call_key(b);
-
-       if (key_a < key_b)
-               return -1;
-
-       if (key_a > key_b)
-               return 1;
-
-       return 0;
-}
-
-static void static_call_site_swap(void *_a, void *_b, int size)
-{
-       long delta = (unsigned long)_a - (unsigned long)_b;
-       struct static_call_site *a = _a;
-       struct static_call_site *b = _b;
-       struct static_call_site tmp = *a;
-
-       a->addr = b->addr  - delta;
-       a->key  = b->key   - delta;
-
-       b->addr = tmp.addr + delta;
-       b->key  = tmp.key  + delta;
-}
-
-static inline void static_call_sort_entries(struct static_call_site *start,
-                                           struct static_call_site *stop)
-{
-       sort(start, stop - start, sizeof(struct static_call_site),
-            static_call_site_cmp, static_call_site_swap);
-}
-
-static inline bool static_call_key_has_mods(struct static_call_key *key)
-{
-       return !(key->type & 1);
-}
-
-static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
-{
-       if (!static_call_key_has_mods(key))
-               return NULL;
-
-       return key->mods;
-}
-
-static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
-{
-       if (static_call_key_has_mods(key))
-               return NULL;
-
-       return (struct static_call_site *)(key->type & ~1);
-}
-
-void __static_call_update(struct static_call_key *key, void *tramp, void *func)
-{
-       struct static_call_site *site, *stop;
-       struct static_call_mod *site_mod, first;
-
-       cpus_read_lock();
-       static_call_lock();
-
-       if (key->func == func)
-               goto done;
-
-       key->func = func;
-
-       arch_static_call_transform(NULL, tramp, func, false);
-
-       /*
-        * If uninitialized, we'll not update the callsites, but they still
-        * point to the trampoline and we just patched that.
-        */
-       if (WARN_ON_ONCE(!static_call_initialized))
-               goto done;
-
-       first = (struct static_call_mod){
-               .next = static_call_key_next(key),
-               .mod = NULL,
-               .sites = static_call_key_sites(key),
-       };
-
-       for (site_mod = &first; site_mod; site_mod = site_mod->next) {
-               bool init = system_state < SYSTEM_RUNNING;
-               struct module *mod = site_mod->mod;
-
-               if (!site_mod->sites) {
-                       /*
-                        * This can happen if the static call key is defined in
-                        * a module which doesn't use it.
-                        *
-                        * It also happens in the has_mods case, where the
-                        * 'first' entry has no sites associated with it.
-                        */
-                       continue;
-               }
-
-               stop = __stop_static_call_sites;
-
-               if (mod) {
-#ifdef CONFIG_MODULES
-                       stop = mod->static_call_sites +
-                              mod->num_static_call_sites;
-                       init = mod->state == MODULE_STATE_COMING;
-#endif
-               }
-
-               for (site = site_mod->sites;
-                    site < stop && static_call_key(site) == key; site++) {
-                       void *site_addr = static_call_addr(site);
-
-                       if (!init && static_call_is_init(site))
-                               continue;
-
-                       if (!kernel_text_address((unsigned long)site_addr)) {
-                               /*
-                                * This skips patching built-in __exit, which
-                                * is part of init_section_contains() but is
-                                * not part of kernel_text_address().
-                                *
-                                * Skipping built-in __exit is fine since it
-                                * will never be executed.
-                                */
-                               WARN_ONCE(!static_call_is_init(site),
-                                         "can't patch static call site at %pS",
-                                         site_addr);
-                               continue;
-                       }
-
-                       arch_static_call_transform(site_addr, NULL, func,
-                                                  static_call_is_tail(site));
-               }
-       }
-
-done:
-       static_call_unlock();
-       cpus_read_unlock();
-}
-EXPORT_SYMBOL_GPL(__static_call_update);
-
-static int __static_call_init(struct module *mod,
-                             struct static_call_site *start,
-                             struct static_call_site *stop)
-{
-       struct static_call_site *site;
-       struct static_call_key *key, *prev_key = NULL;
-       struct static_call_mod *site_mod;
-
-       if (start == stop)
-               return 0;
-
-       static_call_sort_entries(start, stop);
-
-       for (site = start; site < stop; site++) {
-               void *site_addr = static_call_addr(site);
-
-               if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
-                   (!mod && init_section_contains(site_addr, 1)))
-                       static_call_set_init(site);
-
-               key = static_call_key(site);
-               if (key != prev_key) {
-                       prev_key = key;
-
-                       /*
-                        * For vmlinux (!mod) avoid the allocation by storing
-                        * the sites pointer in the key itself. Also see
-                        * __static_call_update()'s @first.
-                        *
-                        * This allows architectures (eg. x86) to call
-                        * static_call_init() before memory allocation works.
-                        */
-                       if (!mod) {
-                               key->sites = site;
-                               key->type |= 1;
-                               goto do_transform;
-                       }
-
-                       site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
-                       if (!site_mod)
-                               return -ENOMEM;
-
-                       /*
-                        * When the key has a direct sites pointer, extract
-                        * that into an explicit struct static_call_mod, so we
-                        * can have a list of modules.
-                        */
-                       if (static_call_key_sites(key)) {
-                               site_mod->mod = NULL;
-                               site_mod->next = NULL;
-                               site_mod->sites = static_call_key_sites(key);
-
-                               key->mods = site_mod;
-
-                               site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
-                               if (!site_mod)
-                                       return -ENOMEM;
-                       }
-
-                       site_mod->mod = mod;
-                       site_mod->sites = site;
-                       site_mod->next = static_call_key_next(key);
-                       key->mods = site_mod;
-               }
-
-do_transform:
-               arch_static_call_transform(site_addr, NULL, key->func,
-                               static_call_is_tail(site));
-       }
-
-       return 0;
-}
-
-static int addr_conflict(struct static_call_site *site, void *start, void *end)
-{
-       unsigned long addr = (unsigned long)static_call_addr(site);
-
-       if (addr <= (unsigned long)end &&
-           addr + CALL_INSN_SIZE > (unsigned long)start)
-               return 1;
-
-       return 0;
-}
-
-static int __static_call_text_reserved(struct static_call_site *iter_start,
-                                      struct static_call_site *iter_stop,
-                                      void *start, void *end, bool init)
-{
-       struct static_call_site *iter = iter_start;
-
-       while (iter < iter_stop) {
-               if (init || !static_call_is_init(iter)) {
-                       if (addr_conflict(iter, start, end))
-                               return 1;
-               }
-               iter++;
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_MODULES
-
-static int __static_call_mod_text_reserved(void *start, void *end)
-{
-       struct module *mod;
-       int ret;
-
-       preempt_disable();
-       mod = __module_text_address((unsigned long)start);
-       WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
-       if (!try_module_get(mod))
-               mod = NULL;
-       preempt_enable();
-
-       if (!mod)
-               return 0;
-
-       ret = __static_call_text_reserved(mod->static_call_sites,
-                       mod->static_call_sites + mod->num_static_call_sites,
-                       start, end, mod->state == MODULE_STATE_COMING);
-
-       module_put(mod);
-
-       return ret;
-}
-
-static unsigned long tramp_key_lookup(unsigned long addr)
-{
-       struct static_call_tramp_key *start = __start_static_call_tramp_key;
-       struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
-       struct static_call_tramp_key *tramp_key;
-
-       for (tramp_key = start; tramp_key != stop; tramp_key++) {
-               unsigned long tramp;
-
-               tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
-               if (tramp == addr)
-                       return (long)tramp_key->key + (long)&tramp_key->key;
-       }
-
-       return 0;
-}
-
-static int static_call_add_module(struct module *mod)
-{
-       struct static_call_site *start = mod->static_call_sites;
-       struct static_call_site *stop = start + mod->num_static_call_sites;
-       struct static_call_site *site;
-
-       for (site = start; site != stop; site++) {
-               unsigned long s_key = __static_call_key(site);
-               unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
-               unsigned long key;
-
-               /*
-                * If the key is exported, 'addr' points to the key, which
-                * means modules are allowed to call static_call_update() on
-                * it.
-                *
-                * Otherwise, the key isn't exported, and 'addr' points to the
-                * trampoline so we need to lookup the key.
-                *
-                * We go through this dance to prevent crazy modules from
-                * abusing sensitive static calls.
-                */
-               if (!kernel_text_address(addr))
-                       continue;
-
-               key = tramp_key_lookup(addr);
-               if (!key) {
-                       pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
-                               static_call_addr(site));
-                       return -EINVAL;
-               }
-
-               key |= s_key & STATIC_CALL_SITE_FLAGS;
-               site->key = key - (long)&site->key;
-       }
-
-       return __static_call_init(mod, start, stop);
-}
-
-static void static_call_del_module(struct module *mod)
-{
-       struct static_call_site *start = mod->static_call_sites;
-       struct static_call_site *stop = mod->static_call_sites +
-                                       mod->num_static_call_sites;
-       struct static_call_key *key, *prev_key = NULL;
-       struct static_call_mod *site_mod, **prev;
-       struct static_call_site *site;
-
-       for (site = start; site < stop; site++) {
-               key = static_call_key(site);
-               if (key == prev_key)
-                       continue;
-
-               prev_key = key;
-
-               for (prev = &key->mods, site_mod = key->mods;
-                    site_mod && site_mod->mod != mod;
-                    prev = &site_mod->next, site_mod = site_mod->next)
-                       ;
-
-               if (!site_mod)
-                       continue;
-
-               *prev = site_mod->next;
-               kfree(site_mod);
-       }
-}
-
-static int static_call_module_notify(struct notifier_block *nb,
-                                    unsigned long val, void *data)
-{
-       struct module *mod = data;
-       int ret = 0;
-
-       cpus_read_lock();
-       static_call_lock();
-
-       switch (val) {
-       case MODULE_STATE_COMING:
-               ret = static_call_add_module(mod);
-               if (ret) {
-                       WARN(1, "Failed to allocate memory for static calls");
-                       static_call_del_module(mod);
-               }
-               break;
-       case MODULE_STATE_GOING:
-               static_call_del_module(mod);
-               break;
-       }
-
-       static_call_unlock();
-       cpus_read_unlock();
-
-       return notifier_from_errno(ret);
-}
-
-static struct notifier_block static_call_module_nb = {
-       .notifier_call = static_call_module_notify,
-};
-
-#else
-
-static inline int __static_call_mod_text_reserved(void *start, void *end)
-{
-       return 0;
-}
-
-#endif /* CONFIG_MODULES */
-
-int static_call_text_reserved(void *start, void *end)
-{
-       bool init = system_state < SYSTEM_RUNNING;
-       int ret = __static_call_text_reserved(__start_static_call_sites,
-                       __stop_static_call_sites, start, end, init);
-
-       if (ret)
-               return ret;
-
-       return __static_call_mod_text_reserved(start, end);
-}
-
-int __init static_call_init(void)
-{
-       int ret;
-
-       if (static_call_initialized)
-               return 0;
-
-       cpus_read_lock();
-       static_call_lock();
-       ret = __static_call_init(NULL, __start_static_call_sites,
-                                __stop_static_call_sites);
-       static_call_unlock();
-       cpus_read_unlock();
-
-       if (ret) {
-               pr_err("Failed to allocate memory for static_call!\n");
-               BUG();
-       }
-
-       static_call_initialized = true;
-
-#ifdef CONFIG_MODULES
-       register_module_notifier(&static_call_module_nb);
-#endif
-       return 0;
-}
-early_initcall(static_call_init);
 
 long __static_call_return0(void)
 {
        return 0;
 }
 EXPORT_SYMBOL_GPL(__static_call_return0);
-
-#ifdef CONFIG_STATIC_CALL_SELFTEST
-
-static int func_a(int x)
-{
-       return x+1;
-}
-
-static int func_b(int x)
-{
-       return x+2;
-}
-
-DEFINE_STATIC_CALL(sc_selftest, func_a);
-
-static struct static_call_data {
-      int (*func)(int);
-      int val;
-      int expect;
-} static_call_data [] __initdata = {
-      { NULL,   2, 3 },
-      { func_b, 2, 4 },
-      { func_a, 2, 3 }
-};
-
-static int __init test_static_call_init(void)
-{
-      int i;
-
-      for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
-             struct static_call_data *scd = &static_call_data[i];
-
-              if (scd->func)
-                      static_call_update(sc_selftest, scd->func);
-
-              WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
-      }
-
-      return 0;
-}
-early_initcall(test_static_call_init);
-
-#endif /* CONFIG_STATIC_CALL_SELFTEST */
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
new file mode 100644 (file)
index 0000000..dc5665b
--- /dev/null
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/static_call.h>
+#include <linux/bug.h>
+#include <linux/smp.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/processor.h>
+#include <asm/sections.h>
+
+extern struct static_call_site __start_static_call_sites[],
+                              __stop_static_call_sites[];
+extern struct static_call_tramp_key __start_static_call_tramp_key[],
+                                   __stop_static_call_tramp_key[];
+
+static bool static_call_initialized;
+
+/* mutex to protect key modules/sites */
+static DEFINE_MUTEX(static_call_mutex);
+
+static void static_call_lock(void)
+{
+       mutex_lock(&static_call_mutex);
+}
+
+static void static_call_unlock(void)
+{
+       mutex_unlock(&static_call_mutex);
+}
+
+static inline void *static_call_addr(struct static_call_site *site)
+{
+       return (void *)((long)site->addr + (long)&site->addr);
+}
+
+static inline unsigned long __static_call_key(const struct static_call_site *site)
+{
+       return (long)site->key + (long)&site->key;
+}
+
+static inline struct static_call_key *static_call_key(const struct static_call_site *site)
+{
+       return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
+}
+
+/* These assume the key is word-aligned. */
+static inline bool static_call_is_init(struct static_call_site *site)
+{
+       return __static_call_key(site) & STATIC_CALL_SITE_INIT;
+}
+
+static inline bool static_call_is_tail(struct static_call_site *site)
+{
+       return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
+}
+
+static inline void static_call_set_init(struct static_call_site *site)
+{
+       site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
+                   (long)&site->key;
+}
+
+static int static_call_site_cmp(const void *_a, const void *_b)
+{
+       const struct static_call_site *a = _a;
+       const struct static_call_site *b = _b;
+       const struct static_call_key *key_a = static_call_key(a);
+       const struct static_call_key *key_b = static_call_key(b);
+
+       if (key_a < key_b)
+               return -1;
+
+       if (key_a > key_b)
+               return 1;
+
+       return 0;
+}
+
+static void static_call_site_swap(void *_a, void *_b, int size)
+{
+       long delta = (unsigned long)_a - (unsigned long)_b;
+       struct static_call_site *a = _a;
+       struct static_call_site *b = _b;
+       struct static_call_site tmp = *a;
+
+       a->addr = b->addr  - delta;
+       a->key  = b->key   - delta;
+
+       b->addr = tmp.addr + delta;
+       b->key  = tmp.key  + delta;
+}
+
+static inline void static_call_sort_entries(struct static_call_site *start,
+                                           struct static_call_site *stop)
+{
+       sort(start, stop - start, sizeof(struct static_call_site),
+            static_call_site_cmp, static_call_site_swap);
+}
+
+static inline bool static_call_key_has_mods(struct static_call_key *key)
+{
+       return !(key->type & 1);
+}
+
+static inline struct static_call_mod *static_call_key_next(struct static_call_key *key)
+{
+       if (!static_call_key_has_mods(key))
+               return NULL;
+
+       return key->mods;
+}
+
+static inline struct static_call_site *static_call_key_sites(struct static_call_key *key)
+{
+       if (static_call_key_has_mods(key))
+               return NULL;
+
+       return (struct static_call_site *)(key->type & ~1);
+}
+
+void __static_call_update(struct static_call_key *key, void *tramp, void *func)
+{
+       struct static_call_site *site, *stop;
+       struct static_call_mod *site_mod, first;
+
+       cpus_read_lock();
+       static_call_lock();
+
+       if (key->func == func)
+               goto done;
+
+       key->func = func;
+
+       arch_static_call_transform(NULL, tramp, func, false);
+
+       /*
+        * If uninitialized, we'll not update the callsites, but they still
+        * point to the trampoline and we just patched that.
+        */
+       if (WARN_ON_ONCE(!static_call_initialized))
+               goto done;
+
+       first = (struct static_call_mod){
+               .next = static_call_key_next(key),
+               .mod = NULL,
+               .sites = static_call_key_sites(key),
+       };
+
+       for (site_mod = &first; site_mod; site_mod = site_mod->next) {
+               bool init = system_state < SYSTEM_RUNNING;
+               struct module *mod = site_mod->mod;
+
+               if (!site_mod->sites) {
+                       /*
+                        * This can happen if the static call key is defined in
+                        * a module which doesn't use it.
+                        *
+                        * It also happens in the has_mods case, where the
+                        * 'first' entry has no sites associated with it.
+                        */
+                       continue;
+               }
+
+               stop = __stop_static_call_sites;
+
+               if (mod) {
+#ifdef CONFIG_MODULES
+                       stop = mod->static_call_sites +
+                              mod->num_static_call_sites;
+                       init = mod->state == MODULE_STATE_COMING;
+#endif
+               }
+
+               for (site = site_mod->sites;
+                    site < stop && static_call_key(site) == key; site++) {
+                       void *site_addr = static_call_addr(site);
+
+                       if (!init && static_call_is_init(site))
+                               continue;
+
+                       if (!kernel_text_address((unsigned long)site_addr)) {
+                               /*
+                                * This skips patching built-in __exit, which
+                                * is part of init_section_contains() but is
+                                * not part of kernel_text_address().
+                                *
+                                * Skipping built-in __exit is fine since it
+                                * will never be executed.
+                                */
+                               WARN_ONCE(!static_call_is_init(site),
+                                         "can't patch static call site at %pS",
+                                         site_addr);
+                               continue;
+                       }
+
+                       arch_static_call_transform(site_addr, NULL, func,
+                                                  static_call_is_tail(site));
+               }
+       }
+
+done:
+       static_call_unlock();
+       cpus_read_unlock();
+}
+EXPORT_SYMBOL_GPL(__static_call_update);
+
+static int __static_call_init(struct module *mod,
+                             struct static_call_site *start,
+                             struct static_call_site *stop)
+{
+       struct static_call_site *site;
+       struct static_call_key *key, *prev_key = NULL;
+       struct static_call_mod *site_mod;
+
+       if (start == stop)
+               return 0;
+
+       static_call_sort_entries(start, stop);
+
+       for (site = start; site < stop; site++) {
+               void *site_addr = static_call_addr(site);
+
+               if ((mod && within_module_init((unsigned long)site_addr, mod)) ||
+                   (!mod && init_section_contains(site_addr, 1)))
+                       static_call_set_init(site);
+
+               key = static_call_key(site);
+               if (key != prev_key) {
+                       prev_key = key;
+
+                       /*
+                        * For vmlinux (!mod) avoid the allocation by storing
+                        * the sites pointer in the key itself. Also see
+                        * __static_call_update()'s @first.
+                        *
+                        * This allows architectures (eg. x86) to call
+                        * static_call_init() before memory allocation works.
+                        */
+                       if (!mod) {
+                               key->sites = site;
+                               key->type |= 1;
+                               goto do_transform;
+                       }
+
+                       site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+                       if (!site_mod)
+                               return -ENOMEM;
+
+                       /*
+                        * When the key has a direct sites pointer, extract
+                        * that into an explicit struct static_call_mod, so we
+                        * can have a list of modules.
+                        */
+                       if (static_call_key_sites(key)) {
+                               site_mod->mod = NULL;
+                               site_mod->next = NULL;
+                               site_mod->sites = static_call_key_sites(key);
+
+                               key->mods = site_mod;
+
+                               site_mod = kzalloc(sizeof(*site_mod), GFP_KERNEL);
+                               if (!site_mod)
+                                       return -ENOMEM;
+                       }
+
+                       site_mod->mod = mod;
+                       site_mod->sites = site;
+                       site_mod->next = static_call_key_next(key);
+                       key->mods = site_mod;
+               }
+
+do_transform:
+               arch_static_call_transform(site_addr, NULL, key->func,
+                               static_call_is_tail(site));
+       }
+
+       return 0;
+}
+
+static int addr_conflict(struct static_call_site *site, void *start, void *end)
+{
+       unsigned long addr = (unsigned long)static_call_addr(site);
+
+       if (addr <= (unsigned long)end &&
+           addr + CALL_INSN_SIZE > (unsigned long)start)
+               return 1;
+
+       return 0;
+}
+
+static int __static_call_text_reserved(struct static_call_site *iter_start,
+                                      struct static_call_site *iter_stop,
+                                      void *start, void *end, bool init)
+{
+       struct static_call_site *iter = iter_start;
+
+       while (iter < iter_stop) {
+               if (init || !static_call_is_init(iter)) {
+                       if (addr_conflict(iter, start, end))
+                               return 1;
+               }
+               iter++;
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_MODULES
+
+static int __static_call_mod_text_reserved(void *start, void *end)
+{
+       struct module *mod;
+       int ret;
+
+       preempt_disable();
+       mod = __module_text_address((unsigned long)start);
+       WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
+       if (!try_module_get(mod))
+               mod = NULL;
+       preempt_enable();
+
+       if (!mod)
+               return 0;
+
+       ret = __static_call_text_reserved(mod->static_call_sites,
+                       mod->static_call_sites + mod->num_static_call_sites,
+                       start, end, mod->state == MODULE_STATE_COMING);
+
+       module_put(mod);
+
+       return ret;
+}
+
+static unsigned long tramp_key_lookup(unsigned long addr)
+{
+       struct static_call_tramp_key *start = __start_static_call_tramp_key;
+       struct static_call_tramp_key *stop = __stop_static_call_tramp_key;
+       struct static_call_tramp_key *tramp_key;
+
+       for (tramp_key = start; tramp_key != stop; tramp_key++) {
+               unsigned long tramp;
+
+               tramp = (long)tramp_key->tramp + (long)&tramp_key->tramp;
+               if (tramp == addr)
+                       return (long)tramp_key->key + (long)&tramp_key->key;
+       }
+
+       return 0;
+}
+
+static int static_call_add_module(struct module *mod)
+{
+       struct static_call_site *start = mod->static_call_sites;
+       struct static_call_site *stop = start + mod->num_static_call_sites;
+       struct static_call_site *site;
+
+       for (site = start; site != stop; site++) {
+               unsigned long s_key = __static_call_key(site);
+               unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
+               unsigned long key;
+
+               /*
+                * If the key is exported, 'addr' points to the key, which
+                * means modules are allowed to call static_call_update() on
+                * it.
+                *
+                * Otherwise, the key isn't exported, and 'addr' points to the
+                * trampoline so we need to lookup the key.
+                *
+                * We go through this dance to prevent crazy modules from
+                * abusing sensitive static calls.
+                */
+               if (!kernel_text_address(addr))
+                       continue;
+
+               key = tramp_key_lookup(addr);
+               if (!key) {
+                       pr_warn("Failed to fixup __raw_static_call() usage at: %ps\n",
+                               static_call_addr(site));
+                       return -EINVAL;
+               }
+
+               key |= s_key & STATIC_CALL_SITE_FLAGS;
+               site->key = key - (long)&site->key;
+       }
+
+       return __static_call_init(mod, start, stop);
+}
+
+static void static_call_del_module(struct module *mod)
+{
+       struct static_call_site *start = mod->static_call_sites;
+       struct static_call_site *stop = mod->static_call_sites +
+                                       mod->num_static_call_sites;
+       struct static_call_key *key, *prev_key = NULL;
+       struct static_call_mod *site_mod, **prev;
+       struct static_call_site *site;
+
+       for (site = start; site < stop; site++) {
+               key = static_call_key(site);
+               if (key == prev_key)
+                       continue;
+
+               prev_key = key;
+
+               for (prev = &key->mods, site_mod = key->mods;
+                    site_mod && site_mod->mod != mod;
+                    prev = &site_mod->next, site_mod = site_mod->next)
+                       ;
+
+               if (!site_mod)
+                       continue;
+
+               *prev = site_mod->next;
+               kfree(site_mod);
+       }
+}
+
+static int static_call_module_notify(struct notifier_block *nb,
+                                    unsigned long val, void *data)
+{
+       struct module *mod = data;
+       int ret = 0;
+
+       cpus_read_lock();
+       static_call_lock();
+
+       switch (val) {
+       case MODULE_STATE_COMING:
+               ret = static_call_add_module(mod);
+               if (ret) {
+                       WARN(1, "Failed to allocate memory for static calls");
+                       static_call_del_module(mod);
+               }
+               break;
+       case MODULE_STATE_GOING:
+               static_call_del_module(mod);
+               break;
+       }
+
+       static_call_unlock();
+       cpus_read_unlock();
+
+       return notifier_from_errno(ret);
+}
+
+static struct notifier_block static_call_module_nb = {
+       .notifier_call = static_call_module_notify,
+};
+
+#else
+
+static inline int __static_call_mod_text_reserved(void *start, void *end)
+{
+       return 0;
+}
+
+#endif /* CONFIG_MODULES */
+
+int static_call_text_reserved(void *start, void *end)
+{
+       bool init = system_state < SYSTEM_RUNNING;
+       int ret = __static_call_text_reserved(__start_static_call_sites,
+                       __stop_static_call_sites, start, end, init);
+
+       if (ret)
+               return ret;
+
+       return __static_call_mod_text_reserved(start, end);
+}
+
+int __init static_call_init(void)
+{
+       int ret;
+
+       if (static_call_initialized)
+               return 0;
+
+       cpus_read_lock();
+       static_call_lock();
+       ret = __static_call_init(NULL, __start_static_call_sites,
+                                __stop_static_call_sites);
+       static_call_unlock();
+       cpus_read_unlock();
+
+       if (ret) {
+               pr_err("Failed to allocate memory for static_call!\n");
+               BUG();
+       }
+
+       static_call_initialized = true;
+
+#ifdef CONFIG_MODULES
+       register_module_notifier(&static_call_module_nb);
+#endif
+       return 0;
+}
+early_initcall(static_call_init);
+
+#ifdef CONFIG_STATIC_CALL_SELFTEST
+
+static int func_a(int x)
+{
+       return x+1;
+}
+
+static int func_b(int x)
+{
+       return x+2;
+}
+
+DEFINE_STATIC_CALL(sc_selftest, func_a);
+
+static struct static_call_data {
+      int (*func)(int);
+      int val;
+      int expect;
+} static_call_data [] __initdata = {
+      { NULL,   2, 3 },
+      { func_b, 2, 4 },
+      { func_a, 2, 3 }
+};
+
+static int __init test_static_call_init(void)
+{
+      int i;
+
+      for (i = 0; i < ARRAY_SIZE(static_call_data); i++ ) {
+             struct static_call_data *scd = &static_call_data[i];
+
+              if (scd->func)
+                      static_call_update(sc_selftest, scd->func);
+
+              WARN_ON(static_call(sc_selftest)(scd->val) != scd->expect);
+      }
+
+      return 0;
+}
+early_initcall(test_static_call_init);
+
+#endif /* CONFIG_STATIC_CALL_SELFTEST */
index 2d76c91..d257721 100644 (file)
@@ -188,7 +188,7 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
         */
        if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
 #ifdef CONFIG_NO_HZ_FULL
-               WARN_ON(tick_nohz_full_running);
+               WARN_ON_ONCE(tick_nohz_full_running);
 #endif
                tick_do_timer_cpu = cpu;
        }
@@ -1538,7 +1538,7 @@ void tick_cancel_sched_timer(int cpu)
 }
 #endif
 
-/**
+/*
  * Async notification about clocksource changes
  */
 void tick_clock_notify(void)
@@ -1559,7 +1559,7 @@ void tick_oneshot_notify(void)
        set_bit(0, &ts->check_clocks);
 }
 
-/**
+/*
  * Check, if a change happened, which makes oneshot possible.
  *
  * Called cyclic from the hrtimer softirq (driven by the timer
index 85f1021..9dd2a39 100644 (file)
@@ -1722,11 +1722,14 @@ static inline void __run_timers(struct timer_base *base)
               time_after_eq(jiffies, base->next_expiry)) {
                levels = collect_expired_timers(base, heads);
                /*
-                * The only possible reason for not finding any expired
-                * timer at this clk is that all matching timers have been
-                * dequeued.
+                * The two possible reasons for not finding any expired
+                * timer at this clk are that all matching timers have been
+                * dequeued or no timer has been queued since
+                * base::next_expiry was set to base::clk +
+                * NEXT_TIMER_MAX_DELTA.
                 */
-               WARN_ON_ONCE(!levels && !base->next_expiry_recalc);
+               WARN_ON_ONCE(!levels && !base->next_expiry_recalc
+                            && base->timers_pending);
                base->clk++;
                base->next_expiry = __next_timer_interrupt(base);
 
index 7fa2ebc..d8553f4 100644 (file)
@@ -2349,11 +2349,11 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
 }
 
 static int
-kprobe_multi_resolve_syms(const void *usyms, u32 cnt,
+kprobe_multi_resolve_syms(const void __user *usyms, u32 cnt,
                          unsigned long *addrs)
 {
        unsigned long addr, size;
-       const char **syms;
+       const char __user **syms;
        int err = -ENOMEM;
        unsigned int i;
        char *func;
index ab463a4..b568337 100644 (file)
@@ -65,7 +65,7 @@ static void rethook_free_rcu(struct rcu_head *head)
  */
 void rethook_free(struct rethook *rh)
 {
-       rcu_assign_pointer(rh->handler, NULL);
+       WRITE_ONCE(rh->handler, NULL);
 
        call_rcu(&rh->rcu, rethook_free_rcu);
 }
index 56fa037..5f0e71a 100644 (file)
@@ -54,32 +54,6 @@ void kobject_get_ownership(struct kobject *kobj, kuid_t *uid, kgid_t *gid)
                kobj->ktype->get_ownership(kobj, uid, gid);
 }
 
-/*
- * populate_dir - populate directory with attributes.
- * @kobj: object we're working on.
- *
- * Most subsystems have a set of default attributes that are associated
- * with an object that registers with them.  This is a helper called during
- * object registration that loops through the default attributes of the
- * subsystem and creates attributes files for them in sysfs.
- */
-static int populate_dir(struct kobject *kobj)
-{
-       const struct kobj_type *t = get_ktype(kobj);
-       struct attribute *attr;
-       int error = 0;
-       int i;
-
-       if (t && t->default_attrs) {
-               for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
-                       error = sysfs_create_file(kobj, attr);
-                       if (error)
-                               break;
-               }
-       }
-       return error;
-}
-
 static int create_dir(struct kobject *kobj)
 {
        const struct kobj_type *ktype = get_ktype(kobj);
@@ -90,12 +64,6 @@ static int create_dir(struct kobject *kobj)
        if (error)
                return error;
 
-       error = populate_dir(kobj);
-       if (error) {
-               sysfs_remove_dir(kobj);
-               return error;
-       }
-
        if (ktype) {
                error = sysfs_create_groups(kobj, ktype->default_groups);
                if (error) {
index 926f482..fd1728d 100644 (file)
@@ -271,8 +271,12 @@ static FORCE_INLINE int LZ4_decompress_generic(
                        ip += length;
                        op += length;
 
-                       /* Necessarily EOF, due to parsing restrictions */
-                       if (!partialDecoding || (cpy == oend))
+                       /* Necessarily EOF when !partialDecoding.
+                        * When partialDecoding, it is EOF if we've either
+                        * filled the output buffer or
+                        * can't proceed with reading an offset for the following match.
+                        */
+                       if (!partialDecoding || (cpy == oend) || (ip >= (iend - 2)))
                                break;
                } else {
                        /* may overwrite up to WILDCOPYLENGTH beyond cpy */
index c3e37aa..fe915db 100644 (file)
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
+/*
+ * Fragmentation score check interval for proactive compaction purposes.
+ */
+#define HPAGE_FRAG_CHECK_INTERVAL_MSEC (500)
+
 static inline void count_compact_event(enum vm_event_item item)
 {
        count_vm_event(item);
@@ -51,11 +56,6 @@ static inline void count_compact_events(enum vm_event_item item, long delta)
 #define pageblock_end_pfn(pfn)         block_end_pfn(pfn, pageblock_order)
 
 /*
- * Fragmentation score check interval for proactive compaction purposes.
- */
-static const unsigned int HPAGE_FRAG_CHECK_INTERVAL_MSEC = 500;
-
-/*
  * Page order with-respect-to which proactive compaction
  * calculates external fragmentation, which is used as
  * the "fragmentation score" of a node/zone.
index 3a5ffb5..9a1eef6 100644 (file)
@@ -1063,12 +1063,6 @@ void __init pagecache_init(void)
                init_waitqueue_head(&folio_wait_table[i]);
 
        page_writeback_init();
-
-       /*
-        * tmpfs uses the ZERO_PAGE for reading holes: it is up-to-date,
-        * and splice's page_cache_pipe_buf_confirm() needs to see that.
-        */
-       SetPageUptodate(ZERO_PAGE(0));
 }
 
 /*
index 0cc0c4d..1a69299 100644 (file)
@@ -624,7 +624,7 @@ void __kmap_local_sched_out(void)
 
                /* With debug all even slots are unmapped and act as guard */
                if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-                       WARN_ON_ONCE(!pte_none(pteval));
+                       WARN_ON_ONCE(pte_val(pteval) != 0);
                        continue;
                }
                if (WARN_ON_ONCE(pte_none(pteval)))
@@ -661,7 +661,7 @@ void __kmap_local_sched_in(void)
 
                /* With debug all even slots are unmapped and act as guard */
                if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
-                       WARN_ON_ONCE(!pte_none(pteval));
+                       WARN_ON_ONCE(pte_val(pteval) != 0);
                        continue;
                }
                if (WARN_ON_ONCE(pte_none(pteval)))
index 2fe3821..c468fee 100644 (file)
@@ -2145,15 +2145,14 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
         * pmd against. Otherwise we can end up replacing wrong folio.
         */
        VM_BUG_ON(freeze && !folio);
-       if (folio) {
-               VM_WARN_ON_ONCE(!folio_test_locked(folio));
-               if (folio != page_folio(pmd_page(*pmd)))
-                       goto out;
-       }
+       VM_WARN_ON_ONCE(folio && !folio_test_locked(folio));
 
        if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd) ||
-           is_pmd_migration_entry(*pmd))
+           is_pmd_migration_entry(*pmd)) {
+               if (folio && folio != page_folio(pmd_page(*pmd)))
+                       goto out;
                __split_huge_pmd_locked(vma, pmd, range.start, freeze);
+       }
 
 out:
        spin_unlock(ptl);
index b34f501..f8ca7cc 100644 (file)
@@ -3475,7 +3475,6 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
 {
        int nr_nodes, node;
        struct page *page;
-       int rc = 0;
 
        lockdep_assert_held(&hugetlb_lock);
 
@@ -3486,15 +3485,19 @@ static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
        }
 
        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
-               if (!list_empty(&h->hugepage_freelists[node])) {
-                       page = list_entry(h->hugepage_freelists[node].next,
-                                       struct page, lru);
-                       rc = demote_free_huge_page(h, page);
-                       break;
+               list_for_each_entry(page, &h->hugepage_freelists[node], lru) {
+                       if (PageHWPoison(page))
+                               continue;
+
+                       return demote_free_huge_page(h, page);
                }
        }
 
-       return rc;
+       /*
+        * Only way to get here is if all pages on free lists are poisoned.
+        * Return -EBUSY so that caller will not retry.
+        */
+       return -EBUSY;
 }
 
 #define HSTATE_ATTR_RO(_name) \
index 07a76c4..9e1b654 100644 (file)
@@ -336,8 +336,6 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
 
 #endif
 
-#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
-
 void kasan_enable_tagging(void)
 {
        if (kasan_arg_mode == KASAN_ARG_MODE_ASYNC)
@@ -347,6 +345,9 @@ void kasan_enable_tagging(void)
        else
                hw_enable_tagging_sync();
 }
+
+#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
+
 EXPORT_SYMBOL_GPL(kasan_enable_tagging);
 
 void kasan_force_async_fault(void)
index d79b83d..b01b4bb 100644 (file)
@@ -355,25 +355,27 @@ static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
 #define hw_set_mem_tag_range(addr, size, tag, init) \
                        arch_set_mem_tag_range((addr), (size), (tag), (init))
 
+void kasan_enable_tagging(void);
+
 #else /* CONFIG_KASAN_HW_TAGS */
 
 #define hw_enable_tagging_sync()
 #define hw_enable_tagging_async()
 #define hw_enable_tagging_asymm()
 
+static inline void kasan_enable_tagging(void) { }
+
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 #if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
-void kasan_enable_tagging(void);
 void kasan_force_async_fault(void);
 
-#else /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
-static inline void kasan_enable_tagging(void) { }
 static inline void kasan_force_async_fault(void) { }
 
-#endif /* CONFIG_KASAN_HW_TAGS || CONFIG_KASAN_KUNIT_TEST */
+#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */
 
 #ifdef CONFIG_KASAN_SW_TAGS
 u8 kasan_random_tag(void);
index a203747..9b2b5f5 100644 (file)
@@ -231,27 +231,6 @@ static bool kfence_unprotect(unsigned long addr)
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
 }
 
-static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
-{
-       long index;
-
-       /* The checks do not affect performance; only called from slow-paths. */
-
-       if (!is_kfence_address((void *)addr))
-               return NULL;
-
-       /*
-        * May be an invalid index if called with an address at the edge of
-        * __kfence_pool, in which case we would report an "invalid access"
-        * error.
-        */
-       index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
-       if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
-               return NULL;
-
-       return &kfence_metadata[index];
-}
-
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
index 9a6c4b1..600f2e2 100644 (file)
@@ -96,6 +96,27 @@ struct kfence_metadata {
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
 
+static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
+{
+       long index;
+
+       /* The checks do not affect performance; only called from slow-paths. */
+
+       if (!is_kfence_address((void *)addr))
+               return NULL;
+
+       /*
+        * May be an invalid index if called with an address at the edge of
+        * __kfence_pool, in which case we would report an "invalid access"
+        * error.
+        */
+       index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
+       if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
+               return NULL;
+
+       return &kfence_metadata[index];
+}
+
 /* KFENCE error types for report generation. */
 enum kfence_error_type {
        KFENCE_ERROR_OOB,               /* Detected a out-of-bounds access. */
index f93a7b2..f5a6d8b 100644 (file)
@@ -273,3 +273,50 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r
        /* We encountered a memory safety error, taint the kernel! */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
 }
+
+#ifdef CONFIG_PRINTK
+static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
+{
+       int i, j;
+
+       i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
+       for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
+               kp_stack[j] = (void *)track->stack_entries[i];
+       if (j < KS_ADDRS_COUNT)
+               kp_stack[j] = NULL;
+}
+
+bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
+       unsigned long flags;
+
+       if (!meta)
+               return false;
+
+       /*
+        * If state is UNUSED at least show the pointer requested; the rest
+        * would be garbage data.
+        */
+       kpp->kp_ptr = object;
+
+       /* Requesting info on a never-used object is almost certainly a bug. */
+       if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
+               return true;
+
+       raw_spin_lock_irqsave(&meta->lock, flags);
+
+       kpp->kp_slab = slab;
+       kpp->kp_slab_cache = meta->cache;
+       kpp->kp_objp = (void *)meta->addr;
+       kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
+       if (meta->state == KFENCE_OBJECT_FREED)
+               kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
+       /* get_stack_skipnr() ensures the first entry is outside allocator. */
+       kpp->kp_ret = kpp->kp_stack[0];
+
+       raw_spin_unlock_irqrestore(&meta->lock, flags);
+
+       return true;
+}
+#endif
index acd7cbb..a182f5d 100644 (file)
@@ -1132,7 +1132,7 @@ EXPORT_SYMBOL(kmemleak_no_scan);
 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
 }
 EXPORT_SYMBOL(kmemleak_alloc_phys);
@@ -1146,7 +1146,7 @@ EXPORT_SYMBOL(kmemleak_alloc_phys);
  */
 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
 }
 EXPORT_SYMBOL(kmemleak_free_part_phys);
@@ -1158,7 +1158,7 @@ EXPORT_SYMBOL(kmemleak_free_part_phys);
  */
 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_not_leak_phys);
@@ -1170,7 +1170,7 @@ EXPORT_SYMBOL(kmemleak_not_leak_phys);
  */
 void __ref kmemleak_ignore_phys(phys_addr_t phys)
 {
-       if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
+       if (PHYS_PFN(phys) >= min_low_pfn && PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
 }
 EXPORT_SYMBOL(kmemleak_ignore_phys);
index c669d87..ba76428 100644 (file)
@@ -395,12 +395,6 @@ static void memcg_reparent_list_lru_node(struct list_lru *lru, int nid,
        struct list_lru_one *src, *dst;
 
        /*
-        * If there is no lru entry in this nlru, we can skip it immediately.
-        */
-       if (!READ_ONCE(nlru->nr_items))
-               return;
-
-       /*
         * Since list_lru_{add,del} may be called under an IRQ-safe lock,
         * we have to use IRQ-safe primitives here to avoid deadlock.
         */
index a2516d3..8c74107 100644 (file)
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+       struct folio *dst, *src = page_folio(page);
        struct vm_area_struct *vma;
        unsigned long address;
+       gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
        vma = find_vma(current->mm, start);
        while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
                vma = vma->vm_next;
        }
 
-       if (PageHuge(page)) {
-               return alloc_huge_page_vma(page_hstate(compound_head(page)),
+       if (folio_test_hugetlb(src))
+               return alloc_huge_page_vma(page_hstate(&src->page),
                                vma, address);
-       } else if (PageTransHuge(page)) {
-               struct page *thp;
 
-               thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-                                        HPAGE_PMD_ORDER);
-               if (!thp)
-                       return NULL;
-               prep_transhuge_page(thp);
-               return thp;
-       }
+       if (folio_test_large(src))
+               gfp = GFP_TRANSHUGE;
+
        /*
-        * if !vma, alloc_page_vma() will use task or system default policy
+        * if !vma, vma_alloc_folio() will use task or system default policy
         */
-       return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-                       vma, address);
+       dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+                       folio_test_large(src));
+       return &dst->page;
 }
 #else
 
@@ -2227,6 +2224,19 @@ out:
 }
 EXPORT_SYMBOL(alloc_pages_vma);
 
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+               unsigned long addr, bool hugepage)
+{
+       struct folio *folio;
+
+       folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
+                       hugepage);
+       if (folio && order > 1)
+               prep_transhuge_page(&folio->page);
+
+       return folio;
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
@@ -2733,6 +2743,7 @@ alloc_new:
        mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!mpol_new)
                goto err_out;
+       atomic_set(&mpol_new->refcnt, 1);
        goto restart;
 }
 
index de175e2..6c31ee1 100644 (file)
@@ -1520,10 +1520,11 @@ out:
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+       struct folio *folio = page_folio(page);
        struct migration_target_control *mtc;
        gfp_t gfp_mask;
        unsigned int order = 0;
-       struct page *new_page = NULL;
+       struct folio *new_folio = NULL;
        int nid;
        int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
        gfp_mask = mtc->gfp_mask;
        nid = mtc->nid;
        if (nid == NUMA_NO_NODE)
-               nid = page_to_nid(page);
+               nid = folio_nid(folio);
 
-       if (PageHuge(page)) {
-               struct hstate *h = page_hstate(compound_head(page));
+       if (folio_test_hugetlb(folio)) {
+               struct hstate *h = page_hstate(&folio->page);
 
                gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
                return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
        }
 
-       if (PageTransHuge(page)) {
+       if (folio_test_large(folio)) {
                /*
                 * clear __GFP_RECLAIM to make the migration callback
                 * consistent with regular THP allocations.
                 */
                gfp_mask &= ~__GFP_RECLAIM;
                gfp_mask |= GFP_TRANSHUGE;
-               order = HPAGE_PMD_ORDER;
+               order = folio_order(folio);
        }
-       zidx = zone_idx(page_zone(page));
+       zidx = zone_idx(folio_zone(folio));
        if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
                gfp_mask |= __GFP_HIGHMEM;
 
-       new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-       if (new_page && PageTransHuge(new_page))
-               prep_transhuge_page(new_page);
+       new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
 
-       return new_page;
+       return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
@@ -1999,32 +1997,20 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
                                           unsigned long data)
 {
        int nid = (int) data;
-       struct page *newpage;
-
-       newpage = __alloc_pages_node(nid,
-                                        (GFP_HIGHUSER_MOVABLE |
-                                         __GFP_THISNODE | __GFP_NOMEMALLOC |
-                                         __GFP_NORETRY | __GFP_NOWARN) &
-                                        ~__GFP_RECLAIM, 0);
-
-       return newpage;
-}
-
-static struct page *alloc_misplaced_dst_page_thp(struct page *page,
-                                                unsigned long data)
-{
-       int nid = (int) data;
-       struct page *newpage;
-
-       newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
-                                  HPAGE_PMD_ORDER);
-       if (!newpage)
-               goto out;
-
-       prep_transhuge_page(newpage);
+       int order = compound_order(page);
+       gfp_t gfp = __GFP_THISNODE;
+       struct folio *new;
+
+       if (order > 0)
+               gfp |= GFP_TRANSHUGE_LIGHT;
+       else {
+               gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
+                       __GFP_NOWARN;
+               gfp &= ~__GFP_RECLAIM;
+       }
+       new = __folio_alloc_node(gfp, order, nid);
 
-out:
-       return newpage;
+       return &new->page;
 }
 
 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
@@ -2082,23 +2068,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
        int nr_remaining;
        unsigned int nr_succeeded;
        LIST_HEAD(migratepages);
-       new_page_t *new;
-       bool compound;
        int nr_pages = thp_nr_pages(page);
 
        /*
-        * PTE mapped THP or HugeTLB page can't reach here so the page could
-        * be either base page or THP.  And it must be head page if it is
-        * THP.
-        */
-       compound = PageTransHuge(page);
-
-       if (compound)
-               new = alloc_misplaced_dst_page_thp;
-       else
-               new = alloc_misplaced_dst_page;
-
-       /*
         * Don't migrate file pages that are mapped in multiple processes
         * with execute permissions as they are probably shared libraries.
         */
@@ -2118,9 +2090,9 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
                goto out;
 
        list_add(&page->lru, &migratepages);
-       nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
-                                    MIGRATE_ASYNC, MR_NUMA_MISPLACED,
-                                    &nr_succeeded);
+       nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
+                                    NULL, node, MIGRATE_ASYNC,
+                                    MR_NUMA_MISPLACED, &nr_succeeded);
        if (nr_remaining) {
                if (!list_empty(&migratepages)) {
                        list_del(&page->lru);
index 9d76da7..303d329 100644 (file)
@@ -486,6 +486,9 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
        pmd_t *old_pmd, *new_pmd;
        pud_t *old_pud, *new_pud;
 
+       if (!len)
+               return 0;
+
        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);
 
index 2db9578..33ca8ca 100644 (file)
@@ -128,7 +128,7 @@ static DEFINE_MUTEX(pcp_batch_high_lock);
 struct pagesets {
        local_lock_t lock;
 };
-static DEFINE_PER_CPU(struct pagesets, pagesets) __maybe_unused = {
+static DEFINE_PER_CPU(struct pagesets, pagesets) = {
        .lock = INIT_LOCAL_LOCK(lock),
 };
 
@@ -6131,7 +6131,7 @@ static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
        do {
                zone_type--;
                zone = pgdat->node_zones + zone_type;
-               if (managed_zone(zone)) {
+               if (populated_zone(zone)) {
                        zoneref_set_zone(zone, &zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
                }
index b417f00..89fbf3c 100644 (file)
@@ -51,54 +51,6 @@ void end_swap_bio_write(struct bio *bio)
        bio_put(bio);
 }
 
-static void swap_slot_free_notify(struct page *page)
-{
-       struct swap_info_struct *sis;
-       struct gendisk *disk;
-       swp_entry_t entry;
-
-       /*
-        * There is no guarantee that the page is in swap cache - the software
-        * suspend code (at least) uses end_swap_bio_read() against a non-
-        * swapcache page.  So we must check PG_swapcache before proceeding with
-        * this optimization.
-        */
-       if (unlikely(!PageSwapCache(page)))
-               return;
-
-       sis = page_swap_info(page);
-       if (data_race(!(sis->flags & SWP_BLKDEV)))
-               return;
-
-       /*
-        * The swap subsystem performs lazy swap slot freeing,
-        * expecting that the page will be swapped out again.
-        * So we can avoid an unnecessary write if the page
-        * isn't redirtied.
-        * This is good for real swap storage because we can
-        * reduce unnecessary I/O and enhance wear-leveling
-        * if an SSD is used as the as swap device.
-        * But if in-memory swap device (eg zram) is used,
-        * this causes a duplicated copy between uncompressed
-        * data in VM-owned memory and compressed data in
-        * zram-owned memory.  So let's free zram-owned memory
-        * and make the VM-owned decompressed page *dirty*,
-        * so the page should be swapped out somewhere again if
-        * we again wish to reclaim it.
-        */
-       disk = sis->bdev->bd_disk;
-       entry.val = page_private(page);
-       if (disk->fops->swap_slot_free_notify && __swap_count(entry) == 1) {
-               unsigned long offset;
-
-               offset = swp_offset(entry);
-
-               SetPageDirty(page);
-               disk->fops->swap_slot_free_notify(sis->bdev,
-                               offset);
-       }
-}
-
 static void end_swap_bio_read(struct bio *bio)
 {
        struct page *page = bio_first_page_all(bio);
@@ -114,7 +66,6 @@ static void end_swap_bio_read(struct bio *bio)
        }
 
        SetPageUptodate(page);
-       swap_slot_free_notify(page);
 out:
        unlock_page(page);
        WRITE_ONCE(bio->bi_private, NULL);
@@ -394,11 +345,6 @@ int swap_readpage(struct page *page, bool synchronous)
        if (sis->flags & SWP_SYNCHRONOUS_IO) {
                ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
                if (!ret) {
-                       if (trylock_page(page)) {
-                               swap_slot_free_notify(page);
-                               unlock_page(page);
-                       }
-
                        count_vm_event(PSWPIN);
                        goto out;
                }
index 1187f9c..14a5cda 100644 (file)
@@ -163,7 +163,8 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                return not_found(pvmw);
 
        if (unlikely(is_vm_hugetlb_page(vma))) {
-               unsigned long size = pvmw->nr_pages * PAGE_SIZE;
+               struct hstate *hstate = hstate_vma(vma);
+               unsigned long size = huge_page_size(hstate);
                /* The only possible mapping was handled on last iteration */
                if (pvmw->pte)
                        return not_found(pvmw);
@@ -173,8 +174,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
                if (!pvmw->pte)
                        return false;
 
-               pvmw->ptl = huge_pte_lockptr(size_to_hstate(size), mm,
-                                               pvmw->pte);
+               pvmw->ptl = huge_pte_lockptr(hstate, mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
index 098638d..3b3cf28 100644 (file)
@@ -158,6 +158,22 @@ const struct address_space_operations secretmem_aops = {
        .isolate_page   = secretmem_isolate_page,
 };
 
+static int secretmem_setattr(struct user_namespace *mnt_userns,
+                            struct dentry *dentry, struct iattr *iattr)
+{
+       struct inode *inode = d_inode(dentry);
+       unsigned int ia_valid = iattr->ia_valid;
+
+       if ((ia_valid & ATTR_SIZE) && inode->i_size)
+               return -EINVAL;
+
+       return simple_setattr(mnt_userns, dentry, iattr);
+}
+
+static const struct inode_operations secretmem_iops = {
+       .setattr = secretmem_setattr,
+};
+
 static struct vfsmount *secretmem_mnt;
 
 static struct file *secretmem_file_create(unsigned long flags)
@@ -177,6 +193,7 @@ static struct file *secretmem_file_create(unsigned long flags)
        mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
        mapping_set_unevictable(inode->i_mapping);
 
+       inode->i_op = &secretmem_iops;
        inode->i_mapping->a_ops = &secretmem_aops;
 
        /* pretend we are a normal file with zero size */
index 529c9ad..4b2fea3 100644 (file)
@@ -2513,7 +2513,6 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                pgoff_t end_index;
                unsigned long nr, ret;
                loff_t i_size = i_size_read(inode);
-               bool got_page;
 
                end_index = i_size >> PAGE_SHIFT;
                if (index > end_index)
@@ -2570,24 +2569,34 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                         */
                        if (!offset)
                                mark_page_accessed(page);
-                       got_page = true;
+                       /*
+                        * Ok, we have the page, and it's up-to-date, so
+                        * now we can copy it to user space...
+                        */
+                       ret = copy_page_to_iter(page, offset, nr, to);
+                       put_page(page);
+
+               } else if (iter_is_iovec(to)) {
+                       /*
+                        * Copy to user tends to be so well optimized, but
+                        * clear_user() not so much, that it is noticeably
+                        * faster to copy the zero page instead of clearing.
+                        */
+                       ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to);
                } else {
-                       page = ZERO_PAGE(0);
-                       got_page = false;
+                       /*
+                        * But submitting the same page twice in a row to
+                        * splice() - or others? - can result in confusion:
+                        * so don't attempt that optimization on pipes etc.
+                        */
+                       ret = iov_iter_zero(nr, to);
                }
 
-               /*
-                * Ok, we have the page, and it's up-to-date, so
-                * now we can copy it to user space...
-                */
-               ret = copy_page_to_iter(page, offset, nr, to);
                retval += ret;
                offset += ret;
                index += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
 
-               if (got_page)
-                       put_page(page);
                if (!iov_iter_count(to))
                        break;
                if (ret < nr) {
index b04e400..0edb474 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3665,7 +3665,7 @@ EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        struct kmem_cache *cachep;
        unsigned int objnr;
index fd7ae20..95eb341 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -868,7 +868,7 @@ struct kmem_obj_info {
        void *kp_stack[KS_ADDRS_COUNT];
        void *kp_free_stack[KS_ADDRS_COUNT];
 };
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
 #endif
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
index 6ee64d6..2b3206a 100644 (file)
@@ -555,6 +555,13 @@ bool kmem_valid_obj(void *object)
 }
 EXPORT_SYMBOL_GPL(kmem_valid_obj);
 
+static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+{
+       if (__kfence_obj_info(kpp, object, slab))
+               return;
+       __kmem_obj_info(kpp, object, slab);
+}
+
 /**
  * kmem_dump_obj - Print available slab provenance information
  * @object: slab object for which to find provenance information.
@@ -590,6 +597,8 @@ void kmem_dump_obj(void *object)
                pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
        else
                pr_cont(" slab%s", cp);
+       if (is_kfence_address(object))
+               pr_cont(" (kfence)");
        if (kp.kp_objp)
                pr_cont(" start %px", kp.kp_objp);
        if (kp.kp_data_offset)
index dfa6808..40ea6e2 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -463,7 +463,7 @@ out:
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        kpp->kp_ptr = object;
        kpp->kp_slab = slab;
index 74d92aa..ed5c2c0 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4312,7 +4312,7 @@ int __kmem_cache_shutdown(struct kmem_cache *s)
 }
 
 #ifdef CONFIG_PRINTK
-void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
+void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
 {
        void *base;
        int __maybe_unused i;
index e163372..0b17498 100644 (file)
@@ -1671,17 +1671,6 @@ static DEFINE_MUTEX(vmap_purge_lock);
 /* for per-CPU blocks */
 static void purge_fragmented_blocks_allcpus(void);
 
-#ifdef CONFIG_X86_64
-/*
- * called before a call to iounmap() if the caller wants vm_area_struct's
- * immediately freed.
- */
-void set_iounmap_nonlazy(void)
-{
-       atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Purges all lazily-freed vmap areas.
  */
index a7044e9..64470a7 100644 (file)
@@ -7016,24 +7016,33 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
        if (!th->ack || th->rst || th->syn)
                return -ENOENT;
 
+       if (unlikely(iph_len < sizeof(struct iphdr)))
+               return -EINVAL;
+
        if (tcp_synq_no_recent_overflow(sk))
                return -ENOENT;
 
        cookie = ntohl(th->ack_seq) - 1;
 
-       switch (sk->sk_family) {
-       case AF_INET:
-               if (unlikely(iph_len < sizeof(struct iphdr)))
+       /* Both struct iphdr and struct ipv6hdr have the version field at the
+        * same offset so we can cast to the shorter header (struct iphdr).
+        */
+       switch (((struct iphdr *)iph)->version) {
+       case 4:
+               if (sk->sk_family == AF_INET6 && ipv6_only_sock(sk))
                        return -EINVAL;
 
                ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
                break;
 
 #if IS_BUILTIN(CONFIG_IPV6)
-       case AF_INET6:
+       case 6:
                if (unlikely(iph_len < sizeof(struct ipv6hdr)))
                        return -EINVAL;
 
+               if (sk->sk_family != AF_INET6)
+                       return -EINVAL;
+
                ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
                break;
 #endif /* CONFIG_IPV6 */
index 03b6e64..6f7ec72 100644 (file)
@@ -1032,7 +1032,7 @@ bool __skb_flow_dissect(const struct net *net,
                key_eth_addrs = skb_flow_dissector_target(flow_dissector,
                                                          FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                          target_container);
-               memcpy(key_eth_addrs, &eth->h_dest, sizeof(*key_eth_addrs));
+               memcpy(key_eth_addrs, eth, sizeof(*key_eth_addrs));
        }
 
 proto_again:
@@ -1183,6 +1183,7 @@ proto_again:
                                         VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                        }
                        key_vlan->vlan_tpid = saved_vlan_tpid;
+                       key_vlan->vlan_eth_type = proto;
                }
 
                fdret = FLOW_DISSECT_RET_PROTO_AGAIN;
index 159c9c6..d1381ea 100644 (file)
@@ -5242,6 +5242,8 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
                *prividx = attr_id_l3_stats;
 
                size_l3 = rtnl_offload_xstats_get_size_stats(dev, t_l3);
+               if (!size_l3)
+                       goto skip_l3_stats;
                attr = nla_reserve_64bit(skb, attr_id_l3_stats, size_l3,
                                         IFLA_OFFLOAD_XSTATS_UNSPEC);
                if (!attr)
@@ -5253,6 +5255,7 @@ static int rtnl_offload_xstats_fill(struct sk_buff *skb, struct net_device *dev,
                        return err;
 
                have_data = true;
+skip_l3_stats:
                *prividx = 0;
        }
 
index 10bde7c..30b523f 100644 (file)
@@ -5276,11 +5276,18 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
        if (skb_cloned(to))
                return false;
 
-       /* The page pool signature of struct page will eventually figure out
-        * which pages can be recycled or not but for now let's prohibit slab
-        * allocated and page_pool allocated SKBs from being coalesced.
+       /* In general, avoid mixing slab allocated and page_pool allocated
+        * pages within the same SKB. However when @to is not pp_recycle and
+        * @from is cloned, we can transition frag pages from page_pool to
+        * reference counted.
+        *
+        * On the other hand, don't allow coalescing two pp_recycle SKBs if
+        * @from is cloned, in case the SKB is using page_pool fragment
+        * references (PP_FLAG_PAGE_FRAG). Since we only take full page
+        * references for cloned SKBs at the moment that would result in
+        * inconsistent reference counts.
         */
-       if (to->pp_recycle != from->pp_recycle)
+       if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
                return false;
 
        if (len <= skb_tailroom(to)) {
index ca6af86..cf93322 100644 (file)
@@ -562,7 +562,6 @@ static void dsa_port_teardown(struct dsa_port *dp)
 {
        struct devlink_port *dlp = &dp->devlink_port;
        struct dsa_switch *ds = dp->ds;
-       struct net_device *slave;
 
        if (!dp->setup)
                return;
@@ -584,11 +583,9 @@ static void dsa_port_teardown(struct dsa_port *dp)
                dsa_port_link_unregister_of(dp);
                break;
        case DSA_PORT_TYPE_USER:
-               slave = dp->slave;
-
-               if (slave) {
+               if (dp->slave) {
+                       dsa_slave_destroy(dp->slave);
                        dp->slave = NULL;
-                       dsa_slave_destroy(slave);
                }
                break;
        }
@@ -1147,17 +1144,17 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
        if (err)
                goto teardown_cpu_ports;
 
-       err = dsa_tree_setup_master(dst);
+       err = dsa_tree_setup_ports(dst);
        if (err)
                goto teardown_switches;
 
-       err = dsa_tree_setup_ports(dst);
+       err = dsa_tree_setup_master(dst);
        if (err)
-               goto teardown_master;
+               goto teardown_ports;
 
        err = dsa_tree_setup_lags(dst);
        if (err)
-               goto teardown_ports;
+               goto teardown_master;
 
        dst->setup = true;
 
@@ -1165,10 +1162,10 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
 
        return 0;
 
-teardown_ports:
-       dsa_tree_teardown_ports(dst);
 teardown_master:
        dsa_tree_teardown_master(dst);
+teardown_ports:
+       dsa_tree_teardown_ports(dst);
 teardown_switches:
        dsa_tree_teardown_switches(dst);
 teardown_cpu_ports:
@@ -1186,10 +1183,10 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst)
 
        dsa_tree_teardown_lags(dst);
 
-       dsa_tree_teardown_ports(dst);
-
        dsa_tree_teardown_master(dst);
 
+       dsa_tree_teardown_ports(dst);
+
        dsa_tree_teardown_switches(dst);
 
        dsa_tree_teardown_cpu_ports(dst);
index 991c293..2851e44 100644 (file)
@@ -335,11 +335,24 @@ static const struct attribute_group dsa_group = {
        .attrs  = dsa_slave_attrs,
 };
 
+static void dsa_master_reset_mtu(struct net_device *dev)
+{
+       int err;
+
+       err = dev_set_mtu(dev, ETH_DATA_LEN);
+       if (err)
+               netdev_dbg(dev,
+                          "Unable to reset MTU to exclude DSA overheads\n");
+}
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
+       const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops;
        struct dsa_switch *ds = cpu_dp->ds;
        struct device_link *consumer_link;
-       int ret;
+       int mtu, ret;
+
+       mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops);
 
        /* The DSA master must use SET_NETDEV_DEV for this to work. */
        consumer_link = device_link_add(ds->dev, dev->dev.parent,
@@ -349,6 +362,15 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
                           "Failed to create a device link to DSA switch %s\n",
                           dev_name(ds->dev));
 
+       /* The switch driver may not implement ->port_change_mtu(), case in
+        * which dsa_slave_change_mtu() will not update the master MTU either,
+        * so we need to do that here.
+        */
+       ret = dev_set_mtu(dev, mtu);
+       if (ret)
+               netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n",
+                           ret, mtu);
+
        /* If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
         * sent to the tag format's receive function.
@@ -384,6 +406,7 @@ void dsa_master_teardown(struct net_device *dev)
        sysfs_remove_group(&dev->dev.kobj, &dsa_group);
        dsa_netdev_ops_set(dev, NULL);
        dsa_master_ethtool_teardown(dev);
+       dsa_master_reset_mtu(dev);
        dsa_master_set_promiscuity(dev, -1);
 
        dev->dsa_ptr = NULL;
index cc8e84e..ccb6203 100644 (file)
@@ -889,8 +889,13 @@ int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
        }
 
        if (cfg->fc_oif || cfg->fc_gw_family) {
-               struct fib_nh *nh = fib_info_nh(fi, 0);
+               struct fib_nh *nh;
+
+               /* cannot match on nexthop object attributes */
+               if (fi->nh)
+                       return 1;
 
+               nh = fib_info_nh(fi, 0);
                if (cfg->fc_encap) {
                        if (fib_encap_match(net, cfg->fc_encap_type,
                                            cfg->fc_encap, nh, cfg, extack))
index e23f058..fa63ef2 100644 (file)
@@ -485,7 +485,7 @@ int ip6_forward(struct sk_buff *skb)
                goto drop;
 
        if (!net->ipv6.devconf_all->disable_policy &&
-           !idev->cnf.disable_policy &&
+           (!idev || !idev->cnf.disable_policy) &&
            !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                goto drop;
index a9775c8..4e74bc6 100644 (file)
@@ -1653,7 +1653,6 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
        mifi_t mifi;
        struct net *net = sock_net(sk);
        struct mr_table *mrt;
-       bool do_wrmifwhole;
 
        if (sk->sk_type != SOCK_RAW ||
            inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
@@ -1761,6 +1760,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
 #ifdef CONFIG_IPV6_PIMSM_V2
        case MRT6_PIM:
        {
+               bool do_wrmifwhole;
                int v;
 
                if (optlen != sizeof(v))
index 2fa10e6..169e9df 100644 (file)
@@ -4484,7 +4484,7 @@ static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
        struct inet6_dev *idev;
        int type;
 
-       if (netif_is_l3_master(skb->dev) &&
+       if (netif_is_l3_master(skb->dev) ||
            dst->dev == net->loopback_dev)
                idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
        else
index 9479f27..88d9cc9 100644 (file)
@@ -441,7 +441,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
 #define PRINT_HT_CAP(_cond, _str) \
        do { \
        if (_cond) \
-                       p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
+                       p += scnprintf(p, bufsz + buf - p, "\t" _str "\n"); \
        } while (0)
        char *buf, *p;
        int i;
index f0702d9..e22b0cb 100644 (file)
@@ -93,13 +93,13 @@ out_release:
 static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
        DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
-       const int hlen = MCTP_HEADER_MAXLEN + sizeof(struct mctp_hdr);
        int rc, addrlen = msg->msg_namelen;
        struct sock *sk = sock->sk;
        struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
        struct mctp_skb_cb *cb;
        struct mctp_route *rt;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
+       int hlen;
 
        if (addr) {
                const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
@@ -129,6 +129,34 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (addr->smctp_network == MCTP_NET_ANY)
                addr->smctp_network = mctp_default_net(sock_net(sk));
 
+       /* direct addressing */
+       if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
+               DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
+                                extaddr, msg->msg_name);
+               struct net_device *dev;
+
+               rc = -EINVAL;
+               rcu_read_lock();
+               dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
+               /* check for correct halen */
+               if (dev && extaddr->smctp_halen == dev->addr_len) {
+                       hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
+                       rc = 0;
+               }
+               rcu_read_unlock();
+               if (rc)
+                       goto err_free;
+               rt = NULL;
+       } else {
+               rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
+                                      addr->smctp_addr.s_addr);
+               if (!rt) {
+                       rc = -EHOSTUNREACH;
+                       goto err_free;
+               }
+               hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
+       }
+
        skb = sock_alloc_send_skb(sk, hlen + 1 + len,
                                  msg->msg_flags & MSG_DONTWAIT, &rc);
        if (!skb)
@@ -147,8 +175,8 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        cb = __mctp_cb(skb);
        cb->net = addr->smctp_network;
 
-       /* direct addressing */
-       if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
+       if (!rt) {
+               /* fill extended address in cb */
                DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
                                 extaddr, msg->msg_name);
 
@@ -159,17 +187,9 @@ static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                }
 
                cb->ifindex = extaddr->smctp_ifindex;
+               /* smctp_halen is checked above */
                cb->halen = extaddr->smctp_halen;
                memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
-
-               rt = NULL;
-       } else {
-               rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
-                                      addr->smctp_addr.s_addr);
-               if (!rt) {
-                       rc = -EHOSTUNREACH;
-                       goto err_free;
-               }
        }
 
        rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
index d5e7db8..3b24b8d 100644 (file)
@@ -503,6 +503,11 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
 
        if (cb->ifindex) {
                /* direct route; use the hwaddr we stashed in sendmsg */
+               if (cb->halen != skb->dev->addr_len) {
+                       /* sanity check, sendmsg should have already caught this */
+                       kfree_skb(skb);
+                       return -EMSGSIZE;
+               }
                daddr = cb->haddr;
        } else {
                /* If lookup fails let the device handle daddr==NULL */
@@ -512,7 +517,7 @@ static int mctp_route_output(struct mctp_route *route, struct sk_buff *skb)
 
        rc = dev_hard_header(skb, skb->dev, ntohs(skb->protocol),
                             daddr, skb->dev->dev_addr, skb->len);
-       if (rc) {
+       if (rc < 0) {
                kfree_skb(skb);
                return -EHOSTUNREACH;
        }
@@ -756,7 +761,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
 {
        const unsigned int hlen = sizeof(struct mctp_hdr);
        struct mctp_hdr *hdr, *hdr2;
-       unsigned int pos, size;
+       unsigned int pos, size, headroom;
        struct sk_buff *skb2;
        int rc;
        u8 seq;
@@ -770,6 +775,9 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
                return -EMSGSIZE;
        }
 
+       /* keep same headroom as the original skb */
+       headroom = skb_headroom(skb);
+
        /* we've got the header */
        skb_pull(skb, hlen);
 
@@ -777,7 +785,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
                /* size of message payload */
                size = min(mtu - hlen, skb->len - pos);
 
-               skb2 = alloc_skb(MCTP_HEADER_MAXLEN + hlen + size, GFP_KERNEL);
+               skb2 = alloc_skb(headroom + hlen + size, GFP_KERNEL);
                if (!skb2) {
                        rc = -ENOMEM;
                        break;
@@ -793,7 +801,7 @@ static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
                        skb_set_owner_w(skb2, skb->sk);
 
                /* establish packet */
-               skb_reserve(skb2, MCTP_HEADER_MAXLEN);
+               skb_reserve(skb2, headroom);
                skb_reset_network_header(skb2);
                skb_put(skb2, hlen + size);
                skb2->transport_header = skb2->network_header + hlen;
index 5ddfdb2..16c3a39 100644 (file)
@@ -5526,7 +5526,7 @@ int nft_set_elem_expr_clone(const struct nft_ctx *ctx, struct nft_set *set,
        int err, i, k;
 
        for (i = 0; i < set->num_exprs; i++) {
-               expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL);
+               expr = kzalloc(set->exprs[i]->ops->size, GFP_KERNEL_ACCOUNT);
                if (!expr)
                        goto err_expr;
 
@@ -9363,7 +9363,7 @@ int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest)
 }
 EXPORT_SYMBOL_GPL(nft_parse_u32_check);
 
-static unsigned int nft_parse_register(const struct nlattr *attr, u32 *preg)
+static int nft_parse_register(const struct nlattr *attr, u32 *preg)
 {
        unsigned int reg;
 
index 38caa66..f590ee1 100644 (file)
@@ -290,7 +290,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
        if (!track->regs[priv->sreg].selector)
                return false;
 
-       bitwise = nft_expr_priv(expr);
+       bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
        if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
            track->regs[priv->sreg].num_reg == 0 &&
            track->regs[priv->dreg].bitwise &&
@@ -442,7 +442,7 @@ static bool nft_bitwise_fast_reduce(struct nft_regs_track *track,
        if (!track->regs[priv->sreg].selector)
                return false;
 
-       bitwise = nft_expr_priv(expr);
+       bitwise = nft_expr_priv(track->regs[priv->dreg].selector);
        if (track->regs[priv->sreg].selector == track->regs[priv->dreg].selector &&
            track->regs[priv->dreg].bitwise &&
            track->regs[priv->dreg].bitwise->ops == expr->ops &&
index 9de1462..d657f99 100644 (file)
@@ -77,7 +77,7 @@ static int nft_connlimit_do_init(const struct nft_ctx *ctx,
                        invert = true;
        }
 
-       priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL);
+       priv->list = kmalloc(sizeof(*priv->list), GFP_KERNEL_ACCOUNT);
        if (!priv->list)
                return -ENOMEM;
 
index da90836..f4d3573 100644 (file)
@@ -62,7 +62,7 @@ static int nft_counter_do_init(const struct nlattr * const tb[],
        struct nft_counter __percpu *cpu_stats;
        struct nft_counter *this_cpu;
 
-       cpu_stats = alloc_percpu(struct nft_counter);
+       cpu_stats = alloc_percpu_gfp(struct nft_counter, GFP_KERNEL_ACCOUNT);
        if (cpu_stats == NULL)
                return -ENOMEM;
 
index 43d0d4a..bb15a55 100644 (file)
@@ -30,7 +30,7 @@ static int nft_last_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
        u64 last_jiffies;
        int err;
 
-       last = kzalloc(sizeof(*last), GFP_KERNEL);
+       last = kzalloc(sizeof(*last), GFP_KERNEL_ACCOUNT);
        if (!last)
                return -ENOMEM;
 
index d4a6cf3..04ea8b9 100644 (file)
@@ -90,7 +90,7 @@ static int nft_limit_init(struct nft_limit_priv *priv,
                                 priv->rate);
        }
 
-       priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL);
+       priv->limit = kmalloc(sizeof(*priv->limit), GFP_KERNEL_ACCOUNT);
        if (!priv->limit)
                return -ENOMEM;
 
index d7db57e..e6b0df6 100644 (file)
@@ -90,7 +90,7 @@ static int nft_quota_do_init(const struct nlattr * const tb[],
                        return -EOPNOTSUPP;
        }
 
-       priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL);
+       priv->consumed = kmalloc(sizeof(*priv->consumed), GFP_KERNEL_ACCOUNT);
        if (!priv->consumed)
                return -ENOMEM;
 
index bd3792f..6d9e8e0 100644 (file)
@@ -37,12 +37,11 @@ static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
 
 #ifdef CONFIG_SOCK_CGROUP_DATA
 static noinline bool
-nft_sock_get_eval_cgroupv2(u32 *dest, const struct nft_pktinfo *pkt, u32 level)
+nft_sock_get_eval_cgroupv2(u32 *dest, struct sock *sk, const struct nft_pktinfo *pkt, u32 level)
 {
-       struct sock *sk = skb_to_full_sk(pkt->skb);
        struct cgroup *cgrp;
 
-       if (!sk || !sk_fullsock(sk) || !net_eq(nft_net(pkt), sock_net(sk)))
+       if (!sk_fullsock(sk))
                return false;
 
        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
@@ -109,7 +108,7 @@ static void nft_socket_eval(const struct nft_expr *expr,
                break;
 #ifdef CONFIG_SOCK_CGROUP_DATA
        case NFT_SOCKET_CGROUPV2:
-               if (!nft_sock_get_eval_cgroupv2(dest, pkt, priv->level)) {
+               if (!nft_sock_get_eval_cgroupv2(dest, sk, pkt, priv->level)) {
                        regs->verdict.code = NFT_BREAK;
                        return;
                }
index d253738..6a193cc 100644 (file)
@@ -560,6 +560,10 @@ static int nci_close_device(struct nci_dev *ndev)
        mutex_lock(&ndev->req_lock);
 
        if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
+               /* Need to flush the cmd wq in case
+                * there is a queued/running cmd_work
+                */
+               flush_workqueue(ndev->cmd_wq);
                del_timer_sync(&ndev->cmd_timer);
                del_timer_sync(&ndev->data_timer);
                mutex_unlock(&ndev->req_lock);
index 7056cb1..1b5d730 100644 (file)
@@ -1051,7 +1051,7 @@ static int clone(struct datapath *dp, struct sk_buff *skb,
        int rem = nla_len(attr);
        bool dont_clone_flow_key;
 
-       /* The first action is always 'OVS_CLONE_ATTR_ARG'. */
+       /* The first action is always 'OVS_CLONE_ATTR_EXEC'. */
        clone_arg = nla_data(attr);
        dont_clone_flow_key = nla_get_u32(clone_arg);
        actions = nla_next(clone_arg, &rem);
index cc282a5..7176156 100644 (file)
@@ -2317,6 +2317,62 @@ static struct sw_flow_actions *nla_alloc_flow_actions(int size)
        return sfa;
 }
 
+static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len);
+
+static void ovs_nla_free_check_pkt_len_action(const struct nlattr *action)
+{
+       const struct nlattr *a;
+       int rem;
+
+       nla_for_each_nested(a, action, rem) {
+               switch (nla_type(a)) {
+               case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL:
+               case OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER:
+                       ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
+                       break;
+               }
+       }
+}
+
+static void ovs_nla_free_clone_action(const struct nlattr *action)
+{
+       const struct nlattr *a = nla_data(action);
+       int rem = nla_len(action);
+
+       switch (nla_type(a)) {
+       case OVS_CLONE_ATTR_EXEC:
+               /* The real list of actions follows this attribute. */
+               a = nla_next(a, &rem);
+               ovs_nla_free_nested_actions(a, rem);
+               break;
+       }
+}
+
+static void ovs_nla_free_dec_ttl_action(const struct nlattr *action)
+{
+       const struct nlattr *a = nla_data(action);
+
+       switch (nla_type(a)) {
+       case OVS_DEC_TTL_ATTR_ACTION:
+               ovs_nla_free_nested_actions(nla_data(a), nla_len(a));
+               break;
+       }
+}
+
+static void ovs_nla_free_sample_action(const struct nlattr *action)
+{
+       const struct nlattr *a = nla_data(action);
+       int rem = nla_len(action);
+
+       switch (nla_type(a)) {
+       case OVS_SAMPLE_ATTR_ARG:
+               /* The real list of actions follows this attribute. */
+               a = nla_next(a, &rem);
+               ovs_nla_free_nested_actions(a, rem);
+               break;
+       }
+}
+
 static void ovs_nla_free_set_action(const struct nlattr *a)
 {
        const struct nlattr *ovs_key = nla_data(a);
@@ -2330,25 +2386,54 @@ static void ovs_nla_free_set_action(const struct nlattr *a)
        }
 }
 
-void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len)
 {
        const struct nlattr *a;
        int rem;
 
-       if (!sf_acts)
+       /* Whenever new actions are added, the need to update this
+        * function should be considered.
+        */
+       BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 23);
+
+       if (!actions)
                return;
 
-       nla_for_each_attr(a, sf_acts->actions, sf_acts->actions_len, rem) {
+       nla_for_each_attr(a, actions, len, rem) {
                switch (nla_type(a)) {
-               case OVS_ACTION_ATTR_SET:
-                       ovs_nla_free_set_action(a);
+               case OVS_ACTION_ATTR_CHECK_PKT_LEN:
+                       ovs_nla_free_check_pkt_len_action(a);
+                       break;
+
+               case OVS_ACTION_ATTR_CLONE:
+                       ovs_nla_free_clone_action(a);
                        break;
+
                case OVS_ACTION_ATTR_CT:
                        ovs_ct_free_action(a);
                        break;
+
+               case OVS_ACTION_ATTR_DEC_TTL:
+                       ovs_nla_free_dec_ttl_action(a);
+                       break;
+
+               case OVS_ACTION_ATTR_SAMPLE:
+                       ovs_nla_free_sample_action(a);
+                       break;
+
+               case OVS_ACTION_ATTR_SET:
+                       ovs_nla_free_set_action(a);
+                       break;
                }
        }
+}
+
+void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts)
+{
+       if (!sf_acts)
+               return;
 
+       ovs_nla_free_nested_actions(sf_acts->actions, sf_acts->actions_len);
        kfree(sf_acts);
 }
 
@@ -3458,7 +3543,9 @@ static int clone_action_to_attr(const struct nlattr *attr,
        if (!start)
                return -EMSGSIZE;
 
-       err = ovs_nla_put_actions(nla_data(attr), rem, skb);
+       /* Skipping the OVS_CLONE_ATTR_EXEC that is always the first attribute. */
+       attr = nla_next(nla_data(attr), &rem);
+       err = ovs_nla_put_actions(attr, rem, skb);
 
        if (err)
                nla_nest_cancel(skb, start);
index 25bbc4c..f15d694 100644 (file)
@@ -113,8 +113,8 @@ static __net_exit void rxrpc_exit_net(struct net *net)
        struct rxrpc_net *rxnet = rxrpc_net(net);
 
        rxnet->live = false;
-       del_timer_sync(&rxnet->peer_keepalive_timer);
        cancel_work_sync(&rxnet->peer_keepalive_work);
+       del_timer_sync(&rxnet->peer_keepalive_timer);
        rxrpc_destroy_all_calls(rxnet);
        rxrpc_destroy_all_connections(rxnet);
        rxrpc_destroy_all_peers(rxnet);
index 2957f8f..f0699f3 100644 (file)
@@ -1672,10 +1672,10 @@ static int tcf_chain_tp_insert(struct tcf_chain *chain,
        if (chain->flushing)
                return -EAGAIN;
 
+       RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        if (*chain_info->pprev == chain->filter_chain)
                tcf_chain0_head_change(chain, tp);
        tcf_proto_get(tp);
-       RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
        rcu_assign_pointer(*chain_info->pprev, tp);
 
        return 0;
index c80fc49..ed5e6f0 100644 (file)
@@ -1013,6 +1013,7 @@ static int fl_set_key_mpls(struct nlattr **tb,
 static void fl_set_key_vlan(struct nlattr **tb,
                            __be16 ethertype,
                            int vlan_id_key, int vlan_prio_key,
+                           int vlan_next_eth_type_key,
                            struct flow_dissector_key_vlan *key_val,
                            struct flow_dissector_key_vlan *key_mask)
 {
@@ -1031,6 +1032,11 @@ static void fl_set_key_vlan(struct nlattr **tb,
        }
        key_val->vlan_tpid = ethertype;
        key_mask->vlan_tpid = cpu_to_be16(~0);
+       if (tb[vlan_next_eth_type_key]) {
+               key_val->vlan_eth_type =
+                       nla_get_be16(tb[vlan_next_eth_type_key]);
+               key_mask->vlan_eth_type = cpu_to_be16(~0);
+       }
 }
 
 static void fl_set_key_flag(u32 flower_key, u32 flower_mask,
@@ -1602,8 +1608,9 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 
                if (eth_type_vlan(ethertype)) {
                        fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
-                                       TCA_FLOWER_KEY_VLAN_PRIO, &key->vlan,
-                                       &mask->vlan);
+                                       TCA_FLOWER_KEY_VLAN_PRIO,
+                                       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+                                       &key->vlan, &mask->vlan);
 
                        if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
                                ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
@@ -1611,6 +1618,7 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
                                        fl_set_key_vlan(tb, ethertype,
                                                        TCA_FLOWER_KEY_CVLAN_ID,
                                                        TCA_FLOWER_KEY_CVLAN_PRIO,
+                                                       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
                                                        &key->cvlan, &mask->cvlan);
                                        fl_set_key_val(tb, &key->basic.n_proto,
                                                       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
@@ -3002,13 +3010,13 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
                goto nla_put_failure;
 
        if (mask->basic.n_proto) {
-               if (mask->cvlan.vlan_tpid) {
+               if (mask->cvlan.vlan_eth_type) {
                        if (nla_put_be16(skb, TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
                                         key->basic.n_proto))
                                goto nla_put_failure;
-               } else if (mask->vlan.vlan_tpid) {
+               } else if (mask->vlan.vlan_eth_type) {
                        if (nla_put_be16(skb, TCA_FLOWER_KEY_VLAN_ETH_TYPE,
-                                        key->basic.n_proto))
+                                        key->vlan.vlan_eth_type))
                                goto nla_put_failure;
                }
        }
index 377f896..b9c71a3 100644 (file)
@@ -417,7 +417,8 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 {
        struct taprio_sched *q = qdisc_priv(sch);
 
-       if (skb->sk && sock_flag(skb->sk, SOCK_TXTIME)) {
+       /* sk_flags are only safe to use on full sockets. */
+       if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
                if (!is_valid_interval(skb, sch))
                        return qdisc_drop(skb, sch, to_free);
        } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
index a18609f..e213aaf 100644 (file)
@@ -914,6 +914,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
                                ctx->asoc->base.sk->sk_err = -error;
                                return;
                        }
+                       ctx->asoc->stats.octrlchunks++;
                        break;
 
                case SCTP_CID_ABORT:
@@ -938,7 +939,10 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 
                case SCTP_CID_HEARTBEAT:
                        if (chunk->pmtu_probe) {
-                               sctp_packet_singleton(ctx->transport, chunk, ctx->gfp);
+                               error = sctp_packet_singleton(ctx->transport,
+                                                             chunk, ctx->gfp);
+                               if (!error)
+                                       ctx->asoc->stats.octrlchunks++;
                                break;
                        }
                        fallthrough;
index 7f342bc..52edee1 100644 (file)
@@ -781,7 +781,7 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
                }
        }
 
-       if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+       if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
                sctp_association_free(new_asoc);
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
@@ -932,7 +932,7 @@ enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net,
 
        /* Set peer label for connection. */
        if (security_sctp_assoc_established((struct sctp_association *)asoc,
-                                           chunk->skb))
+                                           chunk->head_skb ?: chunk->skb))
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
 
        /* Verify that the chunk length for the COOKIE-ACK is OK.
@@ -2262,7 +2262,7 @@ enum sctp_disposition sctp_sf_do_5_2_4_dupcook(
        }
 
        /* Update socket peer label if first association. */
-       if (security_sctp_assoc_request(new_asoc, chunk->skb)) {
+       if (security_sctp_assoc_request(new_asoc, chunk->head_skb ?: chunk->skb)) {
                sctp_association_free(new_asoc);
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
        }
index 3e1a960..7b04276 100644 (file)
@@ -5636,7 +5636,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
         * Set the daddr and initialize id to something more random and also
         * copy over any ip options.
         */
-       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
+       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sock->sk);
        sp->pf->copy_ip_options(sk, sock->sk);
 
        /* Populate the fields of the newsk from the oldsk and migrate the
index f0d118e..14ddc40 100644 (file)
@@ -121,6 +121,7 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
                                          bool *own_req)
 {
        struct smc_sock *smc;
+       struct sock *child;
 
        smc = smc_clcsock_user_data(sk);
 
@@ -134,8 +135,17 @@ static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
        }
 
        /* passthrough to original syn recv sock fct */
-       return smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
-                                             own_req);
+       child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
+                                              own_req);
+       /* child must not inherit smc or its ops */
+       if (child) {
+               rcu_assign_sk_user_data(child, NULL);
+
+               /* v4-mapped sockets don't inherit parent ops. Don't restore. */
+               if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
+                       inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
+       }
+       return child;
 
 drop:
        dst_release(dst);
index ce27399..f9f3f59 100644 (file)
@@ -191,7 +191,8 @@ static int smc_nl_ueid_dumpinfo(struct sk_buff *skb, u32 portid, u32 seq,
                          flags, SMC_NETLINK_DUMP_UEID);
        if (!hdr)
                return -ENOMEM;
-       snprintf(ueid_str, sizeof(ueid_str), "%s", ueid);
+       memcpy(ueid_str, ueid, SMC_MAX_EID_LEN);
+       ueid_str[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_EID_TABLE_ENTRY, ueid_str)) {
                genlmsg_cancel(skb, hdr);
                return -EMSGSIZE;
@@ -252,7 +253,8 @@ int smc_nl_dump_seid(struct sk_buff *skb, struct netlink_callback *cb)
                goto end;
 
        smc_ism_get_system_eid(&seid);
-       snprintf(seid_str, sizeof(seid_str), "%s", seid);
+       memcpy(seid_str, seid, SMC_MAX_EID_LEN);
+       seid_str[SMC_MAX_EID_LEN] = 0;
        if (nla_put_string(skb, SMC_NLA_SEID_ENTRY, seid_str))
                goto err;
        read_lock(&smc_clc_eid_table.lock);
index 7984f88..7055ed1 100644 (file)
@@ -311,8 +311,9 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (!strncmp(ibdev->ibdev->name, ib_name,
                             sizeof(ibdev->ibdev->name)) ||
-                   !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
-                            IB_DEVICE_NAME_MAX - 1)) {
+                   (ibdev->ibdev->dev.parent &&
+                    !strncmp(dev_name(ibdev->ibdev->dev.parent), ib_name,
+                            IB_DEVICE_NAME_MAX - 1))) {
                        goto out;
                }
        }
index 8bf2af8..af0174d 100644 (file)
@@ -1127,6 +1127,8 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
        struct rpc_task *task;
 
        task = rpc_new_task(task_setup_data);
+       if (IS_ERR(task))
+               return task;
 
        if (!RPC_IS_ASYNC(task))
                task->tk_flags |= RPC_TASK_CRED_NOREF;
@@ -1227,6 +1229,11 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
         * Create an rpc_task to send the data
         */
        task = rpc_new_task(&task_setup_data);
+       if (IS_ERR(task)) {
+               xprt_free_bc_request(req);
+               return task;
+       }
+
        xprt_init_bc_request(req, task);
 
        task->tk_action = call_bc_encode;
@@ -1858,6 +1865,9 @@ call_encode(struct rpc_task *task)
        xprt_request_dequeue_xprt(task);
        /* Encode here so that rpcsec_gss can use correct sequence number. */
        rpc_xdr_encode(task);
+       /* Add task to reply queue before transmission to avoid races */
+       if (task->tk_status == 0 && rpc_reply_expected(task))
+               task->tk_status = xprt_request_enqueue_receive(task);
        /* Did the encode result in an error condition? */
        if (task->tk_status != 0) {
                /* Was the error nonfatal? */
@@ -1881,9 +1891,6 @@ call_encode(struct rpc_task *task)
                return;
        }
 
-       /* Add task to reply queue before transmission to avoid races */
-       if (rpc_reply_expected(task))
-               xprt_request_enqueue_receive(task);
        xprt_request_enqueue_transmit(task);
 out:
        task->tk_action = call_transmit;
@@ -2200,6 +2207,7 @@ call_transmit_status(struct rpc_task *task)
                 * socket just returned a connection error,
                 * then hold onto the transport lock.
                 */
+       case -ENOMEM:
        case -ENOBUFS:
                rpc_delay(task, HZ>>2);
                fallthrough;
@@ -2283,6 +2291,7 @@ call_bc_transmit_status(struct rpc_task *task)
        case -ENOTCONN:
        case -EPIPE:
                break;
+       case -ENOMEM:
        case -ENOBUFS:
                rpc_delay(task, HZ>>2);
                fallthrough;
@@ -2365,6 +2374,11 @@ call_status(struct rpc_task *task)
        case -EPIPE:
        case -EAGAIN:
                break;
+       case -ENFILE:
+       case -ENOBUFS:
+       case -ENOMEM:
+               rpc_delay(task, HZ>>2);
+               break;
        case -EIO:
                /* shutdown or soft timeout */
                goto out_exit;
index b258b87..7f70c1e 100644 (file)
@@ -1128,6 +1128,11 @@ struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
 
        if (task == NULL) {
                task = rpc_alloc_task();
+               if (task == NULL) {
+                       rpc_release_calldata(setup_data->callback_ops,
+                                            setup_data->callback_data);
+                       return ERR_PTR(-ENOMEM);
+               }
                flags = RPC_TASK_DYNAMIC;
        }
 
index 05b38bf..71ba4cf 100644 (file)
@@ -221,12 +221,6 @@ static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
 static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
                              struct xdr_buf *xdr, size_t base)
 {
-       int err;
-
-       err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
-       if (err < 0)
-               return err;
-
        iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
                      xdr->page_len + xdr->page_base);
        return xprt_sendmsg(sock, msg, base + xdr->page_base);
index 297c498..5b59e21 100644 (file)
@@ -1231,6 +1231,8 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
                dr->xprt_hlen = rqstp->rq_xprt_hlen;
+               dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+               rqstp->rq_xprt_ctxt = NULL;
 
                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
@@ -1269,6 +1271,7 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_xprt_hlen   = dr->xprt_hlen;
        rqstp->rq_daddr       = dr->daddr;
        rqstp->rq_respages    = rqstp->rq_pages;
+       rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
        svc_xprt_received(rqstp->rq_xprt);
        return (dr->argslen<<2) - dr->xprt_hlen;
 }
index 478f857..cc35ec4 100644 (file)
@@ -579,15 +579,18 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
        if (svc_xprt_is_dead(xprt))
                goto out_notconn;
 
+       err = xdr_alloc_bvec(xdr, GFP_KERNEL);
+       if (err < 0)
+               goto out_unlock;
+
        err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-       xdr_free_bvec(xdr);
        if (err == -ECONNREFUSED) {
                /* ICMP error on earlier request. */
                err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-               xdr_free_bvec(xdr);
        }
+       xdr_free_bvec(xdr);
        trace_svcsock_udp_send(xprt, err);
-
+out_unlock:
        mutex_unlock(&xprt->xpt_mutex);
        if (err < 0)
                return err;
@@ -1096,7 +1099,9 @@ static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
        int ret;
 
        *sentp = 0;
-       xdr_alloc_bvec(xdr, GFP_KERNEL);
+       ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
+       if (ret < 0)
+               return ret;
 
        ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
        if (ret < 0)
index 515501f..86d62cf 100644 (file)
 /*
  * Local functions
  */
-static void     xprt_init(struct rpc_xprt *xprt, struct net *net);
+static void    xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32  xprt_alloc_xid(struct rpc_xprt *xprt);
-static void     xprt_destroy(struct rpc_xprt *xprt);
-static void     xprt_request_init(struct rpc_task *task);
+static void    xprt_destroy(struct rpc_xprt *xprt);
+static void    xprt_request_init(struct rpc_task *task);
+static int     xprt_request_prepare(struct rpc_rqst *req);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -929,12 +930,7 @@ void xprt_connect(struct rpc_task *task)
        if (!xprt_lock_write(xprt, task))
                return;
 
-       if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
-               trace_xprt_disconnect_cleanup(xprt);
-               xprt->ops->close(xprt);
-       }
-
-       if (!xprt_connected(xprt)) {
+       if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
                rpc_sleep_on_timeout(&xprt->pending, task, NULL,
                                xprt_request_timeout(task->tk_rqstp));
@@ -1143,16 +1139,19 @@ xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
  * @task: RPC task
  *
  */
-void
+int
 xprt_request_enqueue_receive(struct rpc_task *task)
 {
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
+       int ret;
 
        if (!xprt_request_need_enqueue_receive(task, req))
-               return;
+               return 0;
 
-       xprt_request_prepare(task->tk_rqstp);
+       ret = xprt_request_prepare(task->tk_rqstp);
+       if (ret)
+               return ret;
        spin_lock(&xprt->queue_lock);
 
        /* Update the softirq receive buffer */
@@ -1166,6 +1165,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
 
        /* Turn off autodisconnect */
        del_singleshot_timer_sync(&xprt->timer);
+       return 0;
 }
 
 /**
@@ -1452,14 +1452,16 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
  *
  * Calls into the transport layer to do whatever is needed to prepare
  * the request for transmission or receive.
+ * Returns error, or zero.
  */
-void
+static int
 xprt_request_prepare(struct rpc_rqst *req)
 {
        struct rpc_xprt *xprt = req->rq_xprt;
 
        if (xprt->ops->prepare_request)
-               xprt->ops->prepare_request(req);
+               return xprt->ops->prepare_request(req);
+       return 0;
 }
 
 /**
index cf76a6a..864131a 100644 (file)
@@ -831,7 +831,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
                goto out_err;
        if (ret == 0)
                goto out_drop;
-       rqstp->rq_xprt_hlen = ret;
+       rqstp->rq_xprt_hlen = 0;
 
        if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
                goto out_backchannel;
index 78af751..8ab64ea 100644 (file)
@@ -822,12 +822,17 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
        return ret;
 }
 
-static void
+static int
 xs_stream_prepare_request(struct rpc_rqst *req)
 {
+       gfp_t gfp = rpc_task_gfp_mask();
+       int ret;
+
+       ret = xdr_alloc_bvec(&req->rq_snd_buf, gfp);
+       if (ret < 0)
+               return ret;
        xdr_free_bvec(&req->rq_rcv_buf);
-       req->rq_task->tk_status = xdr_alloc_bvec(
-               &req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+       return xdr_alloc_bvec(&req->rq_rcv_buf, gfp);
 }
 
 /*
@@ -879,7 +884,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
 
        /* Close the stream if the previous transmission was incomplete */
        if (xs_send_request_was_aborted(transport, req)) {
-               xs_close(xprt);
+               xprt_force_disconnect(xprt);
                return -ENOTCONN;
        }
 
@@ -915,7 +920,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
                        -status);
                fallthrough;
        case -EPIPE:
-               xs_close(xprt);
+               xprt_force_disconnect(xprt);
                status = -ENOTCONN;
        }
 
@@ -956,6 +961,9 @@ static int xs_udp_send_request(struct rpc_rqst *req)
        if (!xprt_request_get_cong(xprt, req))
                return -EBADSLT;
 
+       status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+       if (status < 0)
+               return status;
        req->rq_xtime = ktime_get();
        status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
 
@@ -1185,6 +1193,16 @@ static void xs_reset_transport(struct sock_xprt *transport)
 
        if (sk == NULL)
                return;
+       /*
+        * Make sure we're calling this in a context from which it is safe
+        * to call __fput_sync(). In practice that means rpciod and the
+        * system workqueue.
+        */
+       if (!(current->flags & PF_WQ_WORKER)) {
+               WARN_ON_ONCE(1);
+               set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+               return;
+       }
 
        if (atomic_read(&transport->xprt.swapper))
                sk_clear_memalloc(sk);
@@ -1208,7 +1226,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
        mutex_unlock(&transport->recv_mutex);
 
        trace_rpc_socket_close(xprt, sock);
-       fput(filp);
+       __fput_sync(filp);
 
        xprt_disconnect_done(xprt);
 }
@@ -2544,6 +2562,9 @@ static int bc_sendto(struct rpc_rqst *req)
        int err;
 
        req->rq_xtime = ktime_get();
+       err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+       if (err < 0)
+               return err;
        err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
        xdr_free_bvec(xdr);
        if (err < 0 || sent != (xdr->len + sizeof(marker)))
index 0024a69..a8976ef 100644 (file)
@@ -1496,7 +1496,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
        if (prot->version == TLS_1_3_VERSION ||
            prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
                memcpy(iv + iv_offset, tls_ctx->rx.iv,
-                      crypto_aead_ivsize(ctx->aead_recv));
+                      prot->iv_size + prot->salt_size);
        else
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
 
index ee1c2b6..21e808f 100644 (file)
@@ -528,7 +528,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                                   .len = IEEE80211_MAX_MESH_ID_LEN },
        [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
-       [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
+       /* allow 3 for NUL-termination, we used to declare this NLA_STRING */
+       [NL80211_ATTR_REG_ALPHA2] = NLA_POLICY_RANGE(NLA_BINARY, 2, 3),
        [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
 
        [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 },
index b2fdac9..4a6d864 100644 (file)
@@ -2018,11 +2018,13 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
                /* this is a nontransmitting bss, we need to add it to
                 * transmitting bss' list if it is not there
                 */
+               spin_lock_bh(&rdev->bss_lock);
                if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
                                               &res->pub)) {
                        if (__cfg80211_unlink_bss(rdev, res))
                                rdev->bss_generation++;
                }
+               spin_unlock_bh(&rdev->bss_lock);
        }
 
        trace_cfg80211_return_bss(&res->pub);
index 589454b..8425da4 100644 (file)
@@ -86,25 +86,31 @@ static struct plugin_info latent_entropy_plugin_info = {
        .help           = "disable\tturn off latent entropy instrumentation\n",
 };
 
-static unsigned HOST_WIDE_INT seed;
-/*
- * get_random_seed() (this is a GCC function) generates the seed.
- * This is a simple random generator without any cryptographic security because
- * the entropy doesn't come from here.
- */
+static unsigned HOST_WIDE_INT deterministic_seed;
+static unsigned HOST_WIDE_INT rnd_buf[32];
+static size_t rnd_idx = ARRAY_SIZE(rnd_buf);
+static int urandom_fd = -1;
+
 static unsigned HOST_WIDE_INT get_random_const(void)
 {
-       unsigned int i;
-       unsigned HOST_WIDE_INT ret = 0;
-
-       for (i = 0; i < 8 * sizeof(ret); i++) {
-               ret = (ret << 1) | (seed & 1);
-               seed >>= 1;
-               if (ret & 1)
-                       seed ^= 0xD800000000000000ULL;
+       if (deterministic_seed) {
+               unsigned HOST_WIDE_INT w = deterministic_seed;
+               w ^= w << 13;
+               w ^= w >> 7;
+               w ^= w << 17;
+               deterministic_seed = w;
+               return deterministic_seed;
        }
 
-       return ret;
+       if (urandom_fd < 0) {
+               urandom_fd = open("/dev/urandom", O_RDONLY);
+               gcc_assert(urandom_fd >= 0);
+       }
+       if (rnd_idx >= ARRAY_SIZE(rnd_buf)) {
+               gcc_assert(read(urandom_fd, rnd_buf, sizeof(rnd_buf)) == sizeof(rnd_buf));
+               rnd_idx = 0;
+       }
+       return rnd_buf[rnd_idx++];
 }
 
 static tree tree_get_random_const(tree type)
@@ -537,8 +543,6 @@ static void latent_entropy_start_unit(void *gcc_data __unused,
        tree type, id;
        int quals;
 
-       seed = get_random_seed(false);
-
        if (in_lto_p)
                return;
 
@@ -573,6 +577,12 @@ __visible int plugin_init(struct plugin_name_args *plugin_info,
        const struct plugin_argument * const argv = plugin_info->argv;
        int i;
 
+       /*
+        * Call get_random_seed() with noinit=true, so that this returns
+        * 0 in the case where no seed has been passed via -frandom-seed.
+        */
+       deterministic_seed = get_random_seed(true);
+
        static const struct ggc_root_tab gt_ggc_r_gt_latent_entropy[] = {
                {
                        .base = &latent_entropy_decl,
index 31ba702..726a835 100644 (file)
@@ -209,6 +209,12 @@ static void __snd_card_release(struct device *dev, void *data)
  * snd_card_register(), the very first devres action to call snd_card_free()
  * is added automatically.  In that way, the resource disconnection is assured
  * at first, then released in the expected order.
+ *
+ * If an error happens at the probe before snd_card_register() is called and
+ * there have been other devres resources, you'd need to free the card manually
+ * via snd_card_free() call in the error; otherwise it may lead to UAF due to
+ * devres call orders.  You can use snd_card_free_on_error() helper for
+ * handling it more easily.
  */
 int snd_devm_card_new(struct device *parent, int idx, const char *xid,
                      struct module *module, size_t extra_size,
@@ -235,6 +241,28 @@ int snd_devm_card_new(struct device *parent, int idx, const char *xid,
 }
 EXPORT_SYMBOL_GPL(snd_devm_card_new);
 
+/**
+ * snd_card_free_on_error - a small helper for handling devm probe errors
+ * @dev: the managed device object
+ * @ret: the return code from the probe callback
+ *
+ * This function handles the explicit snd_card_free() call at the error from
+ * the probe callback.  It's just a small helper for simplifying the error
+ * handling with the managed devices.
+ */
+int snd_card_free_on_error(struct device *dev, int ret)
+{
+       struct snd_card *card;
+
+       if (!ret)
+               return 0;
+       card = devres_find(dev, __snd_card_release, NULL, NULL);
+       if (card)
+               snd_card_free(card);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(snd_card_free_on_error);
+
 static int snd_card_init(struct snd_card *card, struct device *parent,
                         int idx, const char *xid, struct module *module,
                         size_t extra_size)
index 6fd763d..15dc716 100644 (file)
@@ -499,6 +499,10 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
 };
 #endif /* CONFIG_X86 */
 
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+#endif
+
 /*
  * Non-contiguous pages allocator
  */
@@ -509,8 +513,18 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
 
        sgt = dma_alloc_noncontiguous(dmab->dev.dev, size, dmab->dev.dir,
                                      DEFAULT_GFP, 0);
-       if (!sgt)
+       if (!sgt) {
+#ifdef CONFIG_SND_DMA_SGBUF
+               if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+                       dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
+               else
+                       dmab->dev.type = SNDRV_DMA_TYPE_DEV_SG_FALLBACK;
+               return snd_dma_sg_fallback_alloc(dmab, size);
+#else
                return NULL;
+#endif
+       }
+
        dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
                                            sg_dma_address(sgt->sgl));
        p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
@@ -633,6 +647,8 @@ static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 
        if (!p)
                return NULL;
+       if (dmab->dev.type != SNDRV_DMA_TYPE_DEV_WC_SG)
+               return p;
        for_each_sgtable_page(sgt, &iter, 0)
                set_memory_wc(sg_wc_address(&iter), 1);
        return p;
@@ -665,6 +681,95 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
        .get_page = snd_dma_noncontig_get_page,
        .get_chunk_size = snd_dma_noncontig_get_chunk_size,
 };
+
+/* Fallback SG-buffer allocations for x86 */
+struct snd_dma_sg_fallback {
+       size_t count;
+       struct page **pages;
+       dma_addr_t *addrs;
+};
+
+static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
+                                      struct snd_dma_sg_fallback *sgbuf)
+{
+       size_t i;
+
+       if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               set_pages_array_wb(sgbuf->pages, sgbuf->count);
+       for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
+               dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
+                                 page_address(sgbuf->pages[i]),
+                                 sgbuf->addrs[i]);
+       kvfree(sgbuf->pages);
+       kvfree(sgbuf->addrs);
+       kfree(sgbuf);
+}
+
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+       struct snd_dma_sg_fallback *sgbuf;
+       struct page **pages;
+       size_t i, count;
+       void *p;
+
+       sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+       if (!sgbuf)
+               return NULL;
+       count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               goto error;
+       sgbuf->pages = pages;
+       sgbuf->addrs = kvcalloc(count, sizeof(*sgbuf->addrs), GFP_KERNEL);
+       if (!sgbuf->addrs)
+               goto error;
+
+       for (i = 0; i < count; sgbuf->count++, i++) {
+               p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
+                                      &sgbuf->addrs[i], DEFAULT_GFP);
+               if (!p)
+                       goto error;
+               sgbuf->pages[i] = virt_to_page(p);
+       }
+
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               set_pages_array_wc(pages, count);
+       p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
+       if (!p)
+               goto error;
+       dmab->private_data = sgbuf;
+       return p;
+
+ error:
+       __snd_dma_sg_fallback_free(dmab, sgbuf);
+       return NULL;
+}
+
+static void snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab)
+{
+       vunmap(dmab->area);
+       __snd_dma_sg_fallback_free(dmab, dmab->private_data);
+}
+
+static int snd_dma_sg_fallback_mmap(struct snd_dma_buffer *dmab,
+                                   struct vm_area_struct *area)
+{
+       struct snd_dma_sg_fallback *sgbuf = dmab->private_data;
+
+       if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
+               area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+       return vm_map_pages(area, sgbuf->pages, sgbuf->count);
+}
+
+static const struct snd_malloc_ops snd_dma_sg_fallback_ops = {
+       .alloc = snd_dma_sg_fallback_alloc,
+       .free = snd_dma_sg_fallback_free,
+       .mmap = snd_dma_sg_fallback_mmap,
+       /* reuse vmalloc helpers */
+       .get_addr = snd_dma_vmalloc_get_addr,
+       .get_page = snd_dma_vmalloc_get_page,
+       .get_chunk_size = snd_dma_vmalloc_get_chunk_size,
+};
 #endif /* CONFIG_SND_DMA_SGBUF */
 
 /*
@@ -736,6 +841,10 @@ static const struct snd_malloc_ops *dma_ops[] = {
 #ifdef CONFIG_GENERIC_ALLOCATOR
        [SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
+#ifdef CONFIG_SND_DMA_SGBUF
+       [SNDRV_DMA_TYPE_DEV_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+       [SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK] = &snd_dma_sg_fallback_ops,
+#endif
 #endif /* CONFIG_HAS_DMA */
 };
 
index 4866aed..5588b6a 100644 (file)
@@ -433,7 +433,7 @@ int snd_pcm_format_set_silence(snd_pcm_format_t format, void *data, unsigned int
                return 0;
        width = pcm_formats[(INT)format].phys; /* physical width */
        pat = pcm_formats[(INT)format].silence;
-       if (! width)
+       if (!width || !pat)
                return -EINVAL;
        /* signed or 1 byte data */
        if (pcm_formats[(INT)format].signd == 1 || width <= 8) {
index 11235ba..f212f23 100644 (file)
@@ -693,8 +693,6 @@ static int snd_mtpav_probe(struct platform_device *dev)
        mtp_card->outmidihwport = 0xffffffff;
        timer_setup(&mtp_card->timer, snd_mtpav_output_timer, 0);
 
-       card->private_free = snd_mtpav_free;
-
        err = snd_mtpav_get_RAWMIDI(mtp_card);
        if (err < 0)
                return err;
@@ -716,6 +714,8 @@ static int snd_mtpav_probe(struct platform_device *dev)
        if (err < 0)
                return err;
 
+       card->private_free = snd_mtpav_free;
+
        platform_set_drvdata(dev, card);
        printk(KERN_INFO "Motu MidiTimePiece on parallel port irq: %d ioport: 0x%lx\n", irq, port);
        return 0;
index efe810a..48b8ed7 100644 (file)
@@ -116,16 +116,25 @@ static int i915_component_master_match(struct device *dev, int subcomponent,
        return 0;
 }
 
-/* check whether intel graphics is present */
-static bool i915_gfx_present(void)
+/* check whether Intel graphics is present and reachable */
+static int i915_gfx_present(struct pci_dev *hdac_pci)
 {
-       static const struct pci_device_id ids[] = {
-               { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
-                 .class = PCI_BASE_CLASS_DISPLAY << 16,
-                 .class_mask = 0xff << 16 },
-               {}
-       };
-       return pci_dev_present(ids);
+       unsigned int class = PCI_BASE_CLASS_DISPLAY << 16;
+       struct pci_dev *display_dev = NULL;
+       bool match = false;
+
+       do {
+               display_dev = pci_get_class(class, display_dev);
+
+               if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL &&
+                   connectivity_check(display_dev, hdac_pci))
+                       match = true;
+
+               pci_dev_put(display_dev);
+
+       } while (!match && display_dev);
+
+       return match;
 }
 
 /**
@@ -145,7 +154,7 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        struct drm_audio_component *acomp;
        int err;
 
-       if (!i915_gfx_present())
+       if (!i915_gfx_present(to_pci_dev(bus->dev)))
                return -ENODEV;
 
        err = snd_hdac_acomp_init(bus, NULL,
index 70fd8b1..8b0a16b 100644 (file)
@@ -390,22 +390,36 @@ static const struct config_entry config_table[] = {
 
 /* Alder Lake */
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ALDERLAKE)
+       /* Alderlake-S */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x7ad0,
        },
+       /* RaptorLake-S */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
-               .device = 0x51c8,
+               .device = 0x7a50,
        },
+       /* Alderlake-P */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
-               .device = 0x51cc,
+               .device = 0x51c8,
        },
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x51cd,
        },
+       /* Alderlake-PS */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51c9,
+       },
+       /* Alderlake-M */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cc,
+       },
+       /* Alderlake-N */
        {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x54c8,
index ea001c8..3164eb8 100644 (file)
@@ -478,7 +478,7 @@ static void snd_galaxy_free(struct snd_card *card)
                galaxy_set_config(galaxy, galaxy->config);
 }
 
-static int snd_galaxy_probe(struct device *dev, unsigned int n)
+static int __snd_galaxy_probe(struct device *dev, unsigned int n)
 {
        struct snd_galaxy *galaxy;
        struct snd_wss *chip;
@@ -598,6 +598,11 @@ static int snd_galaxy_probe(struct device *dev, unsigned int n)
        return 0;
 }
 
+static int snd_galaxy_probe(struct device *dev, unsigned int n)
+{
+       return snd_card_free_on_error(dev, __snd_galaxy_probe(dev, n));
+}
+
 static struct isa_driver snd_galaxy_driver = {
        .match          = snd_galaxy_match,
        .probe          = snd_galaxy_probe,
index 26ab7ff..60398fc 100644 (file)
@@ -537,7 +537,7 @@ static void snd_sc6000_free(struct snd_card *card)
                sc6000_setup_board(vport, 0);
 }
 
-static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+static int __snd_sc6000_probe(struct device *devptr, unsigned int dev)
 {
        static const int possible_irqs[] = { 5, 7, 9, 10, 11, -1 };
        static const int possible_dmas[] = { 1, 3, 0, -1 };
@@ -662,6 +662,11 @@ static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
        return 0;
 }
 
+static int snd_sc6000_probe(struct device *devptr, unsigned int dev)
+{
+       return snd_card_free_on_error(devptr, __snd_sc6000_probe(devptr, dev));
+}
+
 static struct isa_driver snd_sc6000_driver = {
        .match          = snd_sc6000_match,
        .probe          = snd_sc6000_probe,
index c1c52b4..ad8ce6a 100644 (file)
@@ -88,11 +88,7 @@ static inline int ioctl_return(int __user *addr, int value)
      */
 
 extern int dmasound_init(void);
-#ifdef MODULE
 extern void dmasound_deinit(void);
-#else
-#define dmasound_deinit()      do { } while (0)
-#endif
 
 /* description of the set-up applies to either hard or soft settings */
 
@@ -114,9 +110,7 @@ typedef struct {
     void *(*dma_alloc)(unsigned int, gfp_t);
     void (*dma_free)(void *, unsigned int);
     int (*irqinit)(void);
-#ifdef MODULE
     void (*irqcleanup)(void);
-#endif
     void (*init)(void);
     void (*silence)(void);
     int (*setFormat)(int);
index 0c95828..9c48f3a 100644 (file)
@@ -206,12 +206,10 @@ module_param(writeBufSize, int, 0);
 
 MODULE_LICENSE("GPL");
 
-#ifdef MODULE
 static int sq_unit = -1;
 static int mixer_unit = -1;
 static int state_unit = -1;
 static int irq_installed;
-#endif /* MODULE */
 
 /* control over who can modify resources shared between play/record */
 static fmode_t shared_resource_owner;
@@ -391,9 +389,6 @@ static const struct file_operations mixer_fops =
 
 static void mixer_init(void)
 {
-#ifndef MODULE
-       int mixer_unit;
-#endif
        mixer_unit = register_sound_mixer(&mixer_fops, -1);
        if (mixer_unit < 0)
                return;
@@ -1171,9 +1166,6 @@ static const struct file_operations sq_fops =
 static int sq_init(void)
 {
        const struct file_operations *fops = &sq_fops;
-#ifndef MODULE
-       int sq_unit;
-#endif
 
        sq_unit = register_sound_dsp(fops, -1);
        if (sq_unit < 0) {
@@ -1366,9 +1358,6 @@ static const struct file_operations state_fops = {
 
 static int state_init(void)
 {
-#ifndef MODULE
-       int state_unit;
-#endif
        state_unit = register_sound_special(&state_fops, SND_DEV_STATUS);
        if (state_unit < 0)
                return state_unit ;
@@ -1386,10 +1375,9 @@ static int state_init(void)
 int dmasound_init(void)
 {
        int res ;
-#ifdef MODULE
+
        if (irq_installed)
                return -EBUSY;
-#endif
 
        /* Set up sound queue, /dev/audio and /dev/dsp. */
 
@@ -1408,9 +1396,7 @@ int dmasound_init(void)
                printk(KERN_ERR "DMA sound driver: Interrupt initialization failed\n");
                return -ENODEV;
        }
-#ifdef MODULE
        irq_installed = 1;
-#endif
 
        printk(KERN_INFO "%s DMA sound driver rev %03d installed\n",
                dmasound.mach.name, (DMASOUND_CORE_REVISION<<4) +
@@ -1424,8 +1410,6 @@ int dmasound_init(void)
        return 0;
 }
 
-#ifdef MODULE
-
 void dmasound_deinit(void)
 {
        if (irq_installed) {
@@ -1444,8 +1428,6 @@ void dmasound_deinit(void)
                unregister_sound_dsp(sq_unit);
 }
 
-#else /* !MODULE */
-
 static int dmasound_setup(char *str)
 {
        int ints[6], size;
@@ -1489,8 +1471,6 @@ static int dmasound_setup(char *str)
 
 __setup("dmasound=", dmasound_setup);
 
-#endif /* !MODULE */
-
     /*
      *  Conversion tables
      */
@@ -1577,9 +1557,7 @@ char dmasound_alaw2dma8[] = {
 
 EXPORT_SYMBOL(dmasound);
 EXPORT_SYMBOL(dmasound_init);
-#ifdef MODULE
 EXPORT_SYMBOL(dmasound_deinit);
-#endif
 EXPORT_SYMBOL(dmasound_write_sq);
 EXPORT_SYMBOL(dmasound_catchRadius);
 #ifdef HAS_8BIT_TABLES
index bba4dae..50e3070 100644 (file)
@@ -844,8 +844,8 @@ snd_ad1889_create(struct snd_card *card, struct pci_dev *pci)
 }
 
 static int
-snd_ad1889_probe(struct pci_dev *pci,
-                const struct pci_device_id *pci_id)
+__snd_ad1889_probe(struct pci_dev *pci,
+                  const struct pci_device_id *pci_id)
 {
        int err;
        static int devno;
@@ -904,6 +904,12 @@ snd_ad1889_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ad1889_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ad1889_probe(pci, pci_id));
+}
+
 static const struct pci_device_id snd_ad1889_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_ANALOG_DEVICES, PCI_DEVICE_ID_AD1889JS) },
        { 0, },
index 92eb59d..2378a39 100644 (file)
@@ -2124,8 +2124,8 @@ static int snd_ali_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_ali_probe(struct pci_dev *pci,
-                        const struct pci_device_id *pci_id)
+static int __snd_ali_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct snd_ali *codec;
@@ -2170,6 +2170,12 @@ static int snd_ali_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ali_probe(struct pci_dev *pci,
+                        const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ali_probe(pci, pci_id));
+}
+
 static struct pci_driver ali5451_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_ali_ids,
index b86565d..c70aff0 100644 (file)
@@ -708,7 +708,7 @@ static int snd_als300_probe(struct pci_dev *pci,
 
        err = snd_als300_create(card, pci, chip_type);
        if (err < 0)
-               return err;
+               goto error;
 
        strcpy(card->driver, "ALS300");
        if (chip->chip_type == DEVICE_ALS300_PLUS)
@@ -723,11 +723,15 @@ static int snd_als300_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver als300_driver = {
index 535eccd..f33aeb6 100644 (file)
@@ -806,8 +806,8 @@ static void snd_card_als4000_free( struct snd_card *card )
        snd_als4000_free_gameport(acard);
 }
 
-static int snd_card_als4000_probe(struct pci_dev *pci,
-                                 const struct pci_device_id *pci_id)
+static int __snd_card_als4000_probe(struct pci_dev *pci,
+                                   const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -930,6 +930,12 @@ static int snd_card_als4000_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_card_als4000_probe(struct pci_dev *pci,
+                                 const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_als4000_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_als4000_suspend(struct device *dev)
 {
index b8e035d..43d01f1 100644 (file)
@@ -1572,8 +1572,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
 }
 
 
-static int snd_atiixp_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct atiixp *chip;
@@ -1623,6 +1623,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_atiixp_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
 static struct pci_driver atiixp_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_atiixp_ids,
index 178dce8..8864c4c 100644 (file)
@@ -1201,8 +1201,8 @@ static int snd_atiixp_init(struct snd_card *card, struct pci_dev *pci)
 }
 
 
-static int snd_atiixp_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_atiixp_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct atiixp_modem *chip;
@@ -1247,6 +1247,12 @@ static int snd_atiixp_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_atiixp_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_atiixp_probe(pci, pci_id));
+}
+
 static struct pci_driver atiixp_modem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_atiixp_ids,
index 342ef2a..eb23415 100644 (file)
@@ -193,7 +193,7 @@ snd_vortex_create(struct snd_card *card, struct pci_dev *pci)
 
 // constructor -- see "Constructor" sub-section
 static int
-snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -310,6 +310,12 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_vortex_probe(pci, pci_id));
+}
+
 // pci_driver definition
 static struct pci_driver vortex_driver = {
        .name = KBUILD_MODNAME,
index d56f126..29a4bcd 100644 (file)
@@ -275,7 +275,7 @@ static int snd_aw2_probe(struct pci_dev *pci,
        /* (3) Create main component */
        err = snd_aw2_create(card, pci);
        if (err < 0)
-               return err;
+               goto error;
 
        /* initialize mutex */
        mutex_init(&chip->mtx);
@@ -294,13 +294,17 @@ static int snd_aw2_probe(struct pci_dev *pci,
        /* (6) Register card instance */
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        /* (7) Set PCI driver data */
        pci_set_drvdata(pci, card);
 
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 /* open callback */
index 0890504..7f329df 100644 (file)
@@ -2427,7 +2427,7 @@ snd_azf3328_create(struct snd_card *card,
 }
 
 static int
-snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2520,6 +2520,12 @@ snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_azf3328_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_azf3328_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static inline void
 snd_azf3328_suspend_regs(const struct snd_azf3328 *chip,
index d23f931..621985b 100644 (file)
@@ -805,8 +805,8 @@ static int snd_bt87x_detect_card(struct pci_dev *pci)
        return SND_BT87X_BOARD_UNKNOWN;
 }
 
-static int snd_bt87x_probe(struct pci_dev *pci,
-                          const struct pci_device_id *pci_id)
+static int __snd_bt87x_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -889,6 +889,12 @@ static int snd_bt87x_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_bt87x_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_bt87x_probe(pci, pci_id));
+}
+
 /* default entries for all Bt87x cards - it's not exported */
 /* driver_data is set to 0 to call detection */
 static const struct pci_device_id snd_bt87x_default_ids[] = {
index 8577f9f..cf1bac7 100644 (file)
@@ -1725,8 +1725,8 @@ static int snd_ca0106_midi(struct snd_ca0106 *chip, unsigned int channel)
 }
 
 
-static int snd_ca0106_probe(struct pci_dev *pci,
-                                       const struct pci_device_id *pci_id)
+static int __snd_ca0106_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1786,6 +1786,12 @@ static int snd_ca0106_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_ca0106_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_ca0106_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_ca0106_suspend(struct device *dev)
 {
index dab801d..727db6d 100644 (file)
@@ -3247,15 +3247,19 @@ static int snd_cmipci_probe(struct pci_dev *pci,
 
        err = snd_cmipci_create(card, pci, dev);
        if (err < 0)
-               return err;
+               goto error;
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 #ifdef CONFIG_PM_SLEEP
index e736740..0c9cadf 100644 (file)
@@ -1827,8 +1827,8 @@ static void snd_cs4281_opl3_command(struct snd_opl3 *opl3, unsigned short cmd,
        spin_unlock_irqrestore(&opl3->reg_lock, flags);
 }
 
-static int snd_cs4281_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_cs4281_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1888,6 +1888,12 @@ static int snd_cs4281_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_cs4281_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_cs4281_probe(pci, pci_id));
+}
+
 /*
  * Power Management
  */
index 499fa01..440b8f9 100644 (file)
@@ -281,8 +281,8 @@ static int snd_cs5535audio_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_cs5535audio_probe(struct pci_dev *pci,
-                                const struct pci_device_id *pci_id)
+static int __snd_cs5535audio_probe(struct pci_dev *pci,
+                                  const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -331,6 +331,12 @@ static int snd_cs5535audio_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_cs5535audio_probe(struct pci_dev *pci,
+                                const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_cs5535audio_probe(pci, pci_id));
+}
+
 static struct pci_driver cs5535audio_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_cs5535audio_ids,
index 25b012e..c70c3ac 100644 (file)
@@ -1970,8 +1970,8 @@ static int snd_echo_create(struct snd_card *card,
 }
 
 /* constructor */
-static int snd_echo_probe(struct pci_dev *pci,
-                         const struct pci_device_id *pci_id)
+static int __snd_echo_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2139,6 +2139,11 @@ static int snd_echo_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_echo_probe(struct pci_dev *pci,
+                         const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_echo_probe(pci, pci_id));
+}
 
 
 #if defined(CONFIG_PM_SLEEP)
index c49c44d..8904339 100644 (file)
@@ -1491,8 +1491,8 @@ static int snd_emu10k1x_midi(struct emu10k1x *emu)
        return 0;
 }
 
-static int snd_emu10k1x_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_emu10k1x_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1554,6 +1554,12 @@ static int snd_emu10k1x_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_emu10k1x_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_emu10k1x_probe(pci, pci_id));
+}
+
 // PCI IDs
 static const struct pci_device_id snd_emu10k1x_ids[] = {
        { PCI_VDEVICE(CREATIVE, 0x0006), 0 },   /* Dell OEM version (EMU10K1) */
index 2651f0c..94efe34 100644 (file)
@@ -2304,8 +2304,8 @@ static irqreturn_t snd_audiopci_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static int snd_audiopci_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_audiopci_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2369,6 +2369,12 @@ static int snd_audiopci_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_audiopci_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_audiopci_probe(pci, pci_id));
+}
+
 static struct pci_driver ens137x_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_audiopci_ids,
index 00b976f..e34ec6f 100644 (file)
@@ -1716,8 +1716,8 @@ static int snd_es1938_mixer(struct es1938 *chip)
 }
        
 
-static int snd_es1938_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_es1938_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1796,6 +1796,12 @@ static int snd_es1938_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_es1938_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_es1938_probe(pci, pci_id));
+}
+
 static struct pci_driver es1938_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_es1938_ids,
index 6a8a02a..4a7e20b 100644 (file)
@@ -2741,8 +2741,8 @@ static int snd_es1968_create(struct snd_card *card,
 
 /*
  */
-static int snd_es1968_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_es1968_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2848,6 +2848,12 @@ static int snd_es1968_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_es1968_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_es1968_probe(pci, pci_id));
+}
+
 static struct pci_driver es1968_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_es1968_ids,
index 9c22ff1..62b3cb1 100644 (file)
@@ -1268,8 +1268,8 @@ static int snd_fm801_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_card_fm801_probe(struct pci_dev *pci,
-                               const struct pci_device_id *pci_id)
+static int __snd_card_fm801_probe(struct pci_dev *pci,
+                                 const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1333,6 +1333,12 @@ static int snd_card_fm801_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_card_fm801_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_fm801_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static const unsigned char saved_regs[] = {
        FM801_PCM_VOL, FM801_I2S_VOL, FM801_FM_VOL, FM801_REC_SRC,
index 4e12af2..62fbf37 100644 (file)
@@ -2619,6 +2619,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x65f1, "Clevo PC50HS", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
+       SND_PCI_QUIRK(0x1558, 0x65f5, "Clevo PD50PN[NRT]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
        SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
@@ -9264,6 +9265,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x505d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x505f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5062, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x508b, "Thinkpad X12 Gen 1", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
index f627586..6fab2ad 100644 (file)
@@ -2519,8 +2519,8 @@ static int snd_vt1724_create(struct snd_card *card,
  *
  */
 
-static int snd_vt1724_probe(struct pci_dev *pci,
-                           const struct pci_device_id *pci_id)
+static int __snd_vt1724_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2662,6 +2662,12 @@ static int snd_vt1724_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_vt1724_probe(struct pci_dev *pci,
+                           const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_vt1724_probe(pci, pci_id));
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int snd_vt1724_suspend(struct device *dev)
 {
index a51032b..ae285c0 100644 (file)
@@ -3109,8 +3109,8 @@ static int check_default_spdif_aclink(struct pci_dev *pci)
        return 0;
 }
 
-static int snd_intel8x0_probe(struct pci_dev *pci,
-                             const struct pci_device_id *pci_id)
+static int __snd_intel8x0_probe(struct pci_dev *pci,
+                               const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct intel8x0 *chip;
@@ -3189,6 +3189,12 @@ static int snd_intel8x0_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_intel8x0_probe(struct pci_dev *pci,
+                             const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_intel8x0_probe(pci, pci_id));
+}
+
 static struct pci_driver intel8x0_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_intel8x0_ids,
index 7de3cb2..2845cc0 100644 (file)
@@ -1178,8 +1178,8 @@ static struct shortname_table {
        { 0 },
 };
 
-static int snd_intel8x0m_probe(struct pci_dev *pci,
-                              const struct pci_device_id *pci_id)
+static int __snd_intel8x0m_probe(struct pci_dev *pci,
+                                const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct intel8x0m *chip;
@@ -1225,6 +1225,12 @@ static int snd_intel8x0m_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_intel8x0m_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_intel8x0m_probe(pci, pci_id));
+}
+
 static struct pci_driver intel8x0m_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_intel8x0m_ids,
index 5c9e240..33b4f95 100644 (file)
@@ -2355,7 +2355,7 @@ snd_korg1212_probe(struct pci_dev *pci,
 
        err = snd_korg1212_create(card, pci);
        if (err < 0)
-               return err;
+               goto error;
 
        strcpy(card->driver, "korg1212");
        strcpy(card->shortname, "korg1212");
@@ -2366,10 +2366,14 @@ snd_korg1212_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver korg1212_driver = {
index 5269a1d..1aa30e9 100644 (file)
@@ -637,8 +637,8 @@ static int lola_create(struct snd_card *card, struct pci_dev *pci, int dev)
        return 0;
 }
 
-static int lola_probe(struct pci_dev *pci,
-                     const struct pci_device_id *pci_id)
+static int __lola_probe(struct pci_dev *pci,
+                       const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -687,6 +687,12 @@ static int lola_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int lola_probe(struct pci_dev *pci,
+                     const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __lola_probe(pci, pci_id));
+}
+
 /* PCI IDs */
 static const struct pci_device_id lola_ids[] = {
        { PCI_VDEVICE(DIGIGRAM, 0x0001) },
index 168a108..bd9b614 100644 (file)
@@ -1019,7 +1019,7 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
        err = snd_lx6464es_create(card, pci);
        if (err < 0) {
                dev_err(card->dev, "error during snd_lx6464es_create\n");
-               return err;
+               goto error;
        }
 
        strcpy(card->driver, "LX6464ES");
@@ -1036,12 +1036,16 @@ static int snd_lx6464es_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        dev_dbg(chip->card->dev, "initialization successful\n");
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver lx6464es_driver = {
index 056838e..2618507 100644 (file)
@@ -2637,7 +2637,7 @@ snd_m3_create(struct snd_card *card, struct pci_dev *pci,
 /*
  */
 static int
-snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2702,6 +2702,12 @@ snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_m3_probe(pci, pci_id));
+}
+
 static struct pci_driver m3_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_m3_ids,
index c9c1785..f99a1e9 100644 (file)
@@ -1573,7 +1573,6 @@ snd_nm256_create(struct snd_card *card, struct pci_dev *pci)
        chip->coeffs_current = 0;
 
        snd_nm256_init_chip(chip);
-       card->private_free = snd_nm256_free;
 
        // pci_set_master(pci); /* needed? */
        return 0;
@@ -1680,6 +1679,7 @@ static int snd_nm256_probe(struct pci_dev *pci,
        err = snd_card_register(card);
        if (err < 0)
                return err;
+       card->private_free = snd_nm256_free;
 
        pci_set_drvdata(pci, card);
        return 0;
index 4fb3f24..92ffe9d 100644 (file)
@@ -576,7 +576,7 @@ static void oxygen_card_free(struct snd_card *card)
        mutex_destroy(&chip->mutex);
 }
 
-int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+static int __oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
                     struct module *owner,
                     const struct pci_device_id *ids,
                     int (*get_model)(struct oxygen *chip,
@@ -701,6 +701,16 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
        pci_set_drvdata(pci, card);
        return 0;
 }
+
+int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
+                    struct module *owner,
+                    const struct pci_device_id *ids,
+                    int (*get_model)(struct oxygen *chip,
+                                     const struct pci_device_id *id))
+{
+       return snd_card_free_on_error(&pci->dev,
+                                     __oxygen_pci_probe(pci, index, id, owner, ids, get_model));
+}
 EXPORT_SYMBOL(oxygen_pci_probe);
 
 #ifdef CONFIG_PM_SLEEP
index 5a987c6..b37c877 100644 (file)
@@ -2023,7 +2023,7 @@ static void snd_riptide_joystick_remove(struct pci_dev *pci)
 #endif
 
 static int
-snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -2124,6 +2124,12 @@ snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_card_riptide_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_card_riptide_probe(pci, pci_id));
+}
+
 static struct pci_driver driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_riptide_ids,
index 5b6bd9f..9c0ac02 100644 (file)
@@ -1875,7 +1875,7 @@ static void snd_rme32_card_free(struct snd_card *card)
 }
 
 static int
-snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+__snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 {
        static int dev;
        struct rme32 *rme32;
@@ -1927,6 +1927,12 @@ snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
        return 0;
 }
 
+static int
+snd_rme32_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_rme32_probe(pci, pci_id));
+}
+
 static struct pci_driver rme32_driver = {
        .name =         KBUILD_MODNAME,
        .id_table =     snd_rme32_ids,
index 8fc8115..bccb7e0 100644 (file)
@@ -2430,8 +2430,8 @@ static void snd_rme96_card_free(struct snd_card *card)
 }
 
 static int
-snd_rme96_probe(struct pci_dev *pci,
-               const struct pci_device_id *pci_id)
+__snd_rme96_probe(struct pci_dev *pci,
+                 const struct pci_device_id *pci_id)
 {
        static int dev;
        struct rme96 *rme96;
@@ -2498,6 +2498,12 @@ snd_rme96_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_rme96_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_rme96_probe(pci, pci_id));
+}
+
 static struct pci_driver rme96_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_rme96_ids,
index 96c12df..3db6413 100644 (file)
@@ -5444,17 +5444,21 @@ static int snd_hdsp_probe(struct pci_dev *pci,
        hdsp->pci = pci;
        err = snd_hdsp_create(card, hdsp);
        if (err)
-               return err;
+               goto error;
 
        strcpy(card->shortname, "Hammerfall DSP");
        sprintf(card->longname, "%s at 0x%lx, irq %d", hdsp->card_name,
                hdsp->port, hdsp->irq);
        err = snd_card_register(card);
        if (err)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver hdsp_driver = {
index ff06ee8..fa1812e 100644 (file)
@@ -6895,7 +6895,7 @@ static int snd_hdspm_probe(struct pci_dev *pci,
 
        err = snd_hdspm_create(card, hdspm);
        if (err < 0)
-               return err;
+               goto error;
 
        if (hdspm->io_type != MADIface) {
                snprintf(card->shortname, sizeof(card->shortname), "%s_%x",
@@ -6914,12 +6914,16 @@ static int snd_hdspm_probe(struct pci_dev *pci,
 
        err = snd_card_register(card);
        if (err < 0)
-               return err;
+               goto error;
 
        pci_set_drvdata(pci, card);
 
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver hdspm_driver = {
index 7755e19..1d614fe 100644 (file)
@@ -2572,7 +2572,7 @@ static int snd_rme9652_probe(struct pci_dev *pci,
        rme9652->pci = pci;
        err = snd_rme9652_create(card, rme9652, precise_ptr[dev]);
        if (err)
-               return err;
+               goto error;
 
        strcpy(card->shortname, rme9652->card_name);
 
@@ -2580,10 +2580,14 @@ static int snd_rme9652_probe(struct pci_dev *pci,
                card->shortname, rme9652->port, rme9652->irq);
        err = snd_card_register(card);
        if (err)
-               return err;
+               goto error;
        pci_set_drvdata(pci, card);
        dev++;
        return 0;
+
+ error:
+       snd_card_free(card);
+       return err;
 }
 
 static struct pci_driver rme9652_driver = {
index 0b722b0..fabe393 100644 (file)
@@ -1331,8 +1331,8 @@ static int sis_chip_create(struct snd_card *card,
        return 0;
 }
 
-static int snd_sis7019_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_sis7019_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct sis7019 *sis;
@@ -1352,8 +1352,8 @@ static int snd_sis7019_probe(struct pci_dev *pci,
        if (!codecs)
                codecs = SIS_PRIMARY_CODEC_PRESENT;
 
-       rc = snd_card_new(&pci->dev, index, id, THIS_MODULE,
-                         sizeof(*sis), &card);
+       rc = snd_devm_card_new(&pci->dev, index, id, THIS_MODULE,
+                              sizeof(*sis), &card);
        if (rc < 0)
                return rc;
 
@@ -1386,6 +1386,12 @@ static int snd_sis7019_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_sis7019_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_sis7019_probe(pci, pci_id));
+}
+
 static struct pci_driver sis7019_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_sis7019_ids,
index c8c4988..f91cbf6 100644 (file)
@@ -1387,8 +1387,8 @@ static int snd_sonicvibes_midi(struct sonicvibes *sonic,
        return 0;
 }
 
-static int snd_sonic_probe(struct pci_dev *pci,
-                          const struct pci_device_id *pci_id)
+static int __snd_sonic_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
 {
        static int dev;
        struct snd_card *card;
@@ -1459,6 +1459,12 @@ static int snd_sonic_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_sonic_probe(struct pci_dev *pci,
+                          const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_sonic_probe(pci, pci_id));
+}
+
 static struct pci_driver sonicvibes_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_sonic_ids,
index 65514f7..361b83f 100644 (file)
@@ -2458,8 +2458,8 @@ static int check_dxs_list(struct pci_dev *pci, int revision)
        return VIA_DXS_48K;
 };
 
-static int snd_via82xx_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct via82xx *chip;
@@ -2569,6 +2569,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_via82xx_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
 static struct pci_driver via82xx_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_via82xx_ids,
index 234f7fb..ca7f024 100644 (file)
@@ -1103,8 +1103,8 @@ static int snd_via82xx_create(struct snd_card *card,
 }
 
 
-static int snd_via82xx_probe(struct pci_dev *pci,
-                            const struct pci_device_id *pci_id)
+static int __snd_via82xx_probe(struct pci_dev *pci,
+                              const struct pci_device_id *pci_id)
 {
        struct snd_card *card;
        struct via82xx_modem *chip;
@@ -1157,6 +1157,12 @@ static int snd_via82xx_probe(struct pci_dev *pci,
        return 0;
 }
 
+static int snd_via82xx_probe(struct pci_dev *pci,
+                            const struct pci_device_id *pci_id)
+{
+       return snd_card_free_on_error(&pci->dev, __snd_via82xx_probe(pci, pci_id));
+}
+
 static struct pci_driver via82xx_modem_driver = {
        .name = KBUILD_MODNAME,
        .id_table = snd_via82xx_modem_ids,
index cec6e91..6d69906 100644 (file)
@@ -669,9 +669,9 @@ static const struct snd_pcm_hardware snd_usb_hardware =
                                SNDRV_PCM_INFO_PAUSE,
        .channels_min =         1,
        .channels_max =         256,
-       .buffer_bytes_max =     1024 * 1024,
+       .buffer_bytes_max =     INT_MAX, /* limited by BUFFER_TIME later */
        .period_bytes_min =     64,
-       .period_bytes_max =     512 * 1024,
+       .period_bytes_max =     INT_MAX, /* limited by PERIOD_TIME later */
        .periods_min =          2,
        .periods_max =          1024,
 };
@@ -1064,6 +1064,18 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
                        return err;
        }
 
+       /* set max period and buffer sizes for 1 and 2 seconds, respectively */
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+                                          0, 1000000);
+       if (err < 0)
+               return err;
+       err = snd_pcm_hw_constraint_minmax(runtime,
+                                          SNDRV_PCM_HW_PARAM_BUFFER_TIME,
+                                          0, 2000000);
+       if (err < 0)
+               return err;
+
        /* additional hw constraints for implicit fb */
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
                                  hw_rule_format_implicit_fb, subs,
index 1678341..b8359a0 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 /* handling of USB vendor/product ID pairs as 32-bit numbers */
-#define USB_ID(vendor, product) (((vendor) << 16) | (product))
+#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
 #define USB_ID_VENDOR(id) ((id) >> 16)
 #define USB_ID_PRODUCT(id) ((u16)(id))
 
index b006346..0d828e3 100644 (file)
@@ -1652,7 +1652,7 @@ static void hdmi_lpe_audio_free(struct snd_card *card)
  * This function is called when the i915 driver creates the
  * hdmi-lpe-audio platform device.
  */
-static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+static int __hdmi_lpe_audio_probe(struct platform_device *pdev)
 {
        struct snd_card *card;
        struct snd_intelhad_card *card_ctx;
@@ -1815,6 +1815,11 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
        return 0;
 }
 
+static int hdmi_lpe_audio_probe(struct platform_device *pdev)
+{
+       return snd_card_free_on_error(&pdev->dev, __hdmi_lpe_audio_probe(pdev));
+}
+
 static const struct dev_pm_ops hdmi_lpe_audio_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(hdmi_lpe_audio_suspend, hdmi_lpe_audio_resume)
 };
index 9afcc64..e09d690 100644 (file)
@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A77                0xD0D
 #define ARM_CPU_PART_NEOVERSE_V1       0xD40
 #define ARM_CPU_PART_CORTEX_A78                0xD41
+#define ARM_CPU_PART_CORTEX_A78AE      0xD42
 #define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
 #define MIDR_CORTEX_A77        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
 #define MIDR_NEOVERSE_V1       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78        MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE      MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
index 0eb90d2..ee15311 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
-/* SRBDS support */
 #define MSR_IA32_MCU_OPT_CTRL          0x00000123
-#define RNGDS_MITG_DIS                 BIT(0)
+#define RNGDS_MITG_DIS                 BIT(0)  /* SRBDS support */
+#define RTM_ALLOW                      BIT(1)  /* TSX development mode */
 
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
index 91af285..7678af3 100644 (file)
@@ -828,8 +828,10 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
                        s->map_cnt = %zu;                           \n\
                        s->map_skel_sz = sizeof(*s->maps);          \n\
                        s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
-                       if (!s->maps)                               \n\
+                       if (!s->maps) {                             \n\
+                               err = -ENOMEM;                      \n\
                                goto err;                           \n\
+                       }                                           \n\
                ",
                map_cnt
        );
@@ -870,8 +872,10 @@ codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_li
                        s->prog_cnt = %zu;                          \n\
                        s->prog_skel_sz = sizeof(*s->progs);        \n\
                        s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
-                       if (!s->progs)                              \n\
+                       if (!s->progs) {                            \n\
+                               err = -ENOMEM;                      \n\
                                goto err;                           \n\
+                       }                                           \n\
                ",
                prog_cnt
        );
@@ -1182,10 +1186,13 @@ static int do_skeleton(int argc, char **argv)
                %1$s__create_skeleton(struct %1$s *obj)                     \n\
                {                                                           \n\
                        struct bpf_object_skeleton *s;                      \n\
+                       int err;                                            \n\
                                                                            \n\
                        s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
-                       if (!s)                                             \n\
+                       if (!s) {                                           \n\
+                               err = -ENOMEM;                              \n\
                                goto err;                                   \n\
+                       }                                                   \n\
                                                                            \n\
                        s->sz = sizeof(*s);                                 \n\
                        s->name = \"%1$s\";                                 \n\
@@ -1206,7 +1213,7 @@ static int do_skeleton(int argc, char **argv)
                        return 0;                                           \n\
                err:                                                        \n\
                        bpf_object__destroy_skeleton(s);                    \n\
-                       return -ENOMEM;                                     \n\
+                       return err;                                         \n\
                }                                                           \n\
                                                                            \n\
                static inline const void *%2$s__elf_bytes(size_t *sz)       \n\
@@ -1466,12 +1473,12 @@ static int do_subskeleton(int argc, char **argv)
                                                                            \n\
                        obj = (struct %1$s *)calloc(1, sizeof(*obj));       \n\
                        if (!obj) {                                         \n\
-                               errno = ENOMEM;                             \n\
+                               err = -ENOMEM;                              \n\
                                goto err;                                   \n\
                        }                                                   \n\
                        s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
                        if (!s) {                                           \n\
-                               errno = ENOMEM;                             \n\
+                               err = -ENOMEM;                              \n\
                                goto err;                                   \n\
                        }                                                   \n\
                        s->sz = sizeof(*s);                                 \n\
@@ -1483,7 +1490,7 @@ static int do_subskeleton(int argc, char **argv)
                        s->var_cnt = %2$d;                                  \n\
                        s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
                        if (!s->vars) {                                     \n\
-                               errno = ENOMEM;                             \n\
+                               err = -ENOMEM;                              \n\
                                goto err;                                   \n\
                        }                                                   \n\
                ",
@@ -1538,6 +1545,7 @@ static int do_subskeleton(int argc, char **argv)
                        return obj;                                         \n\
                err:                                                        \n\
                        %1$s__destroy(obj);                                 \n\
+                       errno = -err;                                       \n\
                        return NULL;                                        \n\
                }                                                           \n\
                                                                            \n\
index 1480910..de66e1c 100644 (file)
@@ -217,9 +217,16 @@ strip-libs = $(filter-out -l%,$(1))
 PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
 PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
 PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
 FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
 
+ifeq ($(CC_NO_CLANG), 0)
+  PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
+  PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+  FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
+endif
+
 $(OUTPUT)test-libperl.bin:
        $(BUILD) $(FLAGS_PERL_EMBED)
 
index c998860..5d99e7c 100644 (file)
 /* Get the valid iova range */
 #define VHOST_VDPA_GET_IOVA_RANGE      _IOR(VHOST_VIRTIO, 0x78, \
                                             struct vhost_vdpa_iova_range)
+
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
+
 #endif
index 1b15ba1..a093155 100644 (file)
@@ -577,7 +577,6 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel;
        const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
-       const struct perf_thread_map *threads = evlist->threads;
 
        if (!ops || !ops->get || !ops->mmap)
                return -EINVAL;
@@ -589,7 +588,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
        perf_evlist__for_each_entry(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
-                   perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+                   perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
                        return -ENOMEM;
        }
 
index 6de5085..bd0c2c8 100644 (file)
@@ -1155,6 +1155,17 @@ static void annotate_call_site(struct objtool_file *file,
                                       : arch_nop_insn(insn->len));
 
                insn->type = sibling ? INSN_RETURN : INSN_NOP;
+
+               if (sibling) {
+                       /*
+                        * We've replaced the tail-call JMP insn by two new
+                        * insn: RET; INT3, except we only have a single struct
+                        * insn here. Mark it retpoline_safe to avoid the SLS
+                        * warning, instead of adding another insn.
+                        */
+                       insn->retpoline_safe = true;
+               }
+
                return;
        }
 
@@ -1239,11 +1250,20 @@ static bool same_function(struct instruction *insn1, struct instruction *insn2)
        return insn1->func->pfunc == insn2->func->pfunc;
 }
 
-static bool is_first_func_insn(struct instruction *insn)
+static bool is_first_func_insn(struct objtool_file *file, struct instruction *insn)
 {
-       return insn->offset == insn->func->offset ||
-              (insn->type == INSN_ENDBR &&
-               insn->offset == insn->func->offset + insn->len);
+       if (insn->offset == insn->func->offset)
+               return true;
+
+       if (ibt) {
+               struct instruction *prev = prev_insn_same_sym(file, insn);
+
+               if (prev && prev->type == INSN_ENDBR &&
+                   insn->offset == insn->func->offset + prev->len)
+                       return true;
+       }
+
+       return false;
 }
 
 /*
@@ -1327,7 +1347,7 @@ static int add_jump_destinations(struct objtool_file *file)
                                insn->jump_dest->func->pfunc = insn->func;
 
                        } else if (!same_function(insn, insn->jump_dest) &&
-                                  is_first_func_insn(insn->jump_dest)) {
+                                  is_first_func_insn(file, insn->jump_dest)) {
                                /* internal sibling call (without reloc) */
                                add_call_dest(file, insn, insn->jump_dest->func, true);
                        }
index 9c330cd..71ebdf8 100644 (file)
@@ -83,7 +83,7 @@ linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
 linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
 linkperf:perf-evlist[1], linkperf:perf-ftrace[1],
 linkperf:perf-help[1], linkperf:perf-inject[1],
-linkperf:perf-intel-pt[1], linkperf:perf-kallsyms[1],
+linkperf:perf-intel-pt[1], linkperf:perf-iostat[1], linkperf:perf-kallsyms[1],
 linkperf:perf-kmem[1], linkperf:perf-kvm[1], linkperf:perf-lock[1],
 linkperf:perf-mem[1], linkperf:perf-probe[1], linkperf:perf-sched[1],
 linkperf:perf-script[1], linkperf:perf-test[1],
index 96ad944..f3bf929 100644 (file)
@@ -272,6 +272,9 @@ ifdef PYTHON_CONFIG
   PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
   PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
   FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+  ifeq ($(CC_NO_CLANG), 0)
+    PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
+  endif
 endif
 
 FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
@@ -790,6 +793,9 @@ else
     LDFLAGS += $(PERL_EMBED_LDFLAGS)
     EXTLIBS += $(PERL_EMBED_LIBADD)
     CFLAGS += -DHAVE_LIBPERL_SUPPORT
+    ifeq ($(CC_NO_CLANG), 0)
+      CFLAGS += -Wno-compound-token-split-by-macro
+    endif
     $(call detected,CONFIG_LIBPERL)
   endif
 endif
index 86e2e92..af4d63a 100644 (file)
@@ -239,6 +239,12 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
                arm_spe_set_timestamp(itr, arm_spe_evsel);
        }
 
+       /*
+        * Set this only so that perf report knows that SPE generates memory info. It has no effect
+        * on the opening of the event or the SPE data produced.
+        */
+       evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
+
        /* Add dummy event to keep tracking */
        err = parse_events(evlist, "dummy:u", NULL);
        if (err)
index 134612b..4256dc5 100644 (file)
@@ -222,13 +222,20 @@ static void init_fdmaps(struct worker *w, int pct)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
        pthread_attr_t thread_attr, *attrp = NULL;
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i, j;
        int ret = 0;
+       int nrcpus;
+       size_t size;
 
        if (!noaffinity)
                pthread_attr_init(&thread_attr);
 
+       nrcpus = perf_cpu_map__nr(cpu);
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < nthreads; i++) {
                struct worker *w = &worker[i];
 
@@ -252,22 +259,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
                        init_fdmaps(w, 50);
 
                if (!noaffinity) {
-                       CPU_ZERO(&cpuset);
-                       CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+                       CPU_ZERO_S(size, cpuset);
+                       CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+                                       size, cpuset);
 
-                       ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                       if (ret)
+                       ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                       if (ret) {
+                               CPU_FREE(cpuset);
                                err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                       }
 
                        attrp = &thread_attr;
                }
 
                ret = pthread_create(&w->thread, attrp, workerfn,
                                     (void *)(struct worker *) w);
-               if (ret)
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
 
+       CPU_FREE(cpuset);
        if (!noaffinity)
                pthread_attr_destroy(&thread_attr);
 
index 37de970..2728b01 100644 (file)
@@ -291,9 +291,11 @@ static void print_summary(void)
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
 {
        pthread_attr_t thread_attr, *attrp = NULL;
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i, j;
        int ret = 0, events = EPOLLIN;
+       int nrcpus;
+       size_t size;
 
        if (oneshot)
                events |= EPOLLONESHOT;
@@ -306,6 +308,11 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
        if (!noaffinity)
                pthread_attr_init(&thread_attr);
 
+       nrcpus = perf_cpu_map__nr(cpu);
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < nthreads; i++) {
                struct worker *w = &worker[i];
 
@@ -341,22 +348,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
                }
 
                if (!noaffinity) {
-                       CPU_ZERO(&cpuset);
-                       CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+                       CPU_ZERO_S(size, cpuset);
+                       CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+                                       size, cpuset);
 
-                       ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-                       if (ret)
+                       ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+                       if (ret) {
+                               CPU_FREE(cpuset);
                                err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+                       }
 
                        attrp = &thread_attr;
                }
 
                ret = pthread_create(&w->thread, attrp, workerfn,
                                     (void *)(struct worker *) w);
-               if (ret)
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
 
+       CPU_FREE(cpuset);
        if (!noaffinity)
                pthread_attr_destroy(&thread_attr);
 
index dbcecec..f05db4c 100644 (file)
@@ -122,12 +122,14 @@ static void print_summary(void)
 int bench_futex_hash(int argc, const char **argv)
 {
        int ret = 0;
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        struct sigaction act;
        unsigned int i;
        pthread_attr_t thread_attr;
        struct worker *worker = NULL;
        struct perf_cpu_map *cpu;
+       int nrcpus;
+       size_t size;
 
        argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
        if (argc) {
@@ -170,25 +172,35 @@ int bench_futex_hash(int argc, const char **argv)
        threads_starting = params.nthreads;
        pthread_attr_init(&thread_attr);
        gettimeofday(&bench__start, NULL);
+
+       nrcpus = perf_cpu_map__nr(cpu);
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < params.nthreads; i++) {
                worker[i].tid = i;
                worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
                if (!worker[i].futex)
                        goto errmem;
 
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
 
-               ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
-               if (ret)
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
+               ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
-
+               }
                ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
                                     (void *)(struct worker *) &worker[i]);
-               if (ret)
+               if (ret) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
 
        }
+       CPU_FREE(cpuset);
        pthread_attr_destroy(&thread_attr);
 
        pthread_mutex_lock(&thread_lock);
index 6fc9a3d..0abb3f7 100644 (file)
@@ -120,11 +120,17 @@ static void *workerfn(void *arg)
 static void create_threads(struct worker *w, pthread_attr_t thread_attr,
                           struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
+       int nrcpus =  perf_cpu_map__nr(cpu);
+       size_t size;
 
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        for (i = 0; i < params.nthreads; i++) {
                worker[i].tid = i;
 
@@ -135,15 +141,20 @@ static void create_threads(struct worker *w, pthread_attr_t thread_attr,
                } else
                        worker[i].futex = &global_futex;
 
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+               if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 int bench_futex_lock_pi(int argc, const char **argv)
index 2f59d5d..b6faabf 100644 (file)
@@ -123,22 +123,33 @@ static void *workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w,
                          pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
+       int nrcpus = perf_cpu_map__nr(cpu);
+       size_t size;
 
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        /* create and block all threads */
        for (i = 0; i < params.nthreads; i++) {
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+               if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
index 861deb9..e47f46a 100644 (file)
@@ -144,22 +144,33 @@ static void *blocked_workerfn(void *arg __maybe_unused)
 static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
                          struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
+       int nrcpus = perf_cpu_map__nr(cpu);
+       size_t size;
 
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        /* create and block all threads */
        for (i = 0; i < params.nthreads; i++) {
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+               if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 static void print_run(struct thread_data *waking_worker, unsigned int run_num)
index cfda48b..201a355 100644 (file)
@@ -97,22 +97,32 @@ static void print_summary(void)
 static void block_threads(pthread_t *w,
                          pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
 {
-       cpu_set_t cpuset;
+       cpu_set_t *cpuset;
        unsigned int i;
-
+       size_t size;
+       int nrcpus = perf_cpu_map__nr(cpu);
        threads_starting = params.nthreads;
 
+       cpuset = CPU_ALLOC(nrcpus);
+       BUG_ON(!cpuset);
+       size = CPU_ALLOC_SIZE(nrcpus);
+
        /* create and block all threads */
        for (i = 0; i < params.nthreads; i++) {
-               CPU_ZERO(&cpuset);
-               CPU_SET(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, &cpuset);
+               CPU_ZERO_S(size, cpuset);
+               CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
 
-               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+               if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+               }
 
-               if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+               if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+                       CPU_FREE(cpuset);
                        err(EXIT_FAILURE, "pthread_create");
+               }
        }
+       CPU_FREE(cpuset);
 }
 
 static void toggle_done(int sig __maybe_unused,
index f264017..44e1f8a 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/numa.h>
 #include <linux/zalloc.h>
 
+#include "../util/header.h"
 #include <numa.h>
 #include <numaif.h>
 
@@ -54,7 +55,7 @@
 
 struct thread_data {
        int                     curr_cpu;
-       cpu_set_t               bind_cpumask;
+       cpu_set_t               *bind_cpumask;
        int                     bind_node;
        u8                      *process_data;
        int                     process_nr;
@@ -266,71 +267,115 @@ static bool node_has_cpus(int node)
        return ret;
 }
 
-static cpu_set_t bind_to_cpu(int target_cpu)
+static cpu_set_t *bind_to_cpu(int target_cpu)
 {
-       cpu_set_t orig_mask, mask;
-       int ret;
+       int nrcpus = numa_num_possible_cpus();
+       cpu_set_t *orig_mask, *mask;
+       size_t size;
 
-       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-       BUG_ON(ret);
+       orig_mask = CPU_ALLOC(nrcpus);
+       BUG_ON(!orig_mask);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, orig_mask);
+
+       if (sched_getaffinity(0, size, orig_mask))
+               goto err_out;
+
+       mask = CPU_ALLOC(nrcpus);
+       if (!mask)
+               goto err_out;
 
-       CPU_ZERO(&mask);
+       CPU_ZERO_S(size, mask);
 
        if (target_cpu == -1) {
                int cpu;
 
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &mask);
+                       CPU_SET_S(cpu, size, mask);
        } else {
-               BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
-               CPU_SET(target_cpu, &mask);
+               if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
+                       goto err;
+
+               CPU_SET_S(target_cpu, size, mask);
        }
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       if (sched_setaffinity(0, size, mask))
+               goto err;
 
        return orig_mask;
+
+err:
+       CPU_FREE(mask);
+err_out:
+       CPU_FREE(orig_mask);
+
+       /* BUG_ON due to failure in allocation of orig_mask/mask */
+       BUG_ON(-1);
 }
 
-static cpu_set_t bind_to_node(int target_node)
+static cpu_set_t *bind_to_node(int target_node)
 {
-       cpu_set_t orig_mask, mask;
+       int nrcpus = numa_num_possible_cpus();
+       size_t size;
+       cpu_set_t *orig_mask, *mask;
        int cpu;
-       int ret;
 
-       ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
-       BUG_ON(ret);
+       orig_mask = CPU_ALLOC(nrcpus);
+       BUG_ON(!orig_mask);
+       size = CPU_ALLOC_SIZE(nrcpus);
+       CPU_ZERO_S(size, orig_mask);
 
-       CPU_ZERO(&mask);
+       if (sched_getaffinity(0, size, orig_mask))
+               goto err_out;
+
+       mask = CPU_ALLOC(nrcpus);
+       if (!mask)
+               goto err_out;
+
+       CPU_ZERO_S(size, mask);
 
        if (target_node == NUMA_NO_NODE) {
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &mask);
+                       CPU_SET_S(cpu, size, mask);
        } else {
                struct bitmask *cpumask = numa_allocate_cpumask();
 
-               BUG_ON(!cpumask);
+               if (!cpumask)
+                       goto err;
+
                if (!numa_node_to_cpus(target_node, cpumask)) {
                        for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
                                if (numa_bitmask_isbitset(cpumask, cpu))
-                                       CPU_SET(cpu, &mask);
+                                       CPU_SET_S(cpu, size, mask);
                        }
                }
                numa_free_cpumask(cpumask);
        }
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       if (sched_setaffinity(0, size, mask))
+               goto err;
 
        return orig_mask;
+
+err:
+       CPU_FREE(mask);
+err_out:
+       CPU_FREE(orig_mask);
+
+       /* BUG_ON due to failure in allocation of orig_mask/mask */
+       BUG_ON(-1);
 }
 
-static void bind_to_cpumask(cpu_set_t mask)
+static void bind_to_cpumask(cpu_set_t *mask)
 {
        int ret;
+       size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
 
-       ret = sched_setaffinity(0, sizeof(mask), &mask);
-       BUG_ON(ret);
+       ret = sched_setaffinity(0, size, mask);
+       if (ret) {
+               CPU_FREE(mask);
+               BUG_ON(ret);
+       }
 }
 
 static void mempol_restore(void)
@@ -376,7 +421,7 @@ do {                                                        \
 static u8 *alloc_data(ssize_t bytes0, int map_flags,
                      int init_zero, int init_cpu0, int thp, int init_random)
 {
-       cpu_set_t orig_mask;
+       cpu_set_t *orig_mask = NULL;
        ssize_t bytes;
        u8 *buf;
        int ret;
@@ -434,6 +479,7 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
        /* Restore affinity: */
        if (init_cpu0) {
                bind_to_cpumask(orig_mask);
+               CPU_FREE(orig_mask);
                mempol_restore();
        }
 
@@ -585,10 +631,16 @@ static int parse_setup_cpu_list(void)
                        return -1;
                }
 
+               if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
+                       printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
+                       return -1;
+               }
+
                BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
                BUG_ON(bind_cpu_0 > bind_cpu_1);
 
                for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+                       size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
                        int i;
 
                        for (i = 0; i < mul; i++) {
@@ -608,10 +660,15 @@ static int parse_setup_cpu_list(void)
                                        tprintf("%2d", bind_cpu);
                                }
 
-                               CPU_ZERO(&td->bind_cpumask);
+                               td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+                               BUG_ON(!td->bind_cpumask);
+                               CPU_ZERO_S(size, td->bind_cpumask);
                                for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
-                                       BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
-                                       CPU_SET(cpu, &td->bind_cpumask);
+                                       if (cpu < 0 || cpu >= g->p.nr_cpus) {
+                                               CPU_FREE(td->bind_cpumask);
+                                               BUG_ON(-1);
+                                       }
+                                       CPU_SET_S(cpu, size, td->bind_cpumask);
                                }
                                t++;
                        }
@@ -752,8 +809,6 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
        return parse_node_list(arg);
 }
 
-#define BIT(x) (1ul << x)
-
 static inline uint32_t lfsr_32(uint32_t lfsr)
 {
        const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
@@ -1241,7 +1296,7 @@ static void *worker_thread(void *__tdata)
                 * by migrating to CPU#0:
                 */
                if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
-                       cpu_set_t orig_mask;
+                       cpu_set_t *orig_mask;
                        int target_cpu;
                        int this_cpu;
 
@@ -1265,6 +1320,7 @@ static void *worker_thread(void *__tdata)
                                printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
 
                        bind_to_cpumask(orig_mask);
+                       CPU_FREE(orig_mask);
                }
 
                if (details >= 3) {
@@ -1398,21 +1454,31 @@ static void init_thread_data(void)
 
        for (t = 0; t < g->p.nr_tasks; t++) {
                struct thread_data *td = g->threads + t;
+               size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
                int cpu;
 
                /* Allow all nodes by default: */
                td->bind_node = NUMA_NO_NODE;
 
                /* Allow all CPUs by default: */
-               CPU_ZERO(&td->bind_cpumask);
+               td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+               BUG_ON(!td->bind_cpumask);
+               CPU_ZERO_S(cpuset_size, td->bind_cpumask);
                for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
-                       CPU_SET(cpu, &td->bind_cpumask);
+                       CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
        }
 }
 
 static void deinit_thread_data(void)
 {
        ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+       int t;
+
+       /* Free the bind_cpumask allocated for thread_data */
+       for (t = 0; t < g->p.nr_tasks; t++) {
+               struct thread_data *td = g->threads + t;
+               CPU_FREE(td->bind_cpumask);
+       }
 
        free_data(g->threads, size);
 }
index ba74fab..069825c 100644 (file)
@@ -989,8 +989,11 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
        struct mmap *overwrite_mmap = evlist->overwrite_mmap;
        struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
 
-       thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
-                                             thread_data->mask->maps.nbits);
+       if (cpu_map__is_dummy(cpus))
+               thread_data->nr_mmaps = nr_mmaps;
+       else
+               thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
+                                                     thread_data->mask->maps.nbits);
        if (mmap) {
                thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
                if (!thread_data->maps)
@@ -1007,16 +1010,17 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
                 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
 
        for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
-               if (test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+               if (cpu_map__is_dummy(cpus) ||
+                   test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
                        if (thread_data->maps) {
                                thread_data->maps[tm] = &mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
-                                         thread_data, cpus->map[m].cpu, tm, m);
+                                         thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
                        }
                        if (thread_data->overwrite_maps) {
                                thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
-                                         thread_data, cpus->map[m].cpu, tm, m);
+                                         thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
                        }
                        tm++;
                }
@@ -3329,6 +3333,9 @@ static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_c
 {
        int c;
 
+       if (cpu_map__is_dummy(cpus))
+               return;
+
        for (c = 0; c < cpus->nr; c++)
                set_bit(cpus->map[c].cpu, mask->bits);
 }
@@ -3680,6 +3687,11 @@ static int record__init_thread_masks(struct record *rec)
        if (!record__threads_enabled(rec))
                return record__init_thread_default_masks(rec, cpus);
 
+       if (cpu_map__is_dummy(cpus)) {
+               pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
+               return -EINVAL;
+       }
+
        switch (rec->opts.threads_spec) {
        case THREAD_SPEC__CPU:
                ret = record__init_thread_cpu_masks(rec, cpus);
index 2f6b671..0170cb0 100644 (file)
@@ -55,6 +55,7 @@ struct cmd_struct {
 };
 
 static struct cmd_struct commands[] = {
+       { "archive",    NULL,   0 },
        { "buildid-cache", cmd_buildid_cache, 0 },
        { "buildid-list", cmd_buildid_list, 0 },
        { "config",     cmd_config,     0 },
@@ -62,6 +63,7 @@ static struct cmd_struct commands[] = {
        { "diff",       cmd_diff,       0 },
        { "evlist",     cmd_evlist,     0 },
        { "help",       cmd_help,       0 },
+       { "iostat",     NULL,   0 },
        { "kallsyms",   cmd_kallsyms,   0 },
        { "list",       cmd_list,       0 },
        { "record",     cmd_record,     0 },
@@ -360,6 +362,8 @@ static void handle_internal_command(int argc, const char **argv)
 
        for (i = 0; i < ARRAY_SIZE(commands); i++) {
                struct cmd_struct *p = commands+i;
+               if (p->fn == NULL)
+                       continue;
                if (strcmp(p->cmd, cmd))
                        continue;
                exit(run_builtin(p, argc, argv));
@@ -434,7 +438,7 @@ void pthread__unblock_sigwinch(void)
 static int libperf_print(enum libperf_print_level level,
                         const char *fmt, va_list ap)
 {
-       return eprintf(level, verbose, fmt, ap);
+       return veprintf(level, verbose, fmt, ap);
 }
 
 int main(int argc, const char **argv)
index 2dab2d2..afdca7f 100644 (file)
@@ -122,7 +122,7 @@ NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thr
        }
 
        err = unwind__get_entries(unwind_entry, &cnt, thread,
-                                 &sample, MAX_STACK);
+                                 &sample, MAX_STACK, false);
        if (err)
                pr_debug("unwind failed\n");
        else if (cnt != MAX_STACK) {
index d12d0ad..cc6df49 100644 (file)
        }                                       \
 }
 
+static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
+                                 int subtest __maybe_unused)
+{
+       if (!TSC_IS_SUPPORTED) {
+               pr_debug("Test not supported on this architecture\n");
+               return TEST_SKIP;
+       }
+
+       return TEST_OK;
+}
+
 /**
  * test__perf_time_to_tsc - test converting perf time to TSC.
  *
@@ -70,7 +81,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        struct perf_cpu_map *cpus = NULL;
        struct evlist *evlist = NULL;
        struct evsel *evsel = NULL;
-       int err = -1, ret, i;
+       int err = TEST_FAIL, ret, i;
        const char *comm1, *comm2;
        struct perf_tsc_conversion tc;
        struct perf_event_mmap_page *pc;
@@ -79,10 +90,6 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        u64 test_time, comm1_time = 0, comm2_time = 0;
        struct mmap *md;
 
-       if (!TSC_IS_SUPPORTED) {
-               pr_debug("Test not supported on this architecture");
-               return TEST_SKIP;
-       }
 
        threads = thread_map__new(-1, getpid(), UINT_MAX);
        CHECK_NOT_NULL__(threads);
@@ -124,8 +131,8 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
        ret = perf_read_tsc_conversion(pc, &tc);
        if (ret) {
                if (ret == -EOPNOTSUPP) {
-                       fprintf(stderr, " (not supported)");
-                       return 0;
+                       pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
+                       err = TEST_SKIP;
                }
                goto out_err;
        }
@@ -191,7 +198,7 @@ next_event:
            test_tsc >= comm2_tsc)
                goto out_err;
 
-       err = 0;
+       err = TEST_OK;
 
 out_err:
        evlist__delete(evlist);
@@ -200,4 +207,15 @@ out_err:
        return err;
 }
 
-DEFINE_SUITE("Convert perf time to TSC", perf_time_to_tsc);
+static struct test_case time_to_tsc_tests[] = {
+       TEST_CASE_REASON("TSC support", tsc_is_supported,
+                        "This architecture does not support"),
+       TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
+                        "perf_read_tsc_conversion is not supported"),
+       { .name = NULL, }
+};
+
+struct test_suite suite__perf_time_to_tsc = {
+       .desc = "Convert perf time to TSC",
+       .test_cases = time_to_tsc_tests,
+};
index e4c641b..82cc396 100644 (file)
@@ -2047,6 +2047,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
        objdump_process.argv = objdump_argv;
        objdump_process.out = -1;
        objdump_process.err = -1;
+       objdump_process.no_stderr = 1;
        if (start_command(&objdump_process)) {
                pr_err("Failure starting to run %s\n", command);
                err = -1;
index 2242a88..4940be4 100644 (file)
@@ -53,7 +53,7 @@ u64 get_leaf_frame_caller_aarch64(struct perf_sample *sample, struct thread *thr
                sample->user_regs.cache_regs[PERF_REG_ARM64_SP] = 0;
        }
 
-       ret = unwind__get_entries(add_entry, &entries, thread, sample, 2);
+       ret = unwind__get_entries(add_entry, &entries, thread, sample, 2, true);
        sample->user_regs = old_regs;
 
        if (ret || entries.length != 2)
index d546ff7..a27132e 100644 (file)
@@ -983,6 +983,57 @@ static int write_dir_format(struct feat_fd *ff,
        return do_write(ff, &data->dir.version, sizeof(data->dir.version));
 }
 
+/*
+ * Check whether a CPU is online
+ *
+ * Returns:
+ *     1 -> if CPU is online
+ *     0 -> if CPU is offline
+ *    -1 -> error case
+ */
+int is_cpu_online(unsigned int cpu)
+{
+       char *str;
+       size_t strlen;
+       char buf[256];
+       int status = -1;
+       struct stat statbuf;
+
+       snprintf(buf, sizeof(buf),
+               "/sys/devices/system/cpu/cpu%d", cpu);
+       if (stat(buf, &statbuf) != 0)
+               return 0;
+
+       /*
+        * Check if /sys/devices/system/cpu/cpux/online file
+        * exists. Some cases cpu0 won't have online file since
+        * it is not expected to be turned off generally.
+        * In kernels without CONFIG_HOTPLUG_CPU, this
+        * file won't exist
+        */
+       snprintf(buf, sizeof(buf),
+               "/sys/devices/system/cpu/cpu%d/online", cpu);
+       if (stat(buf, &statbuf) != 0)
+               return 1;
+
+       /*
+        * Read online file using sysfs__read_str.
+        * If read or open fails, return -1.
+        * If read succeeds, return value from file
+        * which gets stored in "str"
+        */
+       snprintf(buf, sizeof(buf),
+               "devices/system/cpu/cpu%d/online", cpu);
+
+       if (sysfs__read_str(buf, &str, &strlen) < 0)
+               return status;
+
+       status = atoi(str);
+
+       free(str);
+       return status;
+}
+
 #ifdef HAVE_LIBBPF_SUPPORT
 static int write_bpf_prog_info(struct feat_fd *ff,
                               struct evlist *evlist __maybe_unused)
index c9e3265..0eb4bc2 100644 (file)
@@ -158,6 +158,7 @@ int do_write(struct feat_fd *fd, const void *buf, size_t size);
 int write_padded(struct feat_fd *fd, const void *bf,
                 size_t count, size_t count_aligned);
 
+int is_cpu_online(unsigned int cpu);
 /*
  * arch specific callback
  */
index b800485..9539123 100644 (file)
@@ -2987,7 +2987,7 @@ static int thread__resolve_callchain_unwind(struct thread *thread,
                return 0;
 
        return unwind__get_entries(unwind_entry, cursor,
-                                  thread, sample, max_stack);
+                                  thread, sample, max_stack, false);
 }
 
 int thread__resolve_callchain(struct thread *thread,
index 2499792..dd84fed 100644 (file)
@@ -1523,7 +1523,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
        bool use_uncore_alias;
        LIST_HEAD(config_terms);
 
-       if (verbose > 1) {
+       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
+
+       if (verbose > 1 && !(pmu && pmu->selectable)) {
                fprintf(stderr, "Attempting to add event pmu '%s' with '",
                        name);
                if (head_config) {
@@ -1536,7 +1538,6 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
                fprintf(stderr, "' that may result in non-fatal errors\n");
        }
 
-       pmu = parse_state->fake_pmu ?: perf_pmu__find(name);
        if (!pmu) {
                char *err_str;
 
index 3b8dfe6..45a3004 100644 (file)
@@ -2095,6 +2095,7 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
               bool needs_swap, union perf_event *error)
 {
        union perf_event *event;
+       u16 event_size;
 
        /*
         * Ensure we have enough space remaining to read
@@ -2107,15 +2108,23 @@ prefetch_event(char *buf, u64 head, size_t mmap_size,
        if (needs_swap)
                perf_event_header__bswap(&event->header);
 
-       if (head + event->header.size <= mmap_size)
+       event_size = event->header.size;
+       if (head + event_size <= mmap_size)
                return event;
 
        /* We're not fetching the event so swap back again */
        if (needs_swap)
                perf_event_header__bswap(&event->header);
 
-       pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx:"
-                " fuzzed or compressed perf.data?\n",__func__, head, event->header.size, mmap_size);
+       /* Check if the event fits into the next mmapped buf. */
+       if (event_size <= mmap_size - head % page_size) {
+               /* Remap buf and fetch again. */
+               return NULL;
+       }
+
+       /* Invalid input. Event size should never exceed mmap_size. */
+       pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
+                " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
 
        return error;
 }
index 483f050..c255a2c 100644 (file)
@@ -1,12 +1,14 @@
-from os import getenv
+from os import getenv, path
 from subprocess import Popen, PIPE
 from re import sub
 
 cc = getenv("CC")
 cc_is_clang = b"clang version" in Popen([cc.split()[0], "-v"], stderr=PIPE).stderr.readline()
+src_feature_tests  = getenv('srctree') + '/tools/build/feature'
 
 def clang_has_option(option):
-    return [o for o in Popen([cc, option], stderr=PIPE).stderr.readlines() if b"unknown argument" in o] == [ ]
+    cc_output = Popen([cc, option, path.join(src_feature_tests, "test-hello.c") ], stderr=PIPE).stderr.readlines()
+    return [o for o in cc_output if ((b"unknown argument" in o) or (b"is not supported" in o))] == [ ]
 
 if cc_is_clang:
     from distutils.sysconfig import get_config_vars
@@ -23,6 +25,8 @@ if cc_is_clang:
             vars[var] = sub("-fstack-protector-strong", "", vars[var])
         if not clang_has_option("-fno-semantic-interposition"):
             vars[var] = sub("-fno-semantic-interposition", "", vars[var])
+        if not clang_has_option("-ffat-lto-objects"):
+            vars[var] = sub("-ffat-lto-objects", "", vars[var])
 
 from distutils.core import setup, Extension
 
index ee6f034..817a2de 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <errno.h>
+#include <linux/err.h>
 #include <inttypes.h>
 #include <math.h>
 #include <string.h>
@@ -311,7 +312,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
 
        if (!mask) {
                mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
-               if (!mask)
+               if (IS_ERR(mask))
                        return -ENOMEM;
 
                counter->per_pkg_mask = mask;
index a74b517..94aa40f 100644 (file)
@@ -200,7 +200,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
        bool isactivation;
 
        if (!dwfl_frame_pc(state, &pc, NULL)) {
-               pr_err("%s", dwfl_errmsg(-1));
+               if (!ui->best_effort)
+                       pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
        }
 
@@ -208,7 +209,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
        report_module(pc, ui);
 
        if (!dwfl_frame_pc(state, &pc, &isactivation)) {
-               pr_err("%s", dwfl_errmsg(-1));
+               if (!ui->best_effort)
+                       pr_err("%s", dwfl_errmsg(-1));
                return DWARF_CB_ABORT;
        }
 
@@ -222,7 +224,8 @@ frame_callback(Dwfl_Frame *state, void *arg)
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
                        struct perf_sample *data,
-                       int max_stack)
+                       int max_stack,
+                       bool best_effort)
 {
        struct unwind_info *ui, ui_buf = {
                .sample         = data,
@@ -231,6 +234,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                .cb             = cb,
                .arg            = arg,
                .max_stack      = max_stack,
+               .best_effort    = best_effort
        };
        Dwarf_Word ip;
        int err = -EINVAL, i;
index 0cbd265..8c88bc4 100644 (file)
@@ -20,6 +20,7 @@ struct unwind_info {
        void                    *arg;
        int                     max_stack;
        int                     idx;
+       bool                    best_effort;
        struct unwind_entry     entries[];
 };
 
index 71a3533..41e29fc 100644 (file)
@@ -96,6 +96,7 @@ struct unwind_info {
        struct perf_sample      *sample;
        struct machine          *machine;
        struct thread           *thread;
+       bool                     best_effort;
 };
 
 #define dw_read(ptr, type, end) ({     \
@@ -553,7 +554,8 @@ static int access_reg(unw_addr_space_t __maybe_unused as,
 
        ret = perf_reg_value(&val, &ui->sample->user_regs, id);
        if (ret) {
-               pr_err("unwind: can't read reg %d\n", regnum);
+               if (!ui->best_effort)
+                       pr_err("unwind: can't read reg %d\n", regnum);
                return ret;
        }
 
@@ -666,7 +668,7 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
                        return -1;
 
                ret = unw_init_remote(&c, addr_space, ui);
-               if (ret)
+               if (ret && !ui->best_effort)
                        display_error(ret);
 
                while (!ret && (unw_step(&c) > 0) && i < max_stack) {
@@ -704,12 +706,14 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
 
 static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
-                       struct perf_sample *data, int max_stack)
+                       struct perf_sample *data, int max_stack,
+                       bool best_effort)
 {
        struct unwind_info ui = {
                .sample       = data,
                .thread       = thread,
                .machine      = thread->maps->machine,
+               .best_effort  = best_effort
        };
 
        if (!data->user_regs.regs)
index e89a547..509c287 100644 (file)
@@ -80,9 +80,11 @@ void unwind__finish_access(struct maps *maps)
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                         struct thread *thread,
-                        struct perf_sample *data, int max_stack)
+                        struct perf_sample *data, int max_stack,
+                        bool best_effort)
 {
        if (thread->maps->unwind_libunwind_ops)
-               return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+               return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
+                                                                      max_stack, best_effort);
        return 0;
 }
index ab8ad46..b2a03fa 100644 (file)
@@ -23,13 +23,19 @@ struct unwind_libunwind_ops {
        void (*finish_access)(struct maps *maps);
        int (*get_entries)(unwind_entry_cb_t cb, void *arg,
                           struct thread *thread,
-                          struct perf_sample *data, int max_stack);
+                          struct perf_sample *data, int max_stack, bool best_effort);
 };
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
+/*
+ * When best_effort is set, don't report errors and fail silently. This could
+ * be expanded in the future to be more permissive about things other than
+ * error messages.
+ */
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
-                       struct perf_sample *data, int max_stack);
+                       struct perf_sample *data, int max_stack,
+                       bool best_effort);
 /* libunwind specific */
 #ifdef HAVE_LIBUNWIND_SUPPORT
 #ifndef LIBUNWIND__ARCH_REG_ID
@@ -65,7 +71,8 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
                    void *arg __maybe_unused,
                    struct thread *thread __maybe_unused,
                    struct perf_sample *data __maybe_unused,
-                   int max_stack __maybe_unused)
+                   int max_stack __maybe_unused,
+                   bool best_effort __maybe_unused)
 {
        return 0;
 }
index 65dbdda..1da76cc 100644 (file)
@@ -1842,7 +1842,7 @@ static int nfit_test_dimm_init(struct nfit_test *t)
        return 0;
 }
 
-static void security_init(struct nfit_test *t)
+static void nfit_security_init(struct nfit_test *t)
 {
        int i;
 
@@ -1938,7 +1938,7 @@ static int nfit_test0_alloc(struct nfit_test *t)
        if (nfit_test_dimm_init(t))
                return -ENOMEM;
        smart_init(t);
-       security_init(t);
+       nfit_security_init(t);
        return ars_state_init(&t->pdev.dev, &t->ars_state);
 }
 
index 5aa52cc..c118326 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (C) 2021. Huawei Technologies Co., Ltd */
 #include <test_progs.h>
 #include "dummy_st_ops.skel.h"
+#include "trace_dummy_st_ops.skel.h"
 
 /* Need to keep consistent with definition in include/linux/bpf.h */
 struct bpf_dummy_ops_state {
@@ -56,6 +57,7 @@ static void test_dummy_init_ptr_arg(void)
                .ctx_in = args,
                .ctx_size_in = sizeof(args),
        );
+       struct trace_dummy_st_ops *trace_skel;
        struct dummy_st_ops *skel;
        int fd, err;
 
@@ -64,12 +66,33 @@ static void test_dummy_init_ptr_arg(void)
                return;
 
        fd = bpf_program__fd(skel->progs.test_1);
+
+       trace_skel = trace_dummy_st_ops__open();
+       if (!ASSERT_OK_PTR(trace_skel, "trace_dummy_st_ops__open"))
+               goto done;
+
+       err = bpf_program__set_attach_target(trace_skel->progs.fentry_test_1,
+                                            fd, "test_1");
+       if (!ASSERT_OK(err, "set_attach_target(fentry_test_1)"))
+               goto done;
+
+       err = trace_dummy_st_ops__load(trace_skel);
+       if (!ASSERT_OK(err, "load(trace_skel)"))
+               goto done;
+
+       err = trace_dummy_st_ops__attach(trace_skel);
+       if (!ASSERT_OK(err, "attach(trace_skel)"))
+               goto done;
+
        err = bpf_prog_test_run_opts(fd, &attr);
        ASSERT_OK(err, "test_run");
        ASSERT_EQ(in_state.val, 0x5a, "test_ptr_ret");
        ASSERT_EQ(attr.retval, exp_retval, "test_ret");
+       ASSERT_EQ(trace_skel->bss->val, exp_retval, "fentry_val");
 
+done:
        dummy_st_ops__destroy(skel);
+       trace_dummy_st_ops__destroy(trace_skel);
 }
 
 static void test_dummy_multiple_args(void)
index b64df94..db388f5 100644 (file)
@@ -367,7 +367,7 @@ static inline int check_array_of_maps(void)
 
        VERIFY(check_default(&array_of_maps->map, map));
        inner_map = bpf_map_lookup_elem(array_of_maps, &key);
-       VERIFY(inner_map != 0);
+       VERIFY(inner_map != NULL);
        VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
 
        return 1;
@@ -394,7 +394,7 @@ static inline int check_hash_of_maps(void)
 
        VERIFY(check_default(&hash_of_maps->map, map));
        inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
-       VERIFY(inner_map != 0);
+       VERIFY(inner_map != NULL);
        VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);
 
        return 1;
diff --git a/tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c b/tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c
new file mode 100644 (file)
index 0000000..00a4be9
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+int val = 0;
+
+SEC("fentry/test_1")
+int BPF_PROG(fentry_test_1, __u64 *st_ops_ctx)
+{
+       __u64 state;
+
+       /* Read the traced st_ops arg1 which is a pointer */
+       bpf_probe_read_kernel(&state, sizeof(__u64), (void *)st_ops_ctx);
+       /* Read state->val */
+       bpf_probe_read_kernel(&val, sizeof(__u32), (void *)state);
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
index b9e991d..e7775d3 100644 (file)
@@ -18,8 +18,9 @@
 #include "bpf_rlimit.h"
 #include "cgroup_helpers.h"
 
-static int start_server(const struct sockaddr *addr, socklen_t len)
+static int start_server(const struct sockaddr *addr, socklen_t len, bool dual)
 {
+       int mode = !dual;
        int fd;
 
        fd = socket(addr->sa_family, SOCK_STREAM, 0);
@@ -28,6 +29,14 @@ static int start_server(const struct sockaddr *addr, socklen_t len)
                goto out;
        }
 
+       if (addr->sa_family == AF_INET6) {
+               if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, (char *)&mode,
+                              sizeof(mode)) == -1) {
+                       log_err("Failed to set the dual-stack mode");
+                       goto close_out;
+               }
+       }
+
        if (bind(fd, addr, len) == -1) {
                log_err("Failed to bind server socket");
                goto close_out;
@@ -47,24 +56,17 @@ out:
        return fd;
 }
 
-static int connect_to_server(int server_fd)
+static int connect_to_server(const struct sockaddr *addr, socklen_t len)
 {
-       struct sockaddr_storage addr;
-       socklen_t len = sizeof(addr);
        int fd = -1;
 
-       if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
-               log_err("Failed to get server addr");
-               goto out;
-       }
-
-       fd = socket(addr.ss_family, SOCK_STREAM, 0);
+       fd = socket(addr->sa_family, SOCK_STREAM, 0);
        if (fd == -1) {
                log_err("Failed to create client socket");
                goto out;
        }
 
-       if (connect(fd, (const struct sockaddr *)&addr, len) == -1) {
+       if (connect(fd, (const struct sockaddr *)addr, len) == -1) {
                log_err("Fail to connect to server");
                goto close_out;
        }
@@ -116,7 +118,8 @@ err:
        return map_fd;
 }
 
-static int run_test(int server_fd, int results_fd, bool xdp)
+static int run_test(int server_fd, int results_fd, bool xdp,
+                   const struct sockaddr *addr, socklen_t len)
 {
        int client = -1, srv_client = -1;
        int ret = 0;
@@ -142,7 +145,7 @@ static int run_test(int server_fd, int results_fd, bool xdp)
                goto err;
        }
 
-       client = connect_to_server(server_fd);
+       client = connect_to_server(addr, len);
        if (client == -1)
                goto err;
 
@@ -199,12 +202,30 @@ out:
        return ret;
 }
 
+static bool get_port(int server_fd, in_port_t *port)
+{
+       struct sockaddr_in addr;
+       socklen_t len = sizeof(addr);
+
+       if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
+               log_err("Failed to get server addr");
+               return false;
+       }
+
+       /* sin_port and sin6_port are located at the same offset. */
+       *port = addr.sin_port;
+       return true;
+}
+
 int main(int argc, char **argv)
 {
        struct sockaddr_in addr4;
        struct sockaddr_in6 addr6;
+       struct sockaddr_in addr4dual;
+       struct sockaddr_in6 addr6dual;
        int server = -1;
        int server_v6 = -1;
+       int server_dual = -1;
        int results = -1;
        int err = 0;
        bool xdp;
@@ -224,25 +245,43 @@ int main(int argc, char **argv)
        addr4.sin_family = AF_INET;
        addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
        addr4.sin_port = 0;
+       memcpy(&addr4dual, &addr4, sizeof(addr4dual));
 
        memset(&addr6, 0, sizeof(addr6));
        addr6.sin6_family = AF_INET6;
        addr6.sin6_addr = in6addr_loopback;
        addr6.sin6_port = 0;
 
-       server = start_server((const struct sockaddr *)&addr4, sizeof(addr4));
-       if (server == -1)
+       memset(&addr6dual, 0, sizeof(addr6dual));
+       addr6dual.sin6_family = AF_INET6;
+       addr6dual.sin6_addr = in6addr_any;
+       addr6dual.sin6_port = 0;
+
+       server = start_server((const struct sockaddr *)&addr4, sizeof(addr4),
+                             false);
+       if (server == -1 || !get_port(server, &addr4.sin_port))
                goto err;
 
        server_v6 = start_server((const struct sockaddr *)&addr6,
-                                sizeof(addr6));
-       if (server_v6 == -1)
+                                sizeof(addr6), false);
+       if (server_v6 == -1 || !get_port(server_v6, &addr6.sin6_port))
+               goto err;
+
+       server_dual = start_server((const struct sockaddr *)&addr6dual,
+                                  sizeof(addr6dual), true);
+       if (server_dual == -1 || !get_port(server_dual, &addr4dual.sin_port))
+               goto err;
+
+       if (run_test(server, results, xdp,
+                    (const struct sockaddr *)&addr4, sizeof(addr4)))
                goto err;
 
-       if (run_test(server, results, xdp))
+       if (run_test(server_v6, results, xdp,
+                    (const struct sockaddr *)&addr6, sizeof(addr6)))
                goto err;
 
-       if (run_test(server_v6, results, xdp))
+       if (run_test(server_dual, results, xdp,
+                    (const struct sockaddr *)&addr4dual, sizeof(addr4dual)))
                goto err;
 
        printf("ok\n");
@@ -252,6 +291,7 @@ err:
 out:
        close(server);
        close(server_v6);
+       close(server_dual);
        close(results);
        return err;
 }
index 1177940..25f4d54 100644 (file)
@@ -64,6 +64,7 @@
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <unistd.h>
+#include <setjmp.h>
 
 #include "kselftest.h"
 
                struct __test_metadata *_metadata, \
                struct __fixture_variant_metadata *variant) \
        { \
-               test_name(_metadata); \
+               _metadata->setup_completed = true; \
+               if (setjmp(_metadata->env) == 0) \
+                       test_name(_metadata); \
+               __test_check_assert(_metadata); \
        } \
        static struct __test_metadata _##test_name##_object = \
                { .name = #test_name, \
 #define FIXTURE_TEARDOWN(fixture_name) \
        void fixture_name##_teardown( \
                struct __test_metadata __attribute__((unused)) *_metadata, \
-               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self)
+               FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \
+               const FIXTURE_VARIANT(fixture_name) \
+                       __attribute__((unused)) *variant)
 
 /**
  * FIXTURE_VARIANT() - Optionally called once per fixture
  *       ...
  *     };
  *
- * Defines type of constant parameters provided to FIXTURE_SETUP() and TEST_F()
- * as *variant*. Variants allow the same tests to be run with different
- * arguments.
+ * Defines type of constant parameters provided to FIXTURE_SETUP(), TEST_F() and
+ * FIXTURE_TEARDOWN as *variant*. Variants allow the same tests to be run with
+ * different arguments.
  */
 #define FIXTURE_VARIANT(fixture_name) struct _fixture_variant_##fixture_name
 
  * Defines a test that depends on a fixture (e.g., is part of a test case).
  * Very similar to TEST() except that *self* is the setup instance of fixture's
  * datatype exposed for use by the implementation.
- *
- * Warning: use of ASSERT_* here will skip TEARDOWN.
  */
-/* TODO(wad) register fixtures on dedicated test lists. */
 #define TEST_F(fixture_name, test_name) \
        __TEST_F_IMPL(fixture_name, test_name, -1, TEST_TIMEOUT_DEFAULT)
 
                /* fixture data is alloced, setup, and torn down per call. */ \
                FIXTURE_DATA(fixture_name) self; \
                memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
-               fixture_name##_setup(_metadata, &self, variant->data); \
-               /* Let setup failure terminate early. */ \
-               if (!_metadata->passed) \
-                       return; \
-               fixture_name##_##test_name(_metadata, &self, variant->data); \
-               fixture_name##_teardown(_metadata, &self); \
+               if (setjmp(_metadata->env) == 0) { \
+                       fixture_name##_setup(_metadata, &self, variant->data); \
+                       /* Let setup failure terminate early. */ \
+                       if (!_metadata->passed) \
+                               return; \
+                       _metadata->setup_completed = true; \
+                       fixture_name##_##test_name(_metadata, &self, variant->data); \
+               } \
+               if (_metadata->setup_completed) \
+                       fixture_name##_teardown(_metadata, &self, variant->data); \
+               __test_check_assert(_metadata); \
        } \
        static struct __test_metadata \
                      _##fixture_name##_##test_name##_object = { \
  */
 #define OPTIONAL_HANDLER(_assert) \
        for (; _metadata->trigger; _metadata->trigger = \
-                       __bail(_assert, _metadata->no_print, _metadata->step))
+                       __bail(_assert, _metadata))
 
 #define __INC_STEP(_metadata) \
        /* Keep "step" below 255 (which is used for "SKIP" reporting). */       \
@@ -830,6 +838,9 @@ struct __test_metadata {
        bool timed_out; /* did this test timeout instead of exiting? */
        __u8 step;
        bool no_print; /* manual trigger when TH_LOG_STREAM is not available */
+       bool aborted;   /* stopped test due to failed ASSERT */
+       bool setup_completed; /* did setup finish? */
+       jmp_buf env;    /* for exiting out of test early */
        struct __test_results *results;
        struct __test_metadata *prev, *next;
 };
@@ -848,16 +859,26 @@ static inline void __register_test(struct __test_metadata *t)
        __LIST_APPEND(t->fixture->tests, t);
 }
 
-static inline int __bail(int for_realz, bool no_print, __u8 step)
+static inline int __bail(int for_realz, struct __test_metadata *t)
 {
+       /* if this is ASSERT, return immediately. */
        if (for_realz) {
-               if (no_print)
-                       _exit(step);
-               abort();
+               t->aborted = true;
+               longjmp(t->env, 1);
        }
+       /* otherwise, end the for loop and continue. */
        return 0;
 }
 
+static inline void __test_check_assert(struct __test_metadata *t)
+{
+       if (t->aborted) {
+               if (t->no_print)
+                       _exit(t->step);
+               abort();
+       }
+}
+
 struct __test_metadata *__active_test;
 static void __timeout_handler(int sig, siginfo_t *info, void *ucontext)
 {
index d1e8f52..0b0e440 100644 (file)
@@ -3,6 +3,7 @@
 /aarch64/debug-exceptions
 /aarch64/get-reg-list
 /aarch64/psci_cpu_on_test
+/aarch64/vcpu_width_config
 /aarch64/vgic_init
 /aarch64/vgic_irq
 /s390x/memop
@@ -33,6 +34,7 @@
 /x86_64/state_test
 /x86_64/svm_vmcall_test
 /x86_64/svm_int_ctl_test
+/x86_64/tsc_scaling_sync
 /x86_64/sync_regs_test
 /x86_64/tsc_msrs_test
 /x86_64/userspace_io_test
index 21c2dbd..681b173 100644 (file)
@@ -106,6 +106,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
 TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
 TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
 TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
+TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
 TEST_GEN_PROGS_aarch64 += demand_paging_test
index b08d30b..3b940a1 100644 (file)
@@ -362,11 +362,12 @@ static void test_init_timer_irq(struct kvm_vm *vm)
        pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
 }
 
+static int gic_fd;
+
 static struct kvm_vm *test_vm_create(void)
 {
        struct kvm_vm *vm;
        unsigned int i;
-       int ret;
        int nr_vcpus = test_args.nr_vcpus;
 
        vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
@@ -383,8 +384,8 @@ static struct kvm_vm *test_vm_create(void)
 
        ucall_init(vm, NULL);
        test_init_timer_irq(vm);
-       ret = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-       if (ret < 0) {
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+       if (gic_fd < 0) {
                print_skip("Failed to create vgic-v3");
                exit(KSFT_SKIP);
        }
@@ -395,6 +396,12 @@ static struct kvm_vm *test_vm_create(void)
        return vm;
 }
 
+static void test_vm_cleanup(struct kvm_vm *vm)
+{
+       close(gic_fd);
+       kvm_vm_free(vm);
+}
+
 static void test_print_help(char *name)
 {
        pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
@@ -478,7 +485,7 @@ int main(int argc, char *argv[])
 
        vm = test_vm_create();
        test_run(vm);
-       kvm_vm_free(vm);
+       test_vm_cleanup(vm);
 
        return 0;
 }
index f12147c..0b571f3 100644 (file)
@@ -503,8 +503,13 @@ static void run_test(struct vcpu_config *c)
                ++missing_regs;
 
        if (new_regs || missing_regs) {
+               n = 0;
+               for_each_reg_filtered(i)
+                       ++n;
+
                printf("%s: Number blessed registers: %5lld\n", config_name(c), blessed_n);
-               printf("%s: Number registers:         %5lld\n", config_name(c), reg_list->n);
+               printf("%s: Number registers:         %5lld (includes %lld filtered registers)\n",
+                      config_name(c), reg_list->n, reg_list->n - n);
        }
 
        if (new_regs) {
@@ -683,9 +688,10 @@ static __u64 base_regs[] = {
        KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(spsr[4]),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpsr),
        KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(fp_regs.fpcr),
-       KVM_REG_ARM_FW_REG(0),
-       KVM_REG_ARM_FW_REG(1),
-       KVM_REG_ARM_FW_REG(2),
+       KVM_REG_ARM_FW_REG(0),          /* KVM_REG_ARM_PSCI_VERSION */
+       KVM_REG_ARM_FW_REG(1),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 */
+       KVM_REG_ARM_FW_REG(2),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 */
+       KVM_REG_ARM_FW_REG(3),          /* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 */
        ARM64_SYS_REG(3, 3, 14, 3, 1),  /* CNTV_CTL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 3, 2),  /* CNTV_CVAL_EL0 */
        ARM64_SYS_REG(3, 3, 14, 0, 2),
diff --git a/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c b/tools/testing/selftests/kvm/aarch64/vcpu_width_config.c
new file mode 100644 (file)
index 0000000..6e94026
--- /dev/null
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vcpu_width_config - Test KVM_ARM_VCPU_INIT() with KVM_ARM_VCPU_EL1_32BIT.
+ *
+ * Copyright (c) 2022 Google LLC.
+ *
+ * This is a test that ensures that non-mixed-width vCPUs (all 64bit vCPUs
+ * or all 32bit vCPUs) can be configured and mixed-width vCPUs cannot be
+ * configured.
+ */
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+
+
+/*
+ * Add a vCPU, run KVM_ARM_VCPU_INIT with @init1, and then
+ * add another vCPU, and run KVM_ARM_VCPU_INIT with @init2.
+ */
+static int add_init_2vcpus(struct kvm_vcpu_init *init1,
+                          struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       vm_vcpu_add(vm, 1);
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Add two vCPUs, then run KVM_ARM_VCPU_INIT for one vCPU with @init1,
+ * and run KVM_ARM_VCPU_INIT for another vCPU with @init2.
+ */
+static int add_2vcpus_init_2vcpus(struct kvm_vcpu_init *init1,
+                                 struct kvm_vcpu_init *init2)
+{
+       struct kvm_vm *vm;
+       int ret;
+
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+
+       vm_vcpu_add(vm, 0);
+       vm_vcpu_add(vm, 1);
+
+       ret = _vcpu_ioctl(vm, 0, KVM_ARM_VCPU_INIT, init1);
+       if (ret)
+               goto free_exit;
+
+       ret = _vcpu_ioctl(vm, 1, KVM_ARM_VCPU_INIT, init2);
+
+free_exit:
+       kvm_vm_free(vm);
+       return ret;
+}
+
+/*
+ * Tests that two 64bit vCPUs can be configured, two 32bit vCPUs can be
+ * configured, and two mixed-width vCPUs cannot be configured.
+ * For each of those three cases, configure the vCPUs in two different orders.
+ * One order is running KVM_CREATE_VCPU for 2 vCPUs, and then running
+ * KVM_ARM_VCPU_INIT for them.
+ * The other is running KVM_CREATE_VCPU and KVM_ARM_VCPU_INIT for one vCPU,
+ * and then running those commands for another vCPU.
+ */
+int main(void)
+{
+       struct kvm_vcpu_init init1, init2;
+       struct kvm_vm *vm;
+       int ret;
+
+       if (!kvm_check_cap(KVM_CAP_ARM_EL1_32BIT)) {
+               print_skip("KVM_CAP_ARM_EL1_32BIT is not supported");
+               exit(KSFT_SKIP);
+       }
+
+       /* Get the preferred target type and copy that to init2 for later use */
+       vm = vm_create(VM_MODE_DEFAULT, DEFAULT_GUEST_PHY_PAGES, O_RDWR);
+       vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init1);
+       kvm_vm_free(vm);
+       init2 = init1;
+
+       /* Test with 64bit vCPUs */
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 64bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with 32bit vCPUs */
+       init1.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init1);
+       TEST_ASSERT(ret == 0,
+                   "Configuring 32bit EL1 vCPUs failed unexpectedly");
+
+       /* Test with mixed-width vCPUs  */
+       init1.features[0] = 0;
+       init2.features[0] = (1 << KVM_ARM_VCPU_EL1_32BIT);
+       ret = add_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+       ret = add_2vcpus_init_2vcpus(&init1, &init2);
+       TEST_ASSERT(ret != 0,
+                   "Configuring mixed-width vCPUs worked unexpectedly");
+
+       return 0;
+}
index c9d9e51..7b47ae4 100644 (file)
 #include "test_util.h"
 #include "perf_test_util.h"
 #include "guest_modes.h"
+
 #ifdef __aarch64__
 #include "aarch64/vgic.h"
 
 #define GICD_BASE_GPA                  0x8000000ULL
 #define GICR_BASE_GPA                  0x80A0000ULL
+
+static int gic_fd;
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+       /*
+        * The test can still run even if hardware does not support GICv3, as it
+        * is only an optimization to reduce guest exits.
+        */
+       gic_fd = vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+       if (gic_fd > 0)
+               close(gic_fd);
+}
+
+#else /* __aarch64__ */
+
+static void arch_setup_vm(struct kvm_vm *vm, unsigned int nr_vcpus)
+{
+}
+
+static void arch_cleanup_vm(struct kvm_vm *vm)
+{
+}
+
 #endif
 
 /* How many host loops to run by default (one KVM_GET_DIRTY_LOG for each loop)*/
@@ -206,9 +235,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
                vm_enable_cap(vm, &cap);
        }
 
-#ifdef __aarch64__
-       vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
-#endif
+       arch_setup_vm(vm, nr_vcpus);
 
        /* Start the iterations */
        iteration = 0;
@@ -302,6 +329,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
        }
 
        free_bitmaps(bitmaps, p->slots);
+       arch_cleanup_vm(vm);
        perf_test_destroy_vm(vm);
 }
 
index dc284c6..eca5c62 100644 (file)
@@ -101,7 +101,9 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id,
 #define PGTBL_PTE_WRITE_SHIFT                  2
 #define PGTBL_PTE_READ_MASK                    0x0000000000000002ULL
 #define PGTBL_PTE_READ_SHIFT                   1
-#define PGTBL_PTE_PERM_MASK                    (PGTBL_PTE_EXECUTE_MASK | \
+#define PGTBL_PTE_PERM_MASK                    (PGTBL_PTE_ACCESSED_MASK | \
+                                                PGTBL_PTE_DIRTY_MASK | \
+                                                PGTBL_PTE_EXECUTE_MASK | \
                                                 PGTBL_PTE_WRITE_MASK | \
                                                 PGTBL_PTE_READ_MASK)
 #define PGTBL_PTE_VALID_MASK                   0x0000000000000001ULL
index d377f26..3961487 100644 (file)
@@ -268,7 +268,7 @@ void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
                core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6);
 }
 
-static void guest_hang(void)
+static void __aligned(16) guest_hang(void)
 {
        while (1)
                ;
index b019e0b..84fda3b 100644 (file)
@@ -180,6 +180,9 @@ void shutdown(int exit_val, char *err_cause, int line_no)
        if (in_shutdown++)
                return;
 
+       /* Free the cpu_set allocated using CPU_ALLOC in main function */
+       CPU_FREE(cpu_set);
+
        for (i = 0; i < num_cpus_to_pin; i++)
                if (cpu_threads[i]) {
                        pthread_kill(cpu_threads[i], SIGUSR1);
@@ -551,6 +554,12 @@ int main(int argc, char *argv[])
                perror("sysconf(_SC_NPROCESSORS_ONLN)");
                exit(1);
        }
+
+       if (getuid() != 0)
+               ksft_exit_skip("Not running as root, but almost all tests "
+                       "require root in order to modify\nsystem settings.  "
+                       "Exiting.\n");
+
        cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
        cpu_set = CPU_ALLOC(cpus_online);
        if (cpu_set == NULL) {
@@ -589,7 +598,7 @@ int main(int argc, char *argv[])
                                                cpu_set)) {
                                        fprintf(stderr, "Any given CPU may "
                                                "only be given once.\n");
-                                       exit(1);
+                                       goto err_code;
                                } else
                                        CPU_SET_S(cpus_to_pin[cpu],
                                                  cpu_set_size, cpu_set);
@@ -607,7 +616,7 @@ int main(int argc, char *argv[])
                                queue_path = malloc(strlen(option) + 2);
                                if (!queue_path) {
                                        perror("malloc()");
-                                       exit(1);
+                                       goto err_code;
                                }
                                queue_path[0] = '/';
                                queue_path[1] = 0;
@@ -622,17 +631,12 @@ int main(int argc, char *argv[])
                fprintf(stderr, "Must pass at least one CPU to continuous "
                        "mode.\n");
                poptPrintUsage(popt_context, stderr, 0);
-               exit(1);
+               goto err_code;
        } else if (!continuous_mode) {
                num_cpus_to_pin = 1;
                cpus_to_pin[0] = cpus_online - 1;
        }
 
-       if (getuid() != 0)
-               ksft_exit_skip("Not running as root, but almost all tests "
-                       "require root in order to modify\nsystem settings.  "
-                       "Exiting.\n");
-
        max_msgs = fopen(MAX_MSGS, "r+");
        max_msgsize = fopen(MAX_MSGSIZE, "r+");
        if (!max_msgs)
@@ -740,4 +744,9 @@ int main(int argc, char *argv[])
                        sleep(1);
        }
        shutdown(0, "", 0);
+
+err_code:
+       CPU_FREE(cpu_set);
+       exit(1);
+
 }
index d444ee6..b3bf531 100755 (executable)
@@ -1208,6 +1208,20 @@ ipv4_fcnal()
        set +e
        check_nexthop "dev veth1" ""
        log_test $? 0 "Nexthops removed on admin down"
+
+       # nexthop route delete warning: route add with nhid and delete
+       # using device
+       run_cmd "$IP li set dev veth1 up"
+       run_cmd "$IP nexthop add id 12 via 172.16.1.3 dev veth1"
+       out1=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l`
+       run_cmd "$IP route add 172.16.101.1/32 nhid 12"
+       run_cmd "$IP route delete 172.16.101.1/32 dev veth1"
+       out2=`dmesg | grep "WARNING:.*fib_nh_match.*" | wc -l`
+       [ $out1 -eq $out2 ]
+       rc=$?
+       log_test $rc 0 "Delete nexthop route warning"
+       run_cmd "$IP route delete 172.16.101.1/32 nhid 12"
+       run_cmd "$IP nexthop del id 12"
 }
 
 ipv4_grp_fcnal()
index dcaefa2..edafaca 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 CFLAGS += -g -I../../../../usr/include/
 
-TEST_GEN_PROGS := regression_enomem
+TEST_GEN_PROGS = regression_enomem
 
-include ../lib.mk
+LOCAL_HDRS += $(selfdir)/pidfd/pidfd.h
 
-$(OUTPUT)/regression_enomem: regression_enomem.c ../pidfd/pidfd.h
+include ../lib.mk
index 17999e0..070c1c8 100644 (file)
@@ -95,7 +95,6 @@ TEST(wait_states)
                .flags = CLONE_PIDFD | CLONE_PARENT_SETTID,
                .exit_signal = SIGCHLD,
        };
-       int ret;
        pid_t pid;
        siginfo_t info = {
                .si_signo = 0,
index 18a3bde..28604c9 100644 (file)
@@ -46,6 +46,8 @@
 #include <sys/time.h>
 #include <sys/resource.h>
 
+#include "../kselftest.h"
+
 static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
 {
        return syscall(SYS_execveat, dirfd, pathname, argv, envp, flags);
@@ -368,7 +370,7 @@ int main(void)
                };
                int i;
 
-               for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+               for (i = 0; i < ARRAY_SIZE(S); i++) {
                        assert(memmem(buf, rv, S[i], strlen(S[i])));
                }
 
@@ -417,7 +419,7 @@ int main(void)
                };
                int i;
 
-               for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
+               for (i = 0; i < ARRAY_SIZE(S); i++) {
                        assert(memmem(buf, rv, S[i], strlen(S[i])));
                }
        }
index c4aea79..e691a3c 100644 (file)
@@ -20,6 +20,7 @@
 #include <limits.h>
 
 #include "vdso_config.h"
+#include "../kselftest.h"
 
 static const char **name;
 
@@ -306,10 +307,8 @@ static void test_clock_gettime(void)
                return;
        }
 
-       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
-            clock++) {
+       for (int clock = 0; clock < ARRAY_SIZE(clocknames); clock++)
                test_one_clock_gettime(clock, clocknames[clock]);
-       }
 
        /* Also test some invalid clock ids */
        test_one_clock_gettime(-1, "invalid");
@@ -370,10 +369,8 @@ static void test_clock_gettime64(void)
                return;
        }
 
-       for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
-            clock++) {
+       for (int clock = 0; clock < ARRAY_SIZE(clocknames); clock++)
                test_one_clock_gettime64(clock, clocknames[clock]);
-       }
 
        /* Also test some invalid clock ids */
        test_one_clock_gettime64(-1, "invalid");
index 53df7d3..0388c4d 100644 (file)
@@ -92,6 +92,10 @@ warn_32bit_failure:
        echo "If you are using a Fedora-like distribution, try:";       \
        echo "";                                                        \
        echo "  yum install glibc-devel.*i686";                         \
+       echo "";                                                        \
+       echo "If you are using a SUSE-like distribution, try:";         \
+       echo "";                                                        \
+       echo "  zypper install gcc-32bit glibc-devel-static-32bit";     \
        exit 0;
 endif
 
index 70e05af..dfb7dab 100644 (file)
@@ -434,8 +434,8 @@ static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 
 static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-       kvm_dirty_ring_free(&vcpu->dirty_ring);
        kvm_arch_vcpu_destroy(vcpu);
+       kvm_dirty_ring_free(&vcpu->dirty_ring);
 
        /*
         * No need for rcu_read_lock as VCPU_RUN is the only place that changes
@@ -932,7 +932,7 @@ static void kvm_destroy_vm_debugfs(struct kvm *kvm)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
-       if (!kvm->debugfs_dentry)
+       if (IS_ERR(kvm->debugfs_dentry))
                return;
 
        debugfs_remove_recursive(kvm->debugfs_dentry);
@@ -955,6 +955,12 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
+       /*
+        * Force subsequent debugfs file creations to fail if the VM directory
+        * is not created.
+        */
+       kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
        if (!debugfs_initialized())
                return 0;
 
@@ -5479,7 +5485,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        add_uevent_var(env, "PID=%d", kvm->userspace_pid);
 
-       if (kvm->debugfs_dentry) {
+       if (!IS_ERR(kvm->debugfs_dentry)) {
                char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
 
                if (p) {